id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
21,400 | import logging
from typing import Any, Dict, Generator, List, Optional, Set, Tuple
import numpy
from onnx import ModelProto, numpy_helper
from sparseml.onnx.utils import DataLoader, DeepSparseAnalyzeModelRunner, ONNXGraph
from sparseml.optim import default_pruning_sparsities_perf
from sparseml.sparsification import Analyzer, AnalyzerProgress, ModelInfo
from sparseml.sparsification import (
PruningLossSensitivityMagnitudeAnalyzer as BasePruningLossMagnitudeAnalyzer,
)
from sparseml.sparsification import (
PruningSensitivityResult,
PruningSensitivityResultTypes,
)
_LOGGER = logging.getLogger(__name__)
def _validate_onnx_model_analyzer(
    prunable_param_names: Set[str], model: ModelProto
) -> bool:
    """
    Check that model is an onnx ModelProto whose graph initializers include
    every name in prunable_param_names.

    :param prunable_param_names: names of params expected in the model
    :param model: candidate onnx model to validate
    :return: True when the model is valid for analysis, False otherwise
    """
    if not isinstance(model, ModelProto):
        _LOGGER.debug(
            "ONNX model Analyzer expected model of type onnx.ModelProto, found: %s",
            str(type(model)),
        )
        return False
    available_names = {initializer.name for initializer in model.graph.initializer}
    missing_names = prunable_param_names - available_names
    if missing_names:
        _LOGGER.debug(
            "ONNX model Analyzer unable to find prunable params with names %s in "
            "model initializer list",
            ", ".join(missing_names),
        )
        return False
    return True
21,401 | from collections import OrderedDict
from typing import List, Optional, Union
import numpy
import onnx
from onnx import ModelProto, NodeProto, numpy_helper
from sparseml.onnx.utils import ONNXGraph, get_node_attributes
from sparseml.sparsification import LayerInfo
from sparseml.sparsification import ModelInfo as BaseModelInfo
def _get_model_first_prunable_nodes(model: ModelProto) -> List[NodeProto]:
    """
    Walk forward from the graph inputs and collect the first prunable
    (Gemm/MatMul/Conv) node reached along each path.

    :param model: onnx model to search
    :return: the prunable nodes closest to the graph inputs
    """
    graph = ONNXGraph(model)
    graph_input_names = {graph_input.name for graph_input in model.graph.input}
    # seed the traversal with every node fed directly by a graph input
    frontier = [
        candidate
        for candidate in model.graph.node
        if any(name in graph_input_names for name in candidate.input)
    ]
    visited_outputs = {out for node in frontier for out in node.output}
    first_prunable = []
    while frontier:
        current = frontier.pop()
        if current.op_type in ["Gemm", "MatMul", "Conv"]:
            # stop descending this path at the first prunable op
            first_prunable.append(current)
            continue
        for child in graph.get_node_children(current):
            # skip children already enqueued (dedup by output tensor ids)
            if any(out in visited_outputs for out in child.output):
                continue
            frontier.append(child)
            visited_outputs.update(set(child.output))
    return first_prunable
21,402 | from collections import OrderedDict
from typing import List, Optional, Union
import numpy
import onnx
from onnx import ModelProto, NodeProto, numpy_helper
from sparseml.onnx.utils import ONNXGraph, get_node_attributes
from sparseml.sparsification import LayerInfo
from sparseml.sparsification import ModelInfo as BaseModelInfo
def _get_model_last_prunable_nodes(model: ModelProto) -> List[NodeProto]:
    """
    Walk backward from the graph outputs and collect the first prunable
    (Gemm/MatMul/Conv) node reached along each reverse path.

    :param model: onnx model to search
    :return: the prunable nodes closest to the graph outputs
    """
    graph = ONNXGraph(model)
    graph_output_names = {graph_output.name for graph_output in model.graph.output}
    # seed the traversal with every node that produces a graph output
    frontier = [
        candidate
        for candidate in model.graph.node
        if any(name in graph_output_names for name in candidate.output)
    ]
    visited_outputs = {out for node in frontier for out in node.output}
    last_prunable = []
    while frontier:
        current = frontier.pop()
        if current.op_type in ["Gemm", "MatMul", "Conv"]:
            # stop ascending this path at the first prunable op
            last_prunable.append(current)
            continue
        for parent in graph.get_node_parents(current):
            # skip parents already enqueued (dedup by output tensor ids)
            if any(out in visited_outputs for out in parent.output):
                continue
            frontier.append(parent)
            visited_outputs.update(set(parent.output))
    return last_prunable
21,403 | from collections import OrderedDict
from typing import List, Optional, Union
import numpy
import onnx
from onnx import ModelProto, NodeProto, numpy_helper
from sparseml.onnx.utils import ONNXGraph, get_node_attributes
from sparseml.sparsification import LayerInfo
from sparseml.sparsification import ModelInfo as BaseModelInfo
def _param_sparsity(param: numpy.ndarray) -> float:
# return param sparsity rounded to 4 decimal places
return float(param.size - numpy.count_nonzero(param)) / float(param.size) | null |
21,404 | import logging
from sparseml.sparsification import SparsificationInfo
_LOGGER = logging.getLogger(__name__)
The provided code snippet includes necessary dependencies for implementing the `sparsification_info` function. Write a Python function `def sparsification_info() -> SparsificationInfo` to solve the following problem:
Load the available setup for sparsifying model within onnx. :return: The sparsification info for the onnx framework :rtype: SparsificationInfo
Here is the function:
def sparsification_info() -> SparsificationInfo:
    """
    Load the available setup for sparsifying model within onnx.

    :return: The sparsification info for the onnx framework
    :rtype: SparsificationInfo
    """
    _LOGGER.debug("getting sparsification info for onnx")
    # TODO: fill in once available
    onnx_info = SparsificationInfo(modifiers=[])
    _LOGGER.info("retrieved sparsification info for onnx: %s", onnx_info)
    return onnx_info
21,405 | import logging
import os
from collections import OrderedDict
from typing import Any, Dict, Iterable, Optional, Tuple, Union
import onnx
from onnx import ModelProto
from sparseml.base import Framework
from sparseml.benchmark import BatchBenchmarkResult, BenchmarkInfo, BenchmarkRunner
from sparseml.framework import FrameworkInfo
from sparseml.framework.info import FrameworkInferenceProviderInfo
from sparseml.onnx.base import require_onnx, require_onnxruntime
from sparseml.onnx.framework import framework_info as get_framework_info
from sparseml.onnx.framework import is_supported
from sparseml.onnx.utils import DataLoader, ORTModelRunner, max_available_cores
from sparsezoo import File, Model
from sparsezoo.utils import DataLoader as SparseZooDataLoader
from sparsezoo.utils import Dataset as SparseZooDataset
def _resolve_device_provider(
    framework_info: FrameworkInfo,
    device: Optional[str] = None,
    provider: Optional[str] = None,
) -> Tuple[str, str]:
    """
    Fill in whichever of device/provider is None from the framework's
    available inference providers.

    :param framework_info: framework info listing the inference providers
    :param device: device to resolve, or None to look up from provider
    :param provider: provider to resolve, or None to look up from device
    :return: the resolved (device, provider) pair
    :raises ValueError: when no inference provider matches the given
        device or provider
    """
    if provider is None and device is None:
        # Default to first available inference provider
        default = framework_info.inference_providers[0]
        return default.device, default.name
    if provider is None:
        candidates = [
            info
            for info in framework_info.inference_providers
            if info.device == device
        ]
        if not candidates:
            raise ValueError(f"No inference providers available for device {device}.")
        provider = candidates[0].name
    elif device is None:
        candidates = [
            info
            for info in framework_info.inference_providers
            if info.name == provider
        ]
        if not candidates:
            raise ValueError(
                f"No inference providers available for provider {provider}."
            )
        device = candidates[0].device
    return device, provider
21,406 | import logging
import os
from collections import OrderedDict
from typing import Any, Dict, Iterable, Optional, Tuple, Union
import onnx
from onnx import ModelProto
from sparseml.base import Framework
from sparseml.benchmark import BatchBenchmarkResult, BenchmarkInfo, BenchmarkRunner
from sparseml.framework import FrameworkInfo
from sparseml.framework.info import FrameworkInferenceProviderInfo
from sparseml.onnx.base import require_onnx, require_onnxruntime
from sparseml.onnx.framework import framework_info as get_framework_info
from sparseml.onnx.framework import is_supported
from sparseml.onnx.utils import DataLoader, ORTModelRunner, max_available_cores
from sparsezoo import File, Model
from sparsezoo.utils import DataLoader as SparseZooDataLoader
from sparsezoo.utils import Dataset as SparseZooDataset
def load_model(model: Any, **kwargs) -> ModelProto:
    """
    Loads the model and saves it to a temporary file if necessary

    :param model: the model as a ModelProto, SparseZoo Model/File,
        SparseZoo stub string, or local file path
    :param kwargs: additional arguments to pass if loading from a stub
    :return: the model loaded as a ModelProto
    :raises ValueError: for None models, unsupported types, or missing paths
    """
    if not model:
        raise ValueError("Model must not be None type")
    # resolve a SparseZoo stub string into a Model object first
    if isinstance(model, str) and model.startswith("zoo:"):
        if "path" in kwargs:
            model = Model(model, download_path=kwargs["path"])
        else:
            model = Model(model)
    # already loaded -- nothing else to do
    if isinstance(model, ModelProto):
        return model
    if isinstance(model, Model):
        # default to the main onnx file for the model
        model = model.onnx_model.path
    elif isinstance(model, File):
        # get the downloaded_path -- will auto download if not on local system
        model = model.path
    if not isinstance(model, str):
        raise ValueError("unsupported type for model: {}".format(type(model)))
    if not os.path.exists(model):
        raise ValueError("model path must exist: given {}".format(model))
    return onnx.load(model)
The provided code snippet includes necessary dependencies for implementing the `load_data` function. Write a Python function `def load_data( data: Any, model: Any = None, batch_size: int = 1, total_iterations: int = 0, **kwargs, ) -> Iterable[Tuple[Dict[str, Any], Any]]` to solve the following problem:
Creates an iterable data loader for the given data. Acceptable types for data are: - a folder path containing numpy files - a list of file paths - a SparseML DataLoader - a SparseZoo DataLoader - an iterable - None type, in which case model must be passed :param data: data to use for benchmarking :param model: model to use for generating data :param batch_size: batch size :param total_iterations: total number of iterations :param kwargs: additional arguments to pass to the DataLoader :return: an iterable of data and labels
Here is the function:
def load_data(
    data: Any,
    model: Any = None,
    batch_size: int = 1,
    total_iterations: int = 0,
    **kwargs,
) -> Iterable[Tuple[Dict[str, Any], Any]]:
    """
    Creates an iterable data loader for the given data.

    Acceptable types for data are:
    - a folder path containing numpy files
    - a list of file paths
    - a SparseML DataLoader
    - a SparseZoo DataLoader
    - an iterable
    - None type, in which case model must be passed

    :param data: data to use for benchmarking
    :param model: model to use for generating data
    :param batch_size: batch size
    :param total_iterations: total number of iterations
    :param kwargs: additional arguments to pass to the DataLoader
    :return: an iterable of data and labels
    :raises ValueError: when neither data nor model is provided
    """
    # Creates random data from model input shapes if data is not provided
    if not data:
        if not model:
            raise ValueError("must provide model or data")
        model = load_model(model)
        return DataLoader.from_model_random(
            model, batch_size, iter_steps=total_iterations
        )
    # If data is a SparseZoo stub, downloads model data
    if isinstance(data, str) and data.startswith("zoo:"):
        model_from_zoo = Model(data)
        data = model_from_zoo.sample_inputs.loader(
            batch_size, total_iterations, batch_as_list=False
        )
    # Immediately return the data if it is already a DataLoader
    if isinstance(data, DataLoader):
        return data
    # If data is a SparseZoo DataLoader, unbatches the dataloader and creates
    # DataLoader from it
    elif isinstance(data, SparseZooDataLoader):
        datasets = [
            SparseZooDataset(name, dataset) for name, dataset in data.datasets.items()
        ]
        data = SparseZooDataLoader(*datasets, batch_size=1, batch_as_list=False)
        # strip the leading batch dimension of 1 from every entry so the
        # generic DataLoader below can re-batch to the requested batch_size
        data = [
            OrderedDict(
                [
                    (element, value.reshape(value.shape[1:]))
                    for element, value in entry.items()
                ]
            )
            for entry in data
        ]
    # If data is a dictionary of data shapes, creates DataLoader from random data
    elif isinstance(data, dict):
        is_dict_of_shapes = True
        for _, value in data.items():
            is_dict_of_shapes = is_dict_of_shapes and isinstance(value, tuple)
        # NOTE: when the dict values are not all shape tuples, this branch
        # deliberately falls through to the generic DataLoader at the end
        if is_dict_of_shapes:
            return DataLoader.from_random(
                data,
                None,
                batch_size=batch_size,
                iter_steps=total_iterations,
                **kwargs,
            )
    # If data is a list of data shapes, creates DataLoader from random data
    elif isinstance(data, Iterable):
        # peek at the first element to decide if this is a list of shapes
        element = next(iter(data))
        if isinstance(element, tuple):
            data_shapes = OrderedDict(
                (f"{index:04}", shape) for index, shape in enumerate(data)
            )
            return DataLoader.from_random(
                data_shapes,
                None,
                batch_size=batch_size,
                iter_steps=total_iterations,
                **kwargs,
            )
    # fallback: treat data as raw loader input (folder path, file list, ...)
    return DataLoader(
        data, None, batch_size=batch_size, iter_steps=total_iterations, **kwargs
    )
21,407 | import logging
import os
from collections import OrderedDict
from typing import Any, Dict, Iterable, Optional, Tuple, Union
import onnx
from onnx import ModelProto
from sparseml.base import Framework
from sparseml.benchmark import BatchBenchmarkResult, BenchmarkInfo, BenchmarkRunner
from sparseml.framework import FrameworkInfo
from sparseml.framework.info import FrameworkInferenceProviderInfo
from sparseml.onnx.base import require_onnx, require_onnxruntime
from sparseml.onnx.framework import framework_info as get_framework_info
from sparseml.onnx.framework import is_supported
from sparseml.onnx.utils import DataLoader, ORTModelRunner, max_available_cores
from sparsezoo import File, Model
from sparsezoo.utils import DataLoader as SparseZooDataLoader
from sparsezoo.utils import Dataset as SparseZooDataset
class ORTBenchmarkRunner(BenchmarkRunner):
    """
    Benchmark runner for ONNXruntime.

    :param model: model to benchmark
    :param batch_size: batch size to use for benchmarking
    :param iterations: number of iterations to run
    :param warmup_iterations: number of warmup iterations to run
    :param framework_args: additional arguments to pass to the framework
    :param provider: inference provider name to use from available
        FrameworkInfo
    :param device: device to use for benchmarking
    :param ort_provider: provider to use for ONNXruntime
    :raises ValueError: for invalid iteration/batch settings or when the
        requested provider is not installed
    """

    def __init__(
        self,
        model: Any,
        batch_size: int = 1,
        iterations: int = 0,
        warmup_iterations: int = 0,
        # NOTE(review): mutable default argument; only read here, never mutated
        framework_args: Dict[str, Any] = {},
        provider: str = "cpu",
        device: str = "cpu",
        ort_provider: Optional[str] = None,
        **kwargs,
    ):
        if iterations < 0:
            raise ValueError(
                "iterations must be non-negative, where 0 will run entire dataset."
            )
        if batch_size < 1:
            raise ValueError("batch_size must be positive.")
        if warmup_iterations < 0:
            raise ValueError("warmup_iterations must be non-negative.")
        self._model = load_model(model)
        self._framework_info = get_framework_info()
        self._package_versions = self._framework_info.package_versions
        # fill in whichever of device/provider was not explicitly resolved
        device, provider = _resolve_device_provider(
            self._framework_info, device=device, provider=provider
        )
        # framework_args may override the ort_provider argument
        if "ort_provider" in framework_args:
            ort_provider = framework_args["ort_provider"]
        if ort_provider is None:
            if device == "cpu":
                ort_provider = CPU_DEFAULT_ORT_PROVIDER
            elif device == "gpu":
                possible_ort_providers = [
                    provider
                    for provider in GPU_ORT_PROVIDERS
                    if provider
                    in self._framework_info.properties["available_providers"]
                ]
                if len(possible_ort_providers) > 0:
                    ort_provider = possible_ort_providers[0]
                else:
                    # fix: Logger.warn is deprecated in favor of Logger.warning
                    _LOGGER.warning(
                        "No Onnx Runtime GPU providers installed. Defaulting to CPU"
                    )
                    device, provider = _resolve_device_provider(
                        self._framework_info, device="cpu"
                    )
                    ort_provider = CPU_DEFAULT_ORT_PROVIDER
        inference_providers = [
            inference_provider
            for inference_provider in self._framework_info.inference_providers
            if inference_provider.name == provider
            and inference_provider.device == device
        ]
        if len(inference_providers) == 0:
            raise ValueError(f"No supported inference provider found for {provider}.")
        if ort_provider not in self._framework_info.properties["available_providers"]:
            raise ValueError(f"Provider {ort_provider} not installed.")
        self._model_runner = ORTModelRunner(
            self._model,
            batch_size=batch_size,
            providers=[ort_provider],
            **framework_args,
        )
        self._inference_provider = inference_providers[0]
        self._provider = provider
        self._device = device
        self._framework_args = framework_args
        self._batch_size = batch_size
        self._iterations = iterations
        self._warmup_iterations = warmup_iterations

    def run_batch(
        self, batch: Union[Dict[str, Any], Tuple[Dict[str, Any], Any]], *args, **kwargs
    ) -> BatchBenchmarkResult:
        """
        Runs a benchmark on a given batch.

        :param batch: the batch to benchmark
        :param args: additional arguments to pass to the framework
        :param kwargs: additional arguments to pass to the framework
        :return: the timing result for the single batch
        """
        # Handles case where batch consists of a tuple of input/labels
        if isinstance(batch, tuple):
            batch = batch[0]
        _, batch_time = self._model_runner.batch_forward(batch, *args, **kwargs)
        return BatchBenchmarkResult.from_result(batch_time, self.batch_size)

    # fix: the accessors below are exposed as properties -- run_batch above
    # uses ``self.batch_size`` as a value, and run_benchmark in this module
    # reads ``.framework`` / ``.package_versions`` as attributes
    @property
    def framework(self) -> Framework:
        """
        :return: the framework
        """
        return Framework.onnx

    @property
    def framework_info(self) -> FrameworkInfo:
        """
        :return: the framework info
        """
        return self._framework_info

    @property
    def batch_size(self) -> int:
        """
        :return: the batch size
        """
        return self._batch_size

    @property
    def warmup_iterations(self) -> int:
        """
        :return: the warmup iterations
        """
        return self._warmup_iterations

    @property
    def iterations(self) -> int:
        """
        :return: the number of iterations
        """
        return self._iterations

    @property
    def num_cores(self) -> int:
        """
        :return: the number of cores
        """
        # fix: annotation was ``str`` but max_available_cores() yields a count
        return max_available_cores()

    @property
    def inference_provider(self) -> FrameworkInferenceProviderInfo:
        """
        :return: the inference provider
        """
        return self._inference_provider

    @property
    def package_versions(self) -> Dict[str, str]:
        """
        :return: the package versions
        """
        return self._package_versions

    @property
    def framework_args(self) -> Dict[str, Any]:
        """
        :return: the framework args
        """
        return self._framework_args

    @property
    def device(self) -> str:
        """
        :return: the device
        """
        return self._device

    @property
    def model(self) -> ModelProto:
        """
        :return: the model as an ONNX ModelProto
        """
        return self._model
def load_model(model: Any, **kwargs) -> ModelProto:
    """
    Loads the model and saves it to a temporary file if necessary

    :param model: the model as a ModelProto, SparseZoo Model/File,
        SparseZoo stub string, or local file path
    :param kwargs: additional arguments to pass if loading from a stub
    :return: the model loaded as a ModelProto
    :raises ValueError: for None models, unsupported types, or missing paths
    """
    if not model:
        raise ValueError("Model must not be None type")
    if isinstance(model, str) and model.startswith("zoo:"):
        # resolve a SparseZoo stub into a Model, honoring an explicit path
        stub_kwargs = {"download_path": kwargs["path"]} if "path" in kwargs else {}
        model = Model(model, **stub_kwargs)
    if isinstance(model, Model):
        # default to the main onnx file for the model
        model = model.onnx_model.path
    elif isinstance(model, File):
        # get the downloaded_path -- will auto download if not on local system
        model = model.path
    elif isinstance(model, ModelProto):
        return model
    if not isinstance(model, str):
        raise ValueError("unsupported type for model: {}".format(type(model)))
    if not os.path.exists(model):
        raise ValueError("model path must exist: given {}".format(model))
    return onnx.load(model)
The provided code snippet includes necessary dependencies for implementing the `run_benchmark` function. Write a Python function `def run_benchmark( model: Any, data: Any = None, batch_size: int = 1, iterations: int = 0, warmup_iterations: int = 0, provider: Optional[str] = "cpu", device: Optional[str] = "cpu", framework_args: Dict[str, Any] = {}, show_progress: bool = True, **kwargs, ) -> BenchmarkInfo` to solve the following problem:
Run a benchmark for the given model. :param model: model to benchmark :param data: data to benchmark :param batch_size: batch size :param iterations: number of iterations :param warmup_iterations: number of warmup iterations :param framework: the specific framework run the benchmark in :param provider: the specific inference provider to use :param device: the specific device to use :param save_path: path to save the benchmark results :param framework_args: additional framework specific arguments to pass to the runner :param show_progress: True to show a tqdm bar when running, False otherwise :param kwargs: Additional arguments to pass to the framework. :return: BenchmarkInfo
Here is the function:
def run_benchmark(
    model: Any,
    data: Any = None,
    batch_size: int = 1,
    iterations: int = 0,
    warmup_iterations: int = 0,
    provider: Optional[str] = "cpu",
    device: Optional[str] = "cpu",
    framework_args: Dict[str, Any] = {},
    show_progress: bool = True,
    **kwargs,
) -> BenchmarkInfo:
    """
    Run a benchmark for the given model.

    :param model: model to benchmark
    :param data: data to benchmark; random data is generated from the model
        inputs when not provided
    :param batch_size: batch size
    :param iterations: number of iterations
    :param warmup_iterations: number of warmup iterations
    :param provider: the specific inference provider to use
    :param device: the specific device to use
    :param framework_args: additional framework specific arguments to
        pass to the runner
    :param show_progress: True to show a tqdm bar when running, False otherwise
    :param kwargs: Additional arguments to pass to the framework.
    :return: BenchmarkInfo
    :raises ValueError: when the model is not supported by the onnxruntime
        backend
    """
    model = load_model(model)
    if is_supported(model):
        benchmark_runner = ORTBenchmarkRunner(
            model,
            batch_size=batch_size,
            iterations=iterations,
            warmup_iterations=warmup_iterations,
            provider=provider,
            device=device,
            framework_args=framework_args,
            show_progress=show_progress,
            **kwargs,
        )
        results = benchmark_runner.run(data, show_progress=show_progress)
        return BenchmarkInfo(
            framework=benchmark_runner.framework,
            package_versions=benchmark_runner.package_versions,
            benchmark=results,
            config=benchmark_runner.benchmark_config,
        )
    else:
        raise ValueError(
            "Model is not supported by the onnxruntime backend. "
            "Please check the model for support."
        )
21,408 | import logging
import numbers
import time
from typing import Any, Generator, List, NamedTuple, Tuple, Union
import numpy
from onnx import ModelProto
from tqdm import auto
from sparseml.onnx.utils import (
DataLoader,
DeepSparseAnalyzeModelRunner,
DeepSparseModelRunner,
ORTModelRunner,
extract_node_id,
get_node_params,
get_prunable_nodes,
kl_divergence,
prune_model_one_shot,
update_model_param,
)
from sparseml.optim import (
PruningLossSensitivityAnalysis,
PruningPerfSensitivityAnalysis,
PruningSensitivityResult,
default_pruning_sparsities_loss,
default_pruning_sparsities_perf,
)
from sparseml.utils import flatten_iterable
from sparsezoo.utils import load_model
The provided code snippet includes necessary dependencies for implementing the `pruning_loss_sens_approx` function. Write a Python function `def pruning_loss_sens_approx( input_shape: Union[None, List[int], List[List[int]]], output_shape: Union[None, List[int]], params: int, apply_shape_change_mult: bool = True, ) -> float` to solve the following problem:
Approximate the pruning sensitivity of a Neural Network's layer based on the params and metadata for a given layer :param input_shape: the input shape to the layer :param output_shape: the output shape from the layer :param params: the number of params in the layer :param apply_shape_change_mult: True to adjust the sensitivity based on a weight derived from a change in input to output shape (any change is considered to be more sensitive), False to not apply :return: the approximated pruning sensitivity for the layer's settings
Here is the function:
def pruning_loss_sens_approx(
    input_shape: Union[None, List[int], List[List[int]]],
    output_shape: Union[None, List[int]],
    params: int,
    apply_shape_change_mult: bool = True,
) -> float:
    """
    Approximate the pruning sensitivity of a Neural Network's layer
    based on the params and metadata for a given layer

    :param input_shape: the input shape to the layer
    :param output_shape: the output shape from the layer
    :param params: the number of params in the layer
    :param apply_shape_change_mult: True to adjust the sensitivity based on
        a weight derived from a change in input to output shape
        (any change is considered to be more sensitive), False to not apply
    :return: the approximated pruning sensitivity for the layer's settings
    """
    if not params:
        return 0.0

    def _volume(shape):
        # flatten nested shape lists, drop non-numeric/zero entries, multiply
        if not shape:
            return 0
        flat = flatten_iterable(shape)
        numeric = [dim for dim in flat if dim and isinstance(dim, numbers.Number)]
        return 0 if not numeric else numpy.prod(numeric).item()

    input_volume = _volume(input_shape)
    output_volume = _volume(output_shape)
    features_per_params = (input_volume + output_volume) / float(params)
    if apply_shape_change_mult and input_volume and output_volume:
        # any in/out volume mismatch scales sensitivity up symmetrically
        shape_change_mult = max(
            input_volume / output_volume, output_volume / input_volume
        )
    else:
        shape_change_mult = 1.0
    return features_per_params * shape_change_mult
21,409 | import logging
import numbers
import time
from typing import Any, Generator, List, NamedTuple, Tuple, Union
import numpy
from onnx import ModelProto
from tqdm import auto
from sparseml.onnx.utils import (
DataLoader,
DeepSparseAnalyzeModelRunner,
DeepSparseModelRunner,
ORTModelRunner,
extract_node_id,
get_node_params,
get_prunable_nodes,
kl_divergence,
prune_model_one_shot,
update_model_param,
)
from sparseml.optim import (
PruningLossSensitivityAnalysis,
PruningPerfSensitivityAnalysis,
PruningSensitivityResult,
default_pruning_sparsities_loss,
default_pruning_sparsities_perf,
)
from sparseml.utils import flatten_iterable
from sparsezoo.utils import load_model
def pruning_loss_sens_magnitude_iter(
    model: Union[str, ModelProto],
    sparsity_levels: Union[
        List[float], Tuple[float, ...]
    ] = default_pruning_sparsities_loss(True),
) -> Generator[
    Tuple[PruningLossSensitivityAnalysis, KSSensitivityProgress], None, None
]:
    """
    Approximated kernel sparsity (pruning) loss analysis for a given model.
    Iteratively builds a KSLossSensitivityAnalysis object and yields an updated
    version after each layer is run. The final result is the complete
    analysis object.

    :param model: the loaded model or a file path to the onnx model
        to calculate the sparse sensitivity analysis for
    :param sparsity_levels: the sparsity levels to calculate the loss for
        for each param
    :return: the analysis results for the model with an additional layer at each
        iteration along with a float representing the iteration progress
    """
    model = load_model(model)
    prunable = get_prunable_nodes(model)
    analysis = PruningLossSensitivityAnalysis()
    num_layers = len(prunable)
    for index, node in enumerate(prunable):
        node_id = extract_node_id(node)
        # report progress before analyzing this layer
        yield analysis, KSSensitivityProgress(
            index, node_id, num_layers, float(index) / float(num_layers)
        )
        weight, bias = get_node_params(model, node)
        # sort |weights| ascending; magnitude pruning removes the smallest first
        values = numpy.sort(numpy.abs(weight.val.flatten()))
        prev_index = 0
        for sparsity in sparsity_levels:
            # index up to which weights would be pruned at this sparsity level
            val_index = round(sparsity * values.size)
            if val_index >= len(values):
                val_index = len(values) - 1
            if sparsity <= 1e-9:
                # treat near-zero sparsity as the dense baseline measurement
                baseline = True
                sparsity = 0.0
                sparse_avg = 0.0
            else:
                baseline = False
                if val_index > prev_index:
                    # mean magnitude of the weights newly pruned at this step
                    sparse_avg = values[prev_index:val_index].mean().item()
                    prev_index = val_index
                else:
                    # no new weights in this step; use the single boundary value
                    sparse_avg = values[val_index].item()
                    prev_index = val_index + 1
            analysis.add_result(
                node_id, weight.name, index, sparsity, sparse_avg, baseline
            )
    # final yield signals completion of all layers
    yield analysis, KSSensitivityProgress(num_layers, None, num_layers, 1.0)
The provided code snippet includes necessary dependencies for implementing the `pruning_loss_sens_magnitude` function. Write a Python function `def pruning_loss_sens_magnitude( model: Union[str, ModelProto], sparsity_levels: Union[ List[float], Tuple[float, ...] ] = default_pruning_sparsities_loss(True), show_progress: bool = True, ) -> PruningLossSensitivityAnalysis` to solve the following problem:
Approximated kernel sparsity (pruning) loss analysis for a given model. Returns the results for each prunable param (conv, linear) in the model. :param model: the loaded model or a file path to the onnx model to calculate the sparse sensitivity analysis for :param sparsity_levels: the sparsity levels to calculate the loss for for each param :param show_progress: True to log the progress with a tqdm bar, False otherwise :return: the analysis results for the model
Here is the function:
def pruning_loss_sens_magnitude(
    model: Union[str, ModelProto],
    sparsity_levels: Union[
        List[float], Tuple[float, ...]
    ] = default_pruning_sparsities_loss(True),
    show_progress: bool = True,
) -> PruningLossSensitivityAnalysis:
    """
    Approximated kernel sparsity (pruning) loss analysis for a given model.
    Returns the results for each prunable param (conv, linear) in the model.

    :param model: the loaded model or a file path to the onnx model
        to calculate the sparse sensitivity analysis for
    :param sparsity_levels: the sparsity levels to calculate the loss for
        for each param
    :param show_progress: True to log the progress with a tqdm bar, False otherwise
    :return: the analysis results for the model
    """
    analysis = None
    progress_bar = None
    for analysis, progress in pruning_loss_sens_magnitude_iter(
        model, sparsity_levels
    ):
        # lazily create the bar on the first yield so the total is known
        if show_progress and progress_bar is None:
            progress_bar = auto.tqdm(
                total=progress.total, desc="KS Loss Sensitivity Analysis"
            )
        if progress_bar is not None and progress.val < 1.0:
            progress_bar.update(1)
    if progress_bar is not None:
        progress_bar.close()
    return analysis
21,410 | import logging
import numbers
import time
from typing import Any, Generator, List, NamedTuple, Tuple, Union
import numpy
from onnx import ModelProto
from tqdm import auto
from sparseml.onnx.utils import (
DataLoader,
DeepSparseAnalyzeModelRunner,
DeepSparseModelRunner,
ORTModelRunner,
extract_node_id,
get_node_params,
get_prunable_nodes,
kl_divergence,
prune_model_one_shot,
update_model_param,
)
from sparseml.optim import (
PruningLossSensitivityAnalysis,
PruningPerfSensitivityAnalysis,
PruningSensitivityResult,
default_pruning_sparsities_loss,
default_pruning_sparsities_perf,
)
from sparseml.utils import flatten_iterable
from sparsezoo.utils import load_model
def pruning_loss_sens_one_shot_iter(
    model: Union[str, ModelProto],
    data: DataLoader,
    batch_size: int,
    steps_per_measurement: int,
    sparsity_levels: List[float] = default_pruning_sparsities_loss(False),
    use_deepsparse_inference: bool = False,
) -> Generator[
    Tuple[PruningLossSensitivityAnalysis, KSSensitivityProgress], None, None
]:
    """
    Run a one shot sensitivity analysis for kernel sparsity.
    It does not retrain.
    Moves layer by layer to calculate the sensitivity analysis for each and
    resets the previously run layers.
    Updates and yields the PruningLossSensitivityAnalysis at each layer.
    The loss is calculated by taking the kl_divergence of
    pruned values from the baseline.

    :param model: the loaded model or a file path to the onnx model
        to calculate the sparse sensitivity analysis for
    :param data: the data to run through the model
    :param batch_size: the batch size the data is created for
    :param steps_per_measurement: number of steps (batches) to run through
        the model for each sparsity level on each node
    :param sparsity_levels: the sparsity levels to calculate the loss for each param
    :param use_deepsparse_inference: True to use the DeepSparse inference engine
        to run the analysis, False to use onnxruntime
    :return: the sensitivity results for every node that is prunable,
        yields update at each layer along with iteration progress
    """
    model = load_model(model)
    prunable_nodes = get_prunable_nodes(model)
    analysis = PruningLossSensitivityAnalysis()
    # one update per (node, sparsity) pair plus one for the baseline run
    num_updates = len(prunable_nodes) * len(sparsity_levels) + 1
    update_num = 0
    yield analysis, KSSensitivityProgress(update_num, None, num_updates, 0.0)
    runner = (
        ORTModelRunner(model)
        if not use_deepsparse_inference
        else DeepSparseModelRunner(model, batch_size)
    )
    _LOGGER.debug("created runner for one shot analysis {}".format(runner))
    # record dense (unpruned) outputs once; all sparsity levels compare to these
    base_outputs, _ = runner.run(
        data,
        desc="",
        show_progress=False,
        max_steps=steps_per_measurement,
    )
    _LOGGER.debug("recorded base outputs")
    del runner
    for index, node in enumerate(prunable_nodes):
        node_id = extract_node_id(node)
        # bias is unused below; only the weight is pruned and later restored
        weight, bias = get_node_params(model, node)
        _LOGGER.debug("running one shot for node {}".format(node_id))
        for sparsity in sparsity_levels:
            update_num += 1
            yield analysis, KSSensitivityProgress(
                update_num,
                {"node_id": node_id, "sparsity": sparsity},
                num_updates,
                float(update_num) / float(num_updates),
            )
            # prune only the current node in place; a fresh runner is built
            # per sparsity level since the model weights changed
            prune_model_one_shot(model, [node], sparsity)
            _LOGGER.debug(
                "created one shot pruned model for sparsity {}".format(sparsity)
            )
            runner = (
                ORTModelRunner(model)
                if not use_deepsparse_inference
                else DeepSparseModelRunner(model, batch_size)
            )
            _LOGGER.debug("created runner for one shot analysis {}".format(runner))
            pruned_outputs, _ = runner.run(
                data,
                desc="",
                show_progress=False,
                max_steps=steps_per_measurement,
            )
            del runner
            _LOGGER.debug("recorded outputs")
            for base, pruned in zip(base_outputs, pruned_outputs):
                batch_losses = []
                for key, base_array in base.items():
                    pruned_array = pruned[key]
                    # shift by the global min so kl_divergence sees
                    # non-negative distributions
                    loss = kl_divergence(
                        pruned_array,
                        base_array,
                        min(base_array.min(), pruned_array.min()),
                    )
                    batch_losses.append(loss)
                analysis.add_result(
                    node_id,
                    weight.name,
                    index,
                    sparsity,
                    sum(batch_losses),
                    baseline=sparsity < 1e-9,
                )
        # reset node to its baseline density
        update_model_param(model, weight.name, weight.val)
    yield analysis, KSSensitivityProgress(num_updates, None, num_updates, 1.0)
The provided code snippet includes necessary dependencies for implementing the `pruning_loss_sens_one_shot` function. Write a Python function `def pruning_loss_sens_one_shot( model: Union[str, ModelProto], data: DataLoader, batch_size: int, steps_per_measurement: int, sparsity_levels: List[float] = default_pruning_sparsities_loss(False), show_progress: bool = True, use_deepsparse_inference: bool = False, ) -> PruningLossSensitivityAnalysis` to solve the following problem:
Run a one shot sensitivity analysis for kernel sparsity. It does not retrain. Moves layer by layer to calculate the sensitivity analysis for each and resets the previously run layers. The loss is calculated by taking the kl_divergence of pruned values from the baseline. :param model: the loaded model or a file path to the onnx model to calculate the sparse sensitivity analysis for :param data: the data to run through the model :param batch_size: the batch size the data is created for :param steps_per_measurement: number of steps (batches) to run through the model for each sparsity level on each node :param sparsity_levels: the sparsity levels to calculate the loss for each param :param show_progress: True to log the progress with a tqdm bar, False otherwise :param use_deepsparse_inference: True to use the DeepSparse inference engine to run the analysis, False to use onnxruntime :return: the sensitivity results for every node that is prunable
Here is the function:
def pruning_loss_sens_one_shot(
    model: Union[str, ModelProto],
    data: DataLoader,
    batch_size: int,
    steps_per_measurement: int,
    sparsity_levels: List[float] = default_pruning_sparsities_loss(False),
    show_progress: bool = True,
    use_deepsparse_inference: bool = False,
) -> PruningLossSensitivityAnalysis:
    """
    Run a one shot sensitivity analysis for kernel sparsity.
    It does not retrain.
    Moves layer by layer to calculate the sensitivity analysis for each and
    resets the previously run layers.
    The loss is calculated by taking the kl_divergence of
    pruned values from the baseline.

    :param model: the loaded model or a file path to the onnx model
        to calculate the sparse sensitivity analysis for
    :param data: the data to run through the model
    :param batch_size: the batch size the data is created for
    :param steps_per_measurement: number of steps (batches) to run through
        the model for each sparsity level on each node
    :param sparsity_levels: the sparsity levels to calculate the loss for each param
    :param show_progress: True to log the progress with a tqdm bar, False otherwise
    :param use_deepsparse_inference: True to use the DeepSparse inference engine
        to run the analysis, False to use onnxruntime
    :return: the sensitivity results for every node that is prunable
    """
    results = None
    progress_bar = None
    sensitivity_iter = pruning_loss_sens_one_shot_iter(
        model,
        data,
        batch_size,
        steps_per_measurement,
        sparsity_levels,
        use_deepsparse_inference,
    )

    for results, progress in sensitivity_iter:
        # lazily create the bar on the first yield so total is known
        if progress_bar is None and show_progress:
            progress_bar = auto.tqdm(
                total=progress.total, desc="KS Loss Sensitivity Analysis"
            )
        if progress_bar is not None and progress.val < 1.0:
            progress_bar.update(1)

    if progress_bar is not None:
        progress_bar.close()

    return results
21,411 | import logging
import numbers
import time
from typing import Any, Generator, List, NamedTuple, Tuple, Union
import numpy
from onnx import ModelProto
from tqdm import auto
from sparseml.onnx.utils import (
DataLoader,
DeepSparseAnalyzeModelRunner,
DeepSparseModelRunner,
ORTModelRunner,
extract_node_id,
get_node_params,
get_prunable_nodes,
kl_divergence,
prune_model_one_shot,
update_model_param,
)
from sparseml.optim import (
PruningLossSensitivityAnalysis,
PruningPerfSensitivityAnalysis,
PruningSensitivityResult,
default_pruning_sparsities_loss,
default_pruning_sparsities_perf,
)
from sparseml.utils import flatten_iterable
from sparsezoo.utils import load_model
def pruning_perf_sens_one_shot_iter(
    model: Union[str, ModelProto],
    data: DataLoader,
    batch_size: int,
    num_cores: int = None,
    iterations_per_check: int = 10,
    warmup_iterations_per_check: int = 5,
    sparsity_levels: List[float] = default_pruning_sparsities_perf(),
    optimization_level: int = 0,
    iters_sleep_time: float = -1,
) -> Generator[
    Tuple[PruningPerfSensitivityAnalysis, KSSensitivityProgress], None, None
]:
    """
    Run a one shot sensitivity analysis for kernel sparsity.
    Runs a baseline and then sets the sparsity for each layer to a given range
    of values as defined in sparsity_levels to measure their performance for pruning.
    Yields the current PruningPerfSensitivityAnalysis after each sparsity level is run.

    :param model: the loaded model or a file path to the onnx model
        to calculate the sparse sensitivity analysis for
    :param data: the data to run through the model
    :param batch_size: the size of the batch to create the model in neural magic for
    :param num_cores: number of physical cores to run on. Default is the maximum number
        of cores available
    :param iterations_per_check: number of iterations to run for perf details
    :param warmup_iterations_per_check: number of iterations to run before perf details
    :param sparsity_levels: the sparsity levels to calculate the loss for each param
    :param optimization_level: the optimization level to pass to the DeepSparse
        inference engine for how much to optimize the model.
        Valid values are either 0 for minimal optimizations or 1 for maximal.
    :param iters_sleep_time: the time to sleep the thread between analysis benchmark
        iterations to allow for other processes to run; negative disables sleeping
    :return: the sensitivity results for every node that is prunable yields update
        at each layer along with iteration progress
    """
    # perf analysis requires the DeepSparse engine; fail fast if missing
    if not DeepSparseAnalyzeModelRunner.available():
        raise ModuleNotFoundError(
            "deepsparse is not installed on the system, cannot run"
        )
    analysis = PruningPerfSensitivityAnalysis(num_cores, batch_size)
    runner = DeepSparseAnalyzeModelRunner(model, batch_size, num_cores)
    _LOGGER.debug("created runner for one shot analysis {}".format(runner))
    for idx, sparsity in enumerate(sparsity_levels):
        if sparsity <= 1e-9:
            # override for the engine which needs None to not impose sparsity
            sparsity = None
        yield analysis, KSSensitivityProgress(
            idx,
            sparsity,
            len(sparsity_levels),
            float(idx) / float(len(sparsity_levels)),
        )
        results, _ = runner.run(
            data,
            show_progress=False,
            num_iterations=iterations_per_check,
            num_warmup_iterations=warmup_iterations_per_check,
            optimization_level=optimization_level,
            imposed_ks=sparsity,
        )
        _LOGGER.debug("measured results for one shot sparsity {}".format(sparsity))
        for res in results:
            # iteration times come back in ms; analysis stores seconds
            for iter_time in res["iteration_times"]:
                analysis.add_model_result(
                    sparsity if sparsity is not None else 0.0,
                    iter_time / 1000.0,
                    baseline=sparsity is None,
                )
            for index, layer in enumerate(res["layer_info"]):
                analysis.add_result(
                    layer["canonical_name"],
                    layer["name"],
                    index,
                    sparsity if sparsity is not None else layer["kernel_sparsity"],
                    layer["average_run_time_in_ms"] / 1000.0,
                    baseline=sparsity is None,
                )
        if iters_sleep_time >= 0.0:
            time.sleep(iters_sleep_time)  # hack to release GIL between runs
    yield analysis, KSSensitivityProgress(
        len(sparsity_levels),
        None,
        len(sparsity_levels),
        1.0,
    )
The provided code snippet includes necessary dependencies for implementing the `pruning_perf_sens_one_shot` function. Write a Python function `def pruning_perf_sens_one_shot( model: Union[str, ModelProto], data: DataLoader, batch_size: int, num_cores: int = None, iterations_per_check: int = 10, warmup_iterations_per_check: int = 5, sparsity_levels: List[float] = default_pruning_sparsities_perf(), show_progress: bool = True, wait_between_iters: bool = False, ) -> PruningPerfSensitivityAnalysis` to solve the following problem:
Run a one shot sensitivity analysis for kernel sparsity. Runs a baseline and then sets the sparsity for each layer to a given range of values as defined in sparsity_levels to measure their performance for pruning. :param model: the loaded model or a file path to the onnx model to calculate the sparse sensitivity analysis for :param data: the data to run through the model :param batch_size: the size of the batch to create the model in neural magic for :param num_cores: number of physical cores to run on. Default is the maximum available :param iterations_per_check: number of iterations to run for perf details :param warmup_iterations_per_check: number of iterations to run before perf details :param sparsity_levels: the sparsity levels to calculate the loss for each param :param show_progress: True to log the progress with a tqdm bar, False otherwise :param wait_between_iters: if True, will sleep the thread 0.25s between analysis benchmark iterations to allow for other processes to run. :return: the sensitivity results for every node that is prunable
Here is the function:
def pruning_perf_sens_one_shot(
    model: Union[str, ModelProto],
    data: DataLoader,
    batch_size: int,
    num_cores: int = None,
    iterations_per_check: int = 10,
    warmup_iterations_per_check: int = 5,
    sparsity_levels: List[float] = default_pruning_sparsities_perf(),
    show_progress: bool = True,
    wait_between_iters: bool = False,
) -> PruningPerfSensitivityAnalysis:
    """
    Run a one shot sensitivity analysis for kernel sparsity.
    Runs a baseline and then sets the sparsity for each layer to a given range
    of values as defined in sparsity_levels to measure their performance for pruning.

    :param model: the loaded model or a file path to the onnx model
        to calculate the sparse sensitivity analysis for
    :param data: the data to run through the model
    :param batch_size: the size of the batch to create the model in neural magic for
    :param num_cores: number of physical cores to run on. Default is the maximum
        available
    :param iterations_per_check: number of iterations to run for perf details
    :param warmup_iterations_per_check: number of iterations to run before perf details
    :param sparsity_levels: the sparsity levels to calculate the loss for each param
    :param show_progress: True to log the progress with a tqdm bar, False otherwise
    :param wait_between_iters: if True, will sleep the thread 0.25s between analysis
        benchmark iterations to allow for other processes to run.
    :return: the sensitivity results for every node that is prunable
    :raises ModuleNotFoundError: if the DeepSparse engine is not installed
        (raised by pruning_perf_sens_one_shot_iter)
    """
    analysis = None
    bar = None
    # BUG FIX: wait_between_iters was previously passed as the 8th positional
    # argument, which is optimization_level in pruning_perf_sens_one_shot_iter
    # — so it silently toggled the engine optimization level and never slept.
    # Map it explicitly to iters_sleep_time as the docstring promises.
    for (analysis, progress) in pruning_perf_sens_one_shot_iter(
        model,
        data,
        batch_size,
        num_cores,
        iterations_per_check,
        warmup_iterations_per_check,
        sparsity_levels,
        iters_sleep_time=0.25 if wait_between_iters else -1.0,
    ):
        # create the bar lazily on the first yield so the total is known
        if bar is None and show_progress:
            bar = auto.tqdm(total=progress.total, desc="KS Perf Sensitivity Analysis")
        if bar is not None and progress.val < 1.0:
            bar.update(1)
    if bar is not None:
        bar.close()
    return analysis
21,412 | from typing import Dict, List, Set, Union
import onnx
from sparseml.onnx.utils import ONNXGraph, get_node_attributes
_PRUNABLE_OP_TYPES = ["Conv", "Gemm", "MatMul"]
def _get_node_dependency_names(
    graph: ONNXGraph, node: onnx.NodeProto, structure_type: str
) -> Set[str]:
    # Returns the set of parameter (initializer) names that must be pruned
    # together with `node` so downstream layers keep matching dimensions.
    # structure_type is 'filter' (output channels) or 'channel' (input channels).
    # Relies on module-level helpers not shown here: _get_next_layer_deps,
    # _get_node_output_ids, _is_group_conv, _OUTPUT_CHANNEL_OP_TYPES.
    unchecked_nodes = _get_next_layer_deps(graph, node, structure_type)
    seen_output_ids = _get_node_output_ids(unchecked_nodes)
    dependent_params = set()
    if structure_type == "filter" and len(node.input) > 2:
        # node bias depends on num filters
        dependent_params.add(node.input[2])
    # breadth-first walk over dependent layers
    while unchecked_nodes:
        current_node = unchecked_nodes.pop(0)
        if not isinstance(current_node, onnx.NodeProto):
            continue
        if current_node.op_type in _OUTPUT_CHANNEL_OP_TYPES:
            prunable = current_node.op_type in _PRUNABLE_OP_TYPES
            params = (
                list(current_node.input[1:])  # skip layer input tensor
                if not (prunable and structure_type != "filter")
                else [current_node.input[1]]  # bias not dependent on prev filter
            )
            for param in params:
                # only collect names that are actual initializers in the graph
                if graph.get_init_by_name(param) is not None:
                    dependent_params.add(param)
            if prunable and not _is_group_conv(current_node):
                # continue on other branches, do not go past prunable nodes
                continue
        dep_nodes = _get_next_layer_deps(graph, current_node, structure_type)
        for dep_node in dep_nodes:
            dep_node_ids = _get_node_output_ids(dep_node)
            # only enqueue nodes whose outputs have not been visited yet
            if dep_node_ids.isdisjoint(seen_output_ids):
                unchecked_nodes.append(dep_node)
            seen_output_ids.update(dep_node_ids)
    return dependent_params
The provided code snippet includes necessary dependencies for implementing the `get_param_structured_pruning_group_dependencies` function. Write a Python function `def get_param_structured_pruning_group_dependencies( model: Union[onnx.ModelProto, str], structure_type: str = "filter", ) -> Dict[str, List[str]]` to solve the following problem:
:param model: model to generate pruning groups and dependencies for :param structure_type: valid options are 'filter' and 'channel'. Generates dependency map for corresponding pruning scheme. Default is 'filter' :return: dictionary of parameter names that should be grouped during structured pruning to a list of parameter names whose parameters should be updated accordingly to the param group pruning results. prunable parameter names will be represented as a comma separated string
Here is the function:
def get_param_structured_pruning_group_dependencies(
    model: Union[onnx.ModelProto, str],
    structure_type: str = "filter",
) -> Dict[str, List[str]]:
    """
    :param model: model to generate pruning groups and dependencies for
    :param structure_type: valid options are 'filter' and 'channel'. Generates
        dependency map for corresponding pruning scheme. Default is 'filter'
    :return: dictionary of parameter names that should be grouped during
        structured pruning to a list of parameter names whose parameters should
        be updated accordingly to the param group pruning results. prunable parameter
        names will be represented as a comma separated string
    :raises ValueError: if structure_type is not 'filter' or 'channel'
    """
    if structure_type not in ["filter", "channel"]:
        raise ValueError(
            f"invalid structure_type {structure_type}. not in ['filter', 'channel']"
        )
    if isinstance(model, str):
        model = onnx.load(model)
    graph = ONNXGraph(model)
    # map each prunable param name to the set of params that depend on it
    param_name_to_dependents = {}  # Dict[str, Set[str]]
    for node in model.graph.node:
        if node.op_type not in _PRUNABLE_OP_TYPES or (
            graph.get_init_by_name(node.input[1]) is None
        ):
            # main param not found or not prunable
            continue
        param_name_to_dependents[node.input[1]] = _get_node_dependency_names(
            graph, node, structure_type
        )
    # merge disjoint sets of dependencies (could improve with union-find)
    prunable_param_group_to_dep_params = []  # List[Tuple[List, Set]]
    for prunable_param_name, dep_params in param_name_to_dependents.items():
        # find every existing group sharing at least one dependent param
        intersected_group_idxs = {
            idx
            for idx, (_, group_dep_params) in enumerate(
                prunable_param_group_to_dep_params
            )
            if not dep_params.isdisjoint(group_dep_params)
        }
        new_group_val = ([prunable_param_name], dep_params)
        if not intersected_group_idxs:
            prunable_param_group_to_dep_params.append(new_group_val)
        else:
            # fold all intersecting groups into one merged group, keeping
            # the non-intersecting groups in their original order
            non_intersected_vals = []
            for idx, (prunable_param_group, group_dep_params) in enumerate(
                prunable_param_group_to_dep_params
            ):
                if idx not in intersected_group_idxs:
                    non_intersected_vals.append(
                        (prunable_param_group, group_dep_params)
                    )
                else:
                    new_group_val = (
                        new_group_val[0] + prunable_param_group,
                        new_group_val[1].union(group_dep_params),
                    )
            prunable_param_group_to_dep_params = non_intersected_vals + [new_group_val]
    return {
        ",".join(prunable_param_group): list(dependent_params)
        for prunable_param_group, dependent_params in prunable_param_group_to_dep_params
    }
21,413 | from typing import Iterable, List, Union
import onnx
from tqdm.auto import tqdm
from sparseml.onnx.optim.quantization.calibration import CalibrationSession
from sparseml.onnx.optim.quantization.quantize import QuantizationMode, quantize
from sparseml.onnx.utils import DataLoader, quantize_resnet_identity_add_inputs
from sparsezoo.utils import save_onnx
class CalibrationSession:
    """
    Class for performing quantization calibration on an Onnx model.

    :param onnx_file: File path to saved Onnx model to calibrate
    :param calibrate_op_types: List of Onnx ops names to calibrate and quantize within
        the model. Currently Onnx only supports quantizing 'Conv' and 'MatMul' ops.
    :param exclude_nodes: List of operator names that should not be quantized
    :param include_nodes: List of operator names to force to be quantized
    :param augmented_model_path: file path to save augmented model to for verification
    :param static: True to use static quantization. Default is True
    """

    def __init__(
        self,
        onnx_file: str,
        calibrate_op_types: Iterable[str] = ("Conv", "MatMul", "Gemm"),
        exclude_nodes: List[str] = None,
        include_nodes: List[str] = None,
        augmented_model_path: str = None,
        static: bool = True,
    ):
        self._onnx_file = onnx_file
        self._calibrate_op_types = list(calibrate_op_types)
        self._exclude_nodes = exclude_nodes or []
        self._include_nodes = include_nodes or []
        self._augmented_model_path = augmented_model_path
        self._static = static
        self._model = onnx.load(self._onnx_file)
        # fold conv-batchnorm pairs if possible before augmenting
        self._optimized_model_path = self._optimize_model()
        self._model_augmented = self.generate_augmented_model()
        if self._augmented_model_path is None:
            self._augmented_model_path = os.path.join(
                os.getcwd(), "model_augmented.onnx"
            )
        save_onnx(self._model_augmented, self._augmented_model_path)
        _LOGGER.debug(f"Created an augmented model at: {self._augmented_model_path}")
        self._sessions = {}  # batch_size -> session
        self._quantization_thresholds = {}  # Dict[node.name, Tuple(min_val, max_val)]

    # NOTE(review): likely intended as a @property — the decorator appears to
    # be missing; as written this must be called as session.model(). Confirm.
    def model(self):
        """
        :return: The loaded model, if optimization has run,
            will be the optimized version
        """
        return self._model

    # NOTE(review): likely intended as a @property as well — confirm.
    def model_augmented(self):
        """
        :return: The augmented model, if optimization has run,
            will be the optimized version
        """
        return self._model_augmented

    def _optimize_model(self) -> Union[str, None]:
        """
        Perform batch norm folding in model if possible.

        :return: The tmp file path to the optimized model if optimization is successful
            otherwise returns None and the original model is not changed
        """
        try:
            print("Optimizing {}...".format(self._onnx_file))
            model_optimized = fold_conv_bns(self._onnx_file)
            if model_optimized is None:
                # no optimization performed, skip the rest of this block
                raise Exception()
            validate_onnx(model_optimized)  # should raise exception if broken
            optimized_model_path = os.path.join(os.getcwd(), "model_optimized.onnx")
            save_onnx(model_optimized, optimized_model_path)
            self._model = model_optimized
            _LOGGER.debug(
                "Optimization successful. "
                "Created an optimized model at: "
                f"{optimized_model_path}"
            )
            return optimized_model_path
        except Exception as e:
            # best-effort: fall back to the original model on any failure
            print(e)
            print(
                (
                    "WARNING: no conv-batch norms folded for {}, using original model"
                ).format(self._onnx_file)
            )
            return None

    def get_model_input_names(self) -> List[str]:
        """
        :return: List of input names to the model
        """
        return [node.name for node in self._model.graph.input]

    def add_reduce_to_node_output(
        self, node: onnx.NodeProto, output_edge: str, op_type: str
    ) -> Tuple[onnx.NodeProto, onnx.ValueInfoProto]:
        """
        :param node: the node to add the reduce op to
        :param output_edge: the output of node to generate reduce op for
        :param op_type: the reduce operation name
        :return: a tuple of the reduce operation node and its output
        """
        if node is not None and node.name != "":
            reduce_name = node.name + "_{}".format(op_type)
        else:  # Should be an input
            reduce_name = output_edge + "_{}".format(op_type)
        reduce_node = onnx.helper.make_node(
            op_type,
            [output_edge],
            [output_edge + "_{}".format(op_type)],
            reduce_name,
            keepdims=0,
        )
        # scalar float output holding the reduced min/max value
        reduce_node_output = onnx.helper.make_tensor_value_info(
            reduce_node.output[0], onnx.TensorProto.FLOAT, ()
        )
        return reduce_node, reduce_node_output

    def _get_input_node_for_edge(self, input_edge: str) -> onnx.NodeProto:
        """
        :param input_edge: name of graph edge to get input node for
        :return: the node in the original model that is the input to the
            destination of the given input_edge, or None if no producing node
            exists (e.g. the edge is a graph input or initializer)
        """
        for node in self._model.graph.node:
            if input_edge in node.output:
                return node
        return None

    def generate_augmented_model(self) -> onnx.ModelProto:
        """
        :return: A new Onnx model with ReduceMin and ReduceMax nodes added to all
            quantizable nodes in the original model and ensures their outputs are
            stored as part of the graph output.
        """
        added_nodes = []
        added_outputs = []
        # track calibrated edges so each edge gets at most one min/max pair
        edges_already_calibrated = []
        for node in self._model.graph.node:
            should_calibrate = (
                (node.op_type in self._calibrate_op_types)
                and (node.name not in self._exclude_nodes)
            ) or (node.name in self._include_nodes)
            if should_calibrate:
                to_calibrate = []
                input_name = node.output[0]
                if input_name not in edges_already_calibrated:
                    edges_already_calibrated.append(input_name)
                    to_calibrate.append((node, input_name))
                if self._static:
                    # In static mode, we precompute the min/max for the inputs as well
                    for input_name in node.input:
                        if input_name not in edges_already_calibrated:
                            edges_already_calibrated.append(input_name)
                            input_node = self._get_input_node_for_edge(input_name)
                            to_calibrate.append((input_node, input_name))
                for calib_node, output_edge in to_calibrate:
                    (reduce_node, reduce_node_output,) = self.add_reduce_to_node_output(
                        calib_node, output_edge, "ReduceMin"
                    )
                    added_nodes.append(reduce_node)
                    added_outputs.append(reduce_node_output)
                    (reduce_node, reduce_node_output,) = self.add_reduce_to_node_output(
                        calib_node, output_edge, "ReduceMax"
                    )
                    added_nodes.append(reduce_node)
                    added_outputs.append(reduce_node_output)
        # use optimized model if available
        base_model_path = self._optimized_model_path or self._onnx_file
        augmented_model = onnx.load(base_model_path)
        augmented_model.graph.node.extend(added_nodes)
        augmented_model.graph.output.extend(added_outputs)
        return augmented_model

    def _iter_calib_ops_output(
        self,
        outputs: List[np.ndarray],
    ) -> Generator[Tuple[str, float, float], None, None]:
        """
        :param outputs: the outputs of a run of the augmented model
        :return: A generator that for every augmented operation yields
            the operation name, the value of the REDUCE_MIN operator,
            and the value of the REDUCE_MAX operator associated with
            the operation.
        """
        num_orig_outputs = len(self._model.graph.output)
        output_names = [
            output_obj.name for output_obj in self._model_augmented.graph.output
        ]
        # calibration outputs are appended after the model's original outputs
        calib_output_names = output_names[num_orig_outputs:]
        calib_outputs = outputs[num_orig_outputs:]
        # Iterate through outputs in pairs of min, max
        assert len(calib_output_names) % 2 == 0
        for idx in range(0, len(calib_output_names), 2):
            min_op_name = calib_output_names[idx]
            max_op_name = calib_output_names[idx + 1]
            base_op_name = min_op_name.split("_Reduce")[0]
            # Check that the pairs match and min and max ops are in the right order
            assert "ReduceMin" in min_op_name
            assert "ReduceMax" in max_op_name
            if base_op_name != max_op_name.split("_Reduce")[0]:
                raise RuntimeError(
                    "Unexpected reduce output pair: {}, {}".format(
                        min_op_name, max_op_name
                    )
                )
            yield base_op_name, calib_outputs[idx], calib_outputs[idx + 1]

    def process_batch(self, input_batch: Dict[str, np.ndarray]) -> None:
        """
        Updates the model's calibration thresholds based on a run of the input batch

        :param input_batch: Dictionary of pre-processed model input batch to use, with
            input names mapped to a numpy array of the batch
        """
        batch_size = list(input_batch.values())[0].shape[0]
        # one cached runner session per batch size
        if batch_size not in self._sessions:
            self._sessions[batch_size] = ORTModelRunner(
                self._augmented_model_path, batch_size=batch_size
            )
        outputs, _ = self._sessions[batch_size].batch_forward(input_batch)
        # extract just output values from ordered dict
        outputs = list(outputs.values())
        for op_name, min_val, max_val in self._iter_calib_ops_output(outputs):
            # widen the running (min, max) threshold for this op
            if op_name not in self._quantization_thresholds:
                self._quantization_thresholds[op_name] = (min_val, max_val)
            else:
                op_prev_min, op_prev_max = self._quantization_thresholds[op_name]
                self._quantization_thresholds[op_name] = (
                    min(op_prev_min, min_val),
                    max(op_prev_max, max_val),
                )

    def get_quantization_params_dict(self) -> Dict[str, List[Union[int, float]]]:
        """
        :return: A dictionary of quantization parameters based on the original
            model and calibrated quantization thresholds from runs of the
            process_batch function. The format of the dictionary will be:
            {"param_name": [zero_point, scale]}
        """
        quantization_params = {}
        for idx, node in enumerate(self._model.graph.node):
            node_output_name = node.output[0]
            if node_output_name in self._quantization_thresholds:
                range_min, range_max = self._quantization_thresholds[node_output_name]
                next_nodes = get_node_output_nodes(self._model, node)
                # only pass next_node for optimization if there is 1
                next_node = next_nodes[0] if len(next_nodes) == 1 else None
                node_params = CalibrationSession._calculate_scale_zeropoint(
                    range_min, range_max, next_node
                )
                quantization_params[node_output_name] = node_params
        # Add model inputs to quantization_params
        for input_name in self.get_model_input_names():
            if (
                input_name in self._quantization_thresholds
                and input_name not in quantization_params
            ):
                range_min, range_max = self._quantization_thresholds[input_name]
                inp_params = CalibrationSession._calculate_scale_zeropoint(
                    range_min, range_max, None
                )
                quantization_params[input_name] = inp_params
        return quantization_params

    # NOTE(review): no `self` parameter — presumably intended as a
    # @staticmethod (decorator looks lost); calls above access it via the
    # class so it still works as a plain function attribute. Confirm.
    def _calculate_scale_zeropoint(
        range_min: float,
        range_max: float,
        next_node: Union[None, onnx.NodeProto],
    ) -> List[Union[int, float]]:
        # Compute uint8 [zero_point, scale] for the given calibrated range.
        # adjust range_min and range_max such that 0 is included in the range.
        # to make sure zero can be uniquely represented.
        range_min = min(range_min, 0)
        range_max = max(range_max, 0)
        # We update the output range min and max when next node is clip or relu
        # With this technique we can remove these 2 ops and
        # reduce the output range which in turn helps to improve accuracy
        if next_node is not None:
            if next_node.op_type == "Clip":
                # assumes Clip min/max are the first two attributes — confirm
                # against the model's opset (newer opsets use inputs instead)
                clip_min = next_node.attribute[0].f
                clip_max = next_node.attribute[1].f
                if range_min < clip_min:
                    range_min = clip_min
                if range_max > clip_max:
                    range_max = clip_max
            if next_node.op_type == "Relu":
                if range_min < 0:
                    range_min = 0
        # 255 steps for uint8; guard against a degenerate zero-width range
        scale = np.float32(
            (range_max - range_min) / 255 if range_min != range_max else 1
        )
        initial_zero_point = (0 - range_min) / scale
        zero_point = np.uint8(round(max(0, min(255, initial_zero_point))))
        return [zero_point, scale]

    def __del__(self):
        """
        Cleans up any unnecessary files.
        """
        if self._optimized_model_path is not None:
            os.remove(self._optimized_model_path)
        if self._augmented_model_path is not None:
            os.remove(self._augmented_model_path)
class QuantizationMode:
    """Constants selecting how a model is quantized (see ``quantize``)."""
    # use integer ops (ConvInteger / MatMulInteger)
    IntegerOps = 0
    # use QLinear ops (QLinearConv / QLinearMatMul)
    QLinearOps = 1
def quantize(
    model,
    per_channel=False,
    nbits=8,
    quantization_mode=QuantizationMode.IntegerOps,
    static=False,
    force_fusions=False,
    symmetric_activation=False,
    symmetric_weight=False,
    quantization_params=None,
    nodes_to_quantize=None,
    nodes_to_exclude=None,
):
    """
    Given an onnx model, create a quantized onnx model and save it into a file

    :param model: ModelProto to quantize
    :param per_channel: quantize weights per channel
    :param nbits: number of bits to represent quantized data.
        Currently only 8-bit types are supported
    :param quantization_mode: one of the QuantizationMode types.
        IntegerOps uses integer ops (only ConvInteger and MatMulInteger are
        supported now); QLinearOps uses QLinear ops (only QLinearConv and
        QLinearMatMul are supported now)
    :param static: True to quantize inputs/activations with static scale and
        zero point values given via quantization_params; False to compute
        dynamic scale/zero point values while running the model
    :param force_fusions: True to fuse nodes added for dynamic quantization;
        False applies no fusion for nodes added for dynamic quantization.
        Should only be used where backends apply special fusion routines
    :param symmetric_activation: True to quantize activations into signed
        integers, False for unsigned
    :param symmetric_weight: True to quantize weights into signed integers,
        False for unsigned
    :param quantization_params: dict mapping input names of conv/matmul nodes
        to [zero_point, scale] (np.uint8 and np.float32). Required when
        static is True. Example:
        {'resnet_model/Relu_1:0': [np.uint8(0), np.float32(0.0195)]}
    :param nodes_to_quantize: when not None, only node names in this list
        are quantized, e.g. ['Conv__224', 'Conv__252']
    :param nodes_to_exclude: when not None, node names in this list are
        excluded from quantization
    :return: ModelProto with quantization
    """
    # guard clause: only 8-bit quantization is implemented
    if nbits != 8:
        raise ValueError("Only 8 bit quantization is currently supported")

    # symmetric => signed int8, otherwise unsigned uint8
    input_qType = (
        onnx_proto.TensorProto.INT8
        if symmetric_activation
        else onnx_proto.TensorProto.UINT8
    )
    weight_qType = (
        onnx_proto.TensorProto.INT8
        if symmetric_weight
        else onnx_proto.TensorProto.UINT8
    )

    # work on a copy so the caller's model is left untouched
    copy_model = onnx_proto.ModelProto()
    copy_model.CopyFrom(model)
    fuse_dynamic_quant = check_opset_version(copy_model, force_fusions)

    quantizer = ONNXQuantizer(
        copy_model,
        per_channel,
        quantization_mode,
        static,
        fuse_dynamic_quant,
        weight_qType,
        input_qType,
        quantization_params,
        nodes_to_quantize,
        nodes_to_exclude,
    )
    quantizer.quantize_model()

    # stamp producer metadata on the quantized model
    quantizer.model.producer_name = __producer__
    quantizer.model.producer_version = __version__
    return quantizer.model
The provided code snippet includes necessary dependencies for implementing the `quantize_model_post_training` function. Write a Python function `def quantize_model_post_training( onnx_file: str, data_loader: DataLoader, output_model_path: str = None, calibrate_op_types: Iterable[str] = ("Conv", "MatMul", "Gemm"), exclude_nodes: List[str] = None, include_nodes: List[str] = None, augmented_model_path: str = None, static: bool = True, symmetric_weight: bool = False, force_fusions: bool = False, show_progress: bool = True, run_extra_opt: bool = True, ) -> Union[None, onnx.ModelProto]` to solve the following problem:
Wrapper function for calibrating and quantizing an Onnx model :param onnx_file: File path to saved Onnx model to calibrate and quantize :param data_loader: Iterable of lists of model inputs or filepath to directory of numpy arrays. If the model has multiple inputs and an .npz file is provided, the function will try to extract each input from the .npz file by name. If the names do not match, the function will try to extract the inputs in order. Will raise an exception of the number of inputs does not match the number of arrays in the .npz file. :param output_model_path: Filepath to where the quantized model should be saved to. If not provided, then the quantized Onnx model object will be returned instead. :param calibrate_op_types: List of Onnx ops names to calibrate and quantize within the model. Currently Onnx only supports quantizing 'Conv' and 'MatMul' ops. :param exclude_nodes: List of operator names that should not be quantized :param include_nodes: List of operator names force to be quantized :param augmented_model_path: file path to save augmented model to for verification :param static: True to use static quantization. Default is static. :param symmetric_weight: True to use symmetric weight quantization. Default is False :param force_fusions: True to force fusions in quantization. Default is False :param show_progress: If true, will display a tqdm progress bar during calibration. Default is True :param run_extra_opt: If true, will run additional optimizations on the quantized model. Currently the only optimization is quantizing identity relu outputs in ResNet blocks :return: None or quantized onnx model object if output_model_path is not provided
Here is the function:
def quantize_model_post_training(
    onnx_file: str,
    data_loader: DataLoader,
    output_model_path: str = None,
    calibrate_op_types: Iterable[str] = ("Conv", "MatMul", "Gemm"),
    exclude_nodes: List[str] = None,
    include_nodes: List[str] = None,
    augmented_model_path: str = None,
    static: bool = True,
    symmetric_weight: bool = False,
    force_fusions: bool = False,
    show_progress: bool = True,
    run_extra_opt: bool = True,
) -> Union[None, onnx.ModelProto]:
    """
    Wrapper function for calibrating and quantizing an Onnx model

    :param onnx_file: File path to saved Onnx model to calibrate and quantize
    :param data_loader: Iterable of lists of model inputs or filepath to directory
        of numpy arrays. If the model has multiple inputs and an .npz file is
        provided, the function will try to extract each input from the .npz file
        by name. If the names do not match, the function will try to extract the
        inputs in order. Will raise an exception if the number of inputs does not
        match the number of arrays in the .npz file.
    :param output_model_path: Filepath to where the quantized model should be saved to.
        If not provided, then the quantized Onnx model object will be returned instead.
    :param calibrate_op_types: List of Onnx ops names to calibrate and quantize within
        the model. Currently Onnx only supports quantizing 'Conv' and 'MatMul' ops.
    :param exclude_nodes: List of operator names that should not be quantized
    :param include_nodes: List of operator names force to be quantized
    :param augmented_model_path: file path to save augmented model to for verification
    :param static: True to use static quantization. Default is static.
    :param symmetric_weight: True to use symmetric weight quantization.
        Default is False
    :param force_fusions: True to force fusions in quantization. Default is False
    :param show_progress: If true, will display a tqdm progress bar during calibration.
        Default is True
    :param run_extra_opt: If true, will run additional optimizations on the quantized
        model. Currently the only optimization is quantizing identity relu outputs in
        ResNet blocks
    :raises ValueError: if data_loader yields an infinite number of examples
    :return: None or quantized onnx model object if output_model_path is not provided
    """
    calibrator = CalibrationSession(
        onnx_file,
        calibrate_op_types,
        exclude_nodes,
        include_nodes,
        augmented_model_path,
        static,
    )

    # calibration must terminate, so an infinite loader is a usage error;
    # raise explicitly instead of `assert` (asserts are stripped under -O)
    if data_loader.infinite:
        raise ValueError(
            "data_loader must have a finite number of examples for calibration"
        )

    data_iterator = tqdm(data_loader) if show_progress else data_loader

    for input_batch, _ in data_iterator:
        calibrator.process_batch(input_batch)

    quantization_params_dict = calibrator.get_quantization_params_dict()
    calibrated_quantized_model = quantize(
        calibrator.model,
        quantization_mode=QuantizationMode.QLinearOps,
        force_fusions=force_fusions,
        quantization_params=quantization_params_dict,
        nodes_to_exclude=exclude_nodes if exclude_nodes else None,
        symmetric_weight=symmetric_weight,
        static=static,
    )

    if run_extra_opt:
        # in-place optimization: quantize identity relu outputs in ResNet blocks
        quantize_resnet_identity_add_inputs(calibrated_quantized_model)

    if output_model_path is None:
        return calibrated_quantized_model
    else:
        save_onnx(calibrated_quantized_model, output_model_path)
21,414 | import numpy as np
import onnx
import onnx.numpy_helper
from onnx import onnx_pb as onnx_proto
from onnx import shape_inference
The provided code snippet includes necessary dependencies for implementing the `quantize_data` function. Write a Python function `def quantize_data(data, quantize_range, qType)` to solve the following problem:
:parameter data: data to quantize :parameter quantize_range: list of data to weight pack. :parameter qType: data type to quantize to. Supported types UINT8 and INT8 :return: minimum, maximum, zero point, scale, and quantized weights To pack weights, we compute a linear transformation - when data type == uint8 mode, from [rmin, rmax] -> [0, 2^{b-1}] and - when data type == int8, from [-m , m] -> [-(2^{b-1}-1), 2^{b-1}-1] where m = max(abs(rmin), abs(rmax)) and add necessary intermediate nodes to transform quantized weight to full weight using the equation r = S(q-z), where r: real original value q: quantized value S: scale z: zero point
Here is the function:
def quantize_data(data, quantize_range, qType):
    """
    :parameter data: data to quantize
    :parameter quantize_range: number of representable quantized steps
        (e.g. 255 for UINT8, 254 for INT8).
    :parameter qType: data type to quantize to. Supported types UINT8 and INT8
    :return: minimum, maximum, zero point, scale, and quantized weights

    To pack weights, we compute a linear transformation
        - when data type == uint8 mode, from [rmin, rmax] -> [0, 2^{b-1}] and
        - when data type == int8, from [-m , m] -> [-(2^{b-1}-1), 2^{b-1}-1] where
          m = max(abs(rmin), abs(rmax))
    and add necessary intermediate nodes to transform quantized weight to full
    weight using the equation r = S(q-z), where
        r: real original value
        q: quantized value
        S: scale
        z: zero point
    """
    # reject unsupported types before doing any arithmetic
    if qType not in (onnx_proto.TensorProto.INT8, onnx_proto.TensorProto.UINT8):
        raise ValueError(
            "Unexpected data type {} requested. Only INT8 and UINT8 are supported.".format(
                qType
            )
        )

    # include zero in the range so it is exactly representable
    rmin = min(min(data), 0)
    rmax = max(max(data), 0)

    # Both paths use a symmetric scale for NMIE compatibility (the original
    # ORT asymmetric uint8 computation is intentionally not used here).
    # Fall back to scale 1 for all-zero data to avoid a 0/0 -> NaN result.
    max_range = max(abs(rmin), abs(rmax))
    scale = (float(max_range) * 2) / quantize_range if max_range != 0 else 1.0

    if qType == onnx_proto.TensorProto.INT8:
        zero_point = 0
        # signed byte type
        quantized_data = (np.asarray(data) / scale).round().astype("b")
    else:  # UINT8
        zero_point = 128
        # unsigned byte type
        quantized_data = ((np.asarray(data) / scale).round() + zero_point).astype("B")

    return rmin, rmax, zero_point, scale, quantized_data
21,415 | import numpy as np
import onnx
import onnx.numpy_helper
from onnx import onnx_pb as onnx_proto
from onnx import shape_inference
The provided code snippet includes necessary dependencies for implementing the `_attribute_to_kwarg` function. Write a Python function `def _attribute_to_kwarg(attribute)` to solve the following problem:
Convert attribute to kwarg format for use with onnx.helper.make_node. :parameter attribute: attribute in AttributeProto format. :return: attribute in {key: value} format.
Here is the function:
def _attribute_to_kwarg(attribute):
    """
    Convert attribute to kwarg format for use with onnx.helper.make_node.

    :parameter attribute: attribute in AttributeProto format.
    :return: attribute in {key: value} format.
    """
    # AttributeProto.type -> name of the proto field holding the value, per
    # https://github.com/onnx/onnx/blob/master/onnx/onnx.proto
    value_field_by_type = {
        1: "f",
        2: "i",
        3: "s",
        4: "t",
        5: "g",
        6: "floats",
        7: "ints",
        8: "strings",
        9: "tensors",
        10: "graphs",
    }

    if attribute.type == 0:
        raise ValueError(
            "attribute {} does not have type specified.".format(attribute.name)
        )

    field = value_field_by_type.get(attribute.type)
    if field is None:
        raise ValueError(
            "attribute {} has unsupported type {}.".format(
                attribute.name, attribute.type
            )
        )

    return {attribute.name: getattr(attribute, field)}
21,416 | import numpy as np
import onnx
import onnx.numpy_helper
from onnx import onnx_pb as onnx_proto
from onnx import shape_inference
The provided code snippet includes necessary dependencies for implementing the `_get_mul_node` function. Write a Python function `def _get_mul_node(inputs, output, name)` to solve the following problem:
Helper function to create a Mul node. parameter inputs: list of input names. parameter output: output name. parameter name: name of the node. return: Mul node in NodeProto format.
Here is the function:
def _get_mul_node(inputs, output, name):
    """
    Helper function to create a Mul node.

    parameter inputs: list of input names.
    parameter output: output name (wrapped into the single-element output list
        expected by make_node).
    parameter name: name of the node.
    return: Mul node in NodeProto format.
    """
    # positional 4th arg of onnx.helper.make_node is the node name
    return onnx.helper.make_node("Mul", inputs, [output], name)
21,417 | import numpy as np
import onnx
import onnx.numpy_helper
from onnx import onnx_pb as onnx_proto
from onnx import shape_inference
def _find_by_name(item_name, item_list):
    """
    Helper function to find item by name in a list.

    parameter item_name: name of the item.
    parameter item_list: list of items.
    return: item if found. None otherwise.
    """
    # lazily scan and stop at the first match; None when nothing matches
    return next((entry for entry in item_list if entry.name == item_name), None)
The provided code snippet includes necessary dependencies for implementing the `_find_node_by_name` function. Write a Python function `def _find_node_by_name(node_name, graph, new_nodes_list)` to solve the following problem:
Helper function to check if a node exists in a graph or new set of nodes created during quantization. parameter node_name: name of the node. parameter graph: GraphProto. parameter new_nodes_list: list of nodes added during quantization. return: NodeProto if found. None otherwise.
Here is the function:
def _find_node_by_name(node_name, graph, new_nodes_list):
    """
    Helper function to check if a node exists in a graph or
    new set of nodes created during quantization.

    parameter node_name: name of the node.
    parameter graph: GraphProto.
    parameter new_nodes_list: list of nodes added during quantization.
    return: NodeProto if found. None otherwise.
    """
    # shallow copy of the graph nodes, extended with the freshly created ones,
    # so both populations are searched in one pass
    candidates = list(graph.node)
    candidates.extend(new_nodes_list)
    return _find_by_name(node_name, candidates)
21,418 | import numpy as np
import onnx
import onnx.numpy_helper
from onnx import onnx_pb as onnx_proto
from onnx import shape_inference
def _find_by_name(item_name, item_list):
    """
    Helper function to find item by name in a list.

    parameter item_name: name of the item.
    parameter item_list: list of items (each must expose a ``name`` attribute).
    return: item if found. None otherwise.
    """
    # linear scan; first match wins
    items = [item for item in item_list if item.name == item_name]
    return items[0] if len(items) > 0 else None
The provided code snippet includes necessary dependencies for implementing the `_add_initializer_if_not_present` function. Write a Python function `def _add_initializer_if_not_present(graph, name, value, shape, type)` to solve the following problem:
Helper function to add an initializer if it is not present in the graph. parameter graph: GraphProto. parameter name: Initializer's name. parameter value: Initializer's value. parameter shape: Initializer's shape. parameter type: Initializer's type.
Here is the function:
def _add_initializer_if_not_present(graph, name, value, shape, type):
    """
    Helper function to add an initializer if it is not present in the graph.

    parameter graph: GraphProto (mutated in place when the initializer is new).
    parameter name: Initializer's name.
    parameter value: Initializer's value.
    parameter shape: Initializer's shape.
    parameter type: Initializer's type.
    """
    # nothing to do when an initializer with this name already exists
    if _find_by_name(name, graph.initializer) is not None:
        return
    graph.initializer.extend([onnx.helper.make_tensor(name, type, shape, value)])
21,419 | import numpy as np
import onnx
import onnx.numpy_helper
from onnx import onnx_pb as onnx_proto
from onnx import shape_inference
The provided code snippet includes necessary dependencies for implementing the `_get_qrange_for_qType` function. Write a Python function `def _get_qrange_for_qType(qType)` to solve the following problem:
Helper function to get the quantization range for a type. parameter qType: quantization type. return: quantization range.
Here is the function:
def _get_qrange_for_qType(qType):
    """
    Helper function to get the quantization range for a type.

    parameter qType: quantization type.
    return: quantization range.
    """
    qranges = {
        onnx_proto.TensorProto.UINT8: 255,  # 2^b - 1
        onnx_proto.TensorProto.INT8: 254,  # [-(2^{b-1}-1), 2^{b-1}-1]: [-127, 127]
    }
    if qType not in qranges:
        raise ValueError("unsupported quantization data type")
    return qranges[qType]
21,420 | import numpy as np
import onnx
import onnx.numpy_helper
from onnx import onnx_pb as onnx_proto
from onnx import shape_inference
The provided code snippet includes necessary dependencies for implementing the `_find_nodes_using_initializer` function. Write a Python function `def _find_nodes_using_initializer(graph, initializer)` to solve the following problem:
Helper function to find all nodes with an initializer as a input. parameter graph: GraphProto. parameter initializer: Initializer in TensorProto format. return: List of nodes.
Here is the function:
def _find_nodes_using_initializer(graph, initializer):
    """
    Helper function to find all nodes with an initializer as a input.

    parameter graph: GraphProto.
    parameter initializer: Initializer in TensorProto format.
    return: List of nodes.
    """
    target = initializer.name
    # NOTE: a node that lists the initializer as more than one of its inputs
    # appears once per matching input, matching the historical behavior
    return [
        node
        for node in graph.node
        for node_input in node.input
        if node_input == target
    ]
21,421 | import logging
from typing import Any
from sparseml.base import Framework, get_version
from sparseml.framework import FrameworkInferenceProviderInfo, FrameworkInfo
from sparseml.onnx.base import check_onnx_install, check_onnxruntime_install
from sparseml.onnx.sparsification import sparsification_info
from sparseml.sparsification import SparsificationInfo
def detect_framework(item: Any) -> Framework:
    """
    Detect the supported ML framework for a given item specifically for the
    onnx/onnxruntime package.
    Supported input types are the following:
    - A Framework enum
    - A string of any case representing the name of the framework
      (deepsparse, onnx, keras, pytorch, tensorflow_v1)
    - A supported file type within the framework such as model files:
      (onnx, pth, h5, pb)
    - An object from a supported ML framework such as a model instance
    If the framework cannot be determined, will return Framework.unknown

    :param item: The item to detect the ML framework for
    :type item: Any
    :return: The detected framework from the given item
    :rtype: Framework
    """
    framework = Framework.unknown

    if isinstance(item, Framework):
        _LOGGER.debug("framework detected from Framework instance")
        framework = item
    elif isinstance(item, str) and item.lower().strip() in Framework.__members__:
        _LOGGER.debug("framework detected from Framework string instance")
        framework = Framework[item.lower().strip()]
    elif isinstance(item, str) and ".onnx" in item.lower().strip():
        # file url or path ending in an onnx extension; checked before the
        # generic "onnx" substring test below -- previously this branch was
        # unreachable because the substring test always matched first
        _LOGGER.debug("framework detected from .onnx")
        framework = Framework.onnx
    elif isinstance(item, str) and "onnx" in item.lower().strip():
        # string, check if it's a string saying onnx
        _LOGGER.debug("framework detected from onnx text")
        framework = Framework.onnx
    elif check_onnx_install(raise_on_error=False):
        from onnx import ModelProto

        if isinstance(item, ModelProto):
            # onnx native support
            _LOGGER.debug("framework detected from ONNX instance")
            framework = Framework.onnx

    return framework
class Framework(Enum):
    """
    Framework types known of/supported within the sparseml/deepsparse ecosystem.

    Values double as the canonical lowercase string names used for lookup
    (e.g. ``Framework["onnx"]`` or string matching in detection helpers).
    """

    unknown = "unknown"  # fallback when no framework can be determined
    deepsparse = "deepsparse"
    onnx = "onnx"
    keras = "keras"
    pytorch = "pytorch"
    tensorflow_v1 = "tensorflow_v1"
The provided code snippet includes necessary dependencies for implementing the `is_supported` function. Write a Python function `def is_supported(item: Any) -> bool` to solve the following problem:
:param item: The item to detect the support for :type item: Any :return: True if the item is supported by onnx/onnxruntime, False otherwise :rtype: bool
Here is the function:
def is_supported(item: Any) -> bool:
    """
    :param item: The item to detect the support for
    :type item: Any
    :return: True if the item is supported by onnx/onnxruntime, False otherwise
    :rtype: bool
    """
    # supported exactly when detection resolves the item to the onnx framework
    return detect_framework(item) == Framework.onnx
21,422 | import logging
from typing import Any
from sparseml.base import Framework, get_version
from sparseml.framework import FrameworkInferenceProviderInfo, FrameworkInfo
from sparseml.onnx.base import check_onnx_install, check_onnxruntime_install
from sparseml.onnx.sparsification import sparsification_info
from sparseml.sparsification import SparsificationInfo
class Framework(Enum):
"""
Framework types known of/supported within the sparseml/deepsparse ecosystem
"""
unknown = "unknown"
deepsparse = "deepsparse"
onnx = "onnx"
keras = "keras"
pytorch = "pytorch"
tensorflow_v1 = "tensorflow_v1"
def get_version(
    package_name: str,
    raise_on_error: bool,
    alternate_package_names: Optional[List[str]] = None,
) -> Optional[str]:
    """
    :param package_name: The name of the full package, as it would be imported,
        to get the version for
    :type package_name: str
    :param raise_on_error: True to raise an error if package is not installed
        or couldn't be imported, False to return None
    :type raise_on_error: bool
    :param alternate_package_names: List of alternate names to look for the package
        under if package_name is not found. Useful for nightly builds.
        The caller's list is not modified.
    :type alternate_package_names: Optional[List[str]]
    :return: the version of the desired package if detected, otherwise raises an error
    :rtype: str
    """
    current_version: Optional[str] = None
    version_err = None

    try:
        current_version = pkg_resources.get_distribution(package_name).version
    except Exception as err:
        version_err = err

    if version_err and alternate_package_names:
        # try alternates, last entry first (preserves prior behavior), using a
        # copy so the caller's list is never mutated (previously this popped
        # from the argument itself)
        remaining = list(alternate_package_names)
        next_package = remaining.pop()
        return get_version(next_package, raise_on_error, remaining)

    if version_err and raise_on_error:
        raise ImportError(
            f"error while getting current version for {package_name}: {version_err}"
        )

    return current_version if not version_err else None
def check_onnx_install(
    min_version: Optional[str] = _ONNX_MIN_VERSION,
    max_version: Optional[str] = None,
    raise_on_error: bool = True,
) -> bool:
    """
    Check that the onnx package is installed and inside the accepted
    version bounds.

    :param min_version: minimum acceptable onnx version (inclusive);
        None for no lower bound
    :type min_version: str
    :param max_version: maximum acceptable onnx version (inclusive);
        None for no upper bound
    :type max_version: str
    :param raise_on_error: True to surface problems (not installed, version
        out of bounds) as ImportError; False to report them via the return
        value instead
    :type raise_on_error: bool
    :return: True when onnx is installed within the accepted bounds,
        False otherwise (only reachable when raise_on_error is False)
    :rtype: bool
    """
    # onnx_err is the import error captured at module load time, if any
    if onnx_err is None:
        return check_version("onnx", min_version, max_version, raise_on_error)
    if raise_on_error:
        raise onnx_err
    return False
def check_onnxruntime_install(
    min_version: Optional[str] = _ORT_MIN_VERSION,
    max_version: Optional[str] = None,
    raise_on_error: bool = True,
) -> bool:
    """
    Check that the onnxruntime package is installed and inside the accepted
    version bounds.

    :param min_version: minimum acceptable onnxruntime version (inclusive);
        None for no lower bound
    :type min_version: str
    :param max_version: maximum acceptable onnxruntime version (inclusive);
        None for no upper bound
    :type max_version: str
    :param raise_on_error: True to surface problems (not installed, version
        out of bounds) as ImportError; False to report them via the return
        value instead
    :type raise_on_error: bool
    :return: True when onnxruntime is installed within the accepted bounds,
        False otherwise (only reachable when raise_on_error is False)
    :rtype: bool
    """
    # onnxruntime_err is the import error captured at module load time, if any
    if onnxruntime_err is None:
        return check_version(
            "onnxruntime",
            min_version,
            max_version,
            raise_on_error,
            extra_error_message="Try installing sparseml[onnxruntime] or onnxruntime",
        )
    if raise_on_error:
        raise onnxruntime_err
    return False
The provided code snippet includes necessary dependencies for implementing the `framework_info` function. Write a Python function `def framework_info() -> FrameworkInfo` to solve the following problem:
Detect the information for the onnx/onnxruntime framework such as package versions, availability for core actions such as training and inference, sparsification support, and inference provider support. :return: The framework info for onnx/onnxruntime :rtype: FrameworkInfo
Here is the function:
def framework_info() -> FrameworkInfo:
    """
    Detect the information for the onnx/onnxruntime framework such as package
    versions, availability for core actions such as training and inference,
    sparsification support, and inference provider support.

    :return: The framework info for onnx/onnxruntime
    :rtype: FrameworkInfo
    """
    all_providers = []
    available_providers = []
    if check_onnxruntime_install(raise_on_error=False):
        from onnxruntime import get_all_providers, get_available_providers

        available_providers = get_available_providers()
        all_providers = get_all_providers()

    def _provider(name, description, device, ort_name):
        # a provider is usable only when onnx + onnxruntime are installed and
        # onnxruntime reports the matching execution provider
        return FrameworkInferenceProviderInfo(
            name=name,
            description=description,
            device=device,
            supported_sparsification=SparsificationInfo(),  # TODO: fill in when available
            available=(
                check_onnx_install(raise_on_error=False)
                and check_onnxruntime_install(raise_on_error=False)
                and ort_name in available_providers
            ),
            properties={},
            warnings=[],
        )

    def _version(package, alternates=None):
        return get_version(
            package_name=package,
            raise_on_error=False,
            alternate_package_names=alternates,
        )

    cpu_provider = _provider(
        "cpu", "Base CPU provider within ONNXRuntime", "cpu", "CPUExecutionProvider"
    )
    gpu_provider = _provider(
        "cuda", "Base GPU CUDA provider within ONNXRuntime", "gpu", "CUDAExecutionProvider"
    )

    return FrameworkInfo(
        framework=Framework.onnx,
        package_versions={
            "onnx": _version("onnx"),
            "onnxruntime": _version("onnxruntime"),
            "sparsezoo": _version("sparsezoo", ["sparsezoo-nightly"]),
            "sparseml": _version("sparseml", ["sparseml-nightly"]),
        },
        sparsification=sparsification_info(),
        inference_providers=[cpu_provider, gpu_provider],
        properties={
            "available_providers": available_providers,
            "all_providers": all_providers,
        },
        training_available=False,
        sparsification_available=True,
        exporting_onnx_available=True,
        inference_available=True,
    )
21,423 | from copy import deepcopy
from typing import Union
import numpy
from onnx import ModelProto, TensorProto, numpy_helper
def _check_sparse_tensor_import():
    # SparseTensorProto support requires ONNX >= 1.6.0; surface the import
    # error captured at module load time when the feature is unavailable
    if not sparse_tensor_import_error:
        return
    raise sparse_tensor_import_error
def create_sparse_tensor(
    array: Union[numpy.ndarray, TensorProto],
    name: str = None,
) -> Union[SparseTensorProto, None]:
    """
    Build a SparseTensorProto holding only the non-zero values of the input.

    :param array: numpy array or TensorProto object to convert to sparse
        representation
    :param name: name of this sparse tensor, stored in
        SparseTensorProto.values.name. When a TensorProto is given, defaults
        to TensorProto.name
    :return: SparseTensorProto object built from the sparse representation of
        the input array
    """
    _check_sparse_tensor_import()

    if isinstance(array, TensorProto):
        name = name or array.name or None
        array = numpy_helper.to_array(array)

    # work on the flattened view; the original shape is restored via dims
    original_dims = array.shape
    flat = array.reshape(-1)

    (kept_idxs,) = flat.nonzero()  # unwrap the 1-tuple from numpy.nonzero
    kept_values = flat[kept_idxs]
    kept_idxs = kept_idxs.astype(numpy.int64)  # required index dtype

    return SparseTensorProto(
        values=numpy_helper.from_array(kept_values, name=name),
        indices=numpy_helper.from_array(kept_idxs),
        dims=original_dims,
    )
# ONNX tensor data types eligible for conversion to sparse representation;
# initializers of any other data type are left dense
_COMPRESSIBLE_DATA_TYPES = {
    TensorProto.FLOAT,
    TensorProto.FLOAT16,
    TensorProto.INT64,
    TensorProto.INT32,
    TensorProto.INT16,
}
The provided code snippet includes necessary dependencies for implementing the `convert_model_initializers_to_sparse` function. Write a Python function `def convert_model_initializers_to_sparse( model: ModelProto, sparsity_threshold: float = 0.6, inplace: bool = True ) -> ModelProto` to solve the following problem:
:param model: ONNX model with initializers to convert to sparse :param sparsity_threshold: the minimum sparsity of a tensor to be converted to sparse representation. Default is 0.6 :param inplace: True to do model conversion in place. Default is True :return: the given model with initializers above the sparsity threshold converted to sparse initializers
Here is the function:
def convert_model_initializers_to_sparse(
    model: ModelProto, sparsity_threshold: float = 0.6, inplace: bool = True
) -> ModelProto:
    """
    Convert eligible dense initializers in an ONNX model to sparse initializers.

    :param model: ONNX model with initializers to convert to sparse
    :param sparsity_threshold: the minimum sparsity of a tensor to be converted
        to sparse representation. Default is 0.6
    :param inplace: True to do model conversion in place. Default is True
    :return: the given model with initializers above the sparsity threshold
        converted to sparse initializers
    """
    _check_sparse_tensor_import()

    if not inplace:
        model = deepcopy(model)

    sparsified_initializers = []
    for initializer in model.graph.initializer:
        if initializer.data_type not in _COMPRESSIBLE_DATA_TYPES:
            continue

        val = numpy_helper.to_array(initializer)
        if val.size == 0:
            # empty tensor: nothing to compress, and the sparsity ratio
            # below would divide by zero
            continue
        sparsity = 1.0 - (numpy.count_nonzero(val) / val.size)

        if sparsity < sparsity_threshold:
            continue

        sparse_tensor = create_sparse_tensor(val, initializer.name)
        if sparse_tensor is None:
            continue

        # defer removal until after iteration so the repeated field is not
        # mutated while being traversed
        sparsified_initializers.append(initializer)
        model.graph.sparse_initializer.append(sparse_tensor)

    for initializer in sparsified_initializers:
        model.graph.initializer.remove(initializer)

    return model
21,424 | from copy import deepcopy
from typing import Union
import numpy
from onnx import ModelProto, TensorProto, numpy_helper
def _check_sparse_tensor_import():
    # re-raise the import error captured at module load time when sparse
    # tensor support is unavailable in the installed ONNX version
    if sparse_tensor_import_error:
        # ONNX >= 1.6.0 required
        raise sparse_tensor_import_error
def sparse_tensor_to_dense(sparse_tensor: SparseTensorProto) -> TensorProto:
    """
    Expand a SparseTensorProto back into its dense TensorProto form.

    :param sparse_tensor: SparseTensorProto object
    :return: TensorProto object that is the dense representation of the given
        sparse tensor.
    """
    _check_sparse_tensor_import()

    values = numpy_helper.to_array(sparse_tensor.values)
    indices = numpy_helper.to_array(sparse_tensor.indices)
    shape = sparse_tensor.dims
    name = sparse_tensor.values.name

    # scatter the stored values into a flat zero buffer, then restore shape
    dense = numpy.zeros(numpy.prod(shape)).astype(values.dtype)
    dense[indices] = values
    return numpy_helper.from_array(dense.reshape(shape), name=name)
The provided code snippet includes necessary dependencies for implementing the `convert_sparse_initializers_to_dense` function. Write a Python function `def convert_sparse_initializers_to_dense( model: ModelProto, inplace: bool = True ) -> ModelProto` to solve the following problem:
:param model: ONNX model with sparse initializers to convert to dense representation :param inplace: True to do model conversion in place. Default is True :return: The given model with all sparse initializers converted to dense initializers
Here is the function:
def convert_sparse_initializers_to_dense(
    model: ModelProto, inplace: bool = True
) -> ModelProto:
    """
    :param model: ONNX model with sparse initializers to convert to dense
        representation
    :param inplace: True to do model conversion in place. Default is True
    :return: The given model with all sparse initializers converted to dense
        initializers
    """
    _check_sparse_tensor_import()

    if not inplace:
        model = deepcopy(model)

    # drain the sparse initializer list, densifying each entry as it is popped
    while model.graph.sparse_initializer:
        model.graph.initializer.append(
            sparse_tensor_to_dense(model.graph.sparse_initializer.pop())
        )

    return model
21,425 | import logging
import os
import re
import tempfile
import time
from abc import ABC, abstractmethod
from collections import OrderedDict
from copy import deepcopy
from typing import Any, Callable, Dict, List, Tuple, Union
import numpy
import psutil
from onnx import ModelProto
from tqdm import auto
from sparseml.onnx.base import require_onnxruntime
from sparseml.onnx.utils.data import DataLoader
from sparseml.onnx.utils.graph_editor import override_model_batch_size
from sparseml.onnx.utils.helpers import (
extract_node_id,
get_node_by_id,
get_prunable_node_from_foldable,
is_foldable_node,
)
from sparsezoo import File, Model
from sparsezoo.utils import load_model
def _check_args(args, kwargs):
if args:
raise ValueError(
"args was not empty, cannot pass any additional args through: {}".format(
args
)
)
if kwargs:
raise ValueError(
(
"kwargs was not empty, cannot pass any additional args through: {}"
).format(kwargs)
) | null |
21,426 | import logging
import os
import re
import tempfile
import time
from abc import ABC, abstractmethod
from collections import OrderedDict
from copy import deepcopy
from typing import Any, Callable, Dict, List, Tuple, Union
import numpy
import psutil
from onnx import ModelProto
from tqdm import auto
from sparseml.onnx.base import require_onnxruntime
from sparseml.onnx.utils.data import DataLoader
from sparseml.onnx.utils.graph_editor import override_model_batch_size
from sparseml.onnx.utils.helpers import (
extract_node_id,
get_node_by_id,
get_prunable_node_from_foldable,
is_foldable_node,
)
from sparsezoo import File, Model
from sparsezoo.utils import load_model
_LOGGER = logging.getLogger(__name__)
The provided code snippet includes necessary dependencies for implementing the `max_available_cores` function. Write a Python function `def max_available_cores() -> int` to solve the following problem:
:return: the maximum number of physical cores detected on the system
Here is the function:
def max_available_cores() -> int:
    """
    :return: the maximum number of physical cores detected on the system
    """
    if cpu_details is not None:
        # prefer the engine-reported per-socket physical core count when
        # deepsparse is installed
        _LOGGER.debug(
            "retrieving physical core count per socket "
            "from deepsparse.cpu.cpu_details()"
        )
        return cpu_details()[0]

    _LOGGER.debug("retrieving physical core count using psutil")
    core_count = psutil.cpu_count(logical=False)
    # psutil may return None when it cannot determine the count
    return core_count if core_count else -1
21,427 | import logging
import os
import re
import tempfile
import time
from abc import ABC, abstractmethod
from collections import OrderedDict
from copy import deepcopy
from typing import Any, Callable, Dict, List, Tuple, Union
import numpy
import psutil
from onnx import ModelProto
from tqdm import auto
from sparseml.onnx.base import require_onnxruntime
from sparseml.onnx.utils.data import DataLoader
from sparseml.onnx.utils.graph_editor import override_model_batch_size
from sparseml.onnx.utils.helpers import (
extract_node_id,
get_node_by_id,
get_prunable_node_from_foldable,
is_foldable_node,
)
from sparsezoo import File, Model
from sparsezoo.utils import load_model
_LOGGER = logging.getLogger(__name__)
def extract_node_id(node: NodeProto) -> str:
    """
    Get the node id for a given node from an ONNX model.
    Uses the first output id as the node id, since the ONNX spec guarantees
    it is unique to this node.

    :param node: the node to grab an id for
    :return: the id for the node
    """
    return str(node.output[0])
def get_node_by_id(model: ModelProto, node_id: str) -> Union[NodeProto, None]:
    """
    Get a node from a model by the node_id generated from extract_node_id

    :param model: the model proto loaded from the ONNX file
    :param node_id: id of the node to get from the model
    :return: the retrieved node or None if no node found
    """
    # lazily scan the graph; stop at the first id match
    matches = (
        node for node in model.graph.node if extract_node_id(node) == node_id
    )
    return next(matches, None)
def is_foldable_node(node: Union[str, NodeProto]) -> bool:
    """
    Foldable nodes as defined by ONNX Runtime and what it supports layerwise
    folding in the ONNX graphs. More info can be found in their docs:
    https://www.onnxruntime.ai/docs/resources/graph-optimizations.html

    :param node: the node or node type to check if it is foldable or not
        according to the ONNX Runtime specs
    :return: True if the node is foldable and therefore can be combined with
        other nodes, False otherwise
    """
    op_type = node if isinstance(node, str) else str(node.op_type)
    return op_type.lower() in ["batchnormalization", "add", "mul"]
def get_prunable_node_from_foldable(
    model: ModelProto,
    foldable_node: Union[str, NodeProto],
    traverse_previous: bool = True,
    max_node_distance: int = 3,
) -> Union[None, NodeProto]:
    """
    Get a prunable node that is attached by foldable nodes to a given foldable node.
    Returns None if nothing could be found.
    Ex: get the convolution that would be folded for an attached BatchNormalization

    :param model: the model the node is from
    :param foldable_node: the foldable node or node id to find prunable node from
    :param traverse_previous: True to only search for previous prunable nodes that the
        foldable node could have been attached to for Conv -> BN patterns.
        False to only search for following prunable nodes that the foldable node
        could have been attached to for BN -> Conv patterns.
    :param max_node_distance: The maximum distance
        (and therefore number of foldable nodes) the prunable node must be within
        to match. Ex: max_node_distance = 3, the prunable node must be within 3
        other foldable nodes of the foldable node passed in to match
    :return: the found prunable node
    :raises ValueError: if the given node is not itself foldable
    """
    if isinstance(foldable_node, str):
        # accept a node id and resolve it to its NodeProto first
        foldable_node = get_node_by_id(model, foldable_node)
    if not is_foldable_node(foldable_node):
        raise ValueError(
            "non foldable node passed in for foldable_node: {}".format(
                extract_node_id(foldable_node)
            )
        )
    prunable_node = foldable_node
    num_steps = 0
    # walk the chain of adjacent foldable nodes (direction controlled by
    # traverse_previous) until a prunable node is reached, the chain ends,
    # or max_node_distance steps have been taken
    while (
        prunable_node is not None
        and not is_prunable_node(model, prunable_node)
        and is_foldable_node(prunable_node)
        and num_steps < max_node_distance
    ):
        next_nodes = (
            get_node_input_nodes(model, prunable_node)
            if traverse_previous
            else get_node_output_nodes(model, prunable_node)
        )
        num_steps += 1
        # follow only the first adjacent node; NOTE(review): assumes the
        # relevant fold chain is linear — confirm branching is not needed
        prunable_node = next_nodes[0] if next_nodes else None
    # only return a node if the walk actually landed on a prunable one
    return (
        None
        if prunable_node is None or not is_prunable_node(model, prunable_node)
        else prunable_node
    )
The provided code snippet includes necessary dependencies for implementing the `correct_nm_analyze_model_node_ids` function. Write a Python function `def correct_nm_analyze_model_node_ids(nm_result: Dict, model: Union[str, ModelProto])` to solve the following problem:
Correct the node ids returned from the deepsparse.analyze_model api. In some cases, it will return the ids for folded nodes due to ONNXRuntime folding. This finds the corrected node ids from those folded nodes. Additionally, ops that did not have an id are changed from the returned string <none> to proper None python type :param nm_result: the result from the deepsparse.analyze_model api :param model: the onnx model proto or path to the onnx file that the nm_result was for
Here is the function:
def correct_nm_analyze_model_node_ids(nm_result: Dict, model: Union[str, ModelProto]):
    """
    Correct the node ids returned from the deepsparse.analyze_model api.
    In some cases, it will return the ids for folded nodes due to ONNXRuntime folding.
    This finds the corrected node ids from those folded nodes.
    Additionally, ops that did not have an id are changed from the returned
    string <none> to proper None python type

    :param nm_result: the result from the deepsparse.analyze_model api
    :param model: the onnx model proto or path to the onnx file that the
        nm_result was for
    """
    model = load_model(model)

    for layer in nm_result["layer_info"]:
        node_id = (
            layer["canonical_name"] if "<none>" not in layer["canonical_name"] else None
        )

        if node_id is None:
            # normalize the engine's "<none>" placeholder to a python None
            layer["canonical_name"] = None
            continue

        node = get_node_by_id(model, node_id)

        if node is None:
            _LOGGER.warning(
                (
                    "node returned from deepsparse.model_debug_analysis "
                    "was not found in the model graph; node id {}"
                ).format(node_id)
            )
            continue

        if is_foldable_node(node):
            # fix: the {} placeholder was previously never filled in; format
            # in the node id so the debug line is meaningful
            _LOGGER.debug(
                (
                    "foldable node of id {} returned from "
                    "deepsparse.model_debug_analysis api, matching to prunable node"
                ).format(node_id)
            )
            # traverse previous because incorrect node id will only be returned
            # for following foldable layers, not previous
            node = get_prunable_node_from_foldable(model, node, traverse_previous=True)

            if node is None:
                _LOGGER.warning(
                    (
                        "could not find prunable node from a foldable node "
                        "returned in the deepsparse.model_debug_analysis api; "
                        "node id: {}"
                    ).format(node_id)
                )
            else:
                prunable_node_id = extract_node_id(node)
                _LOGGER.debug(
                    (
                        "matched prunable node of id {} to foldable node {} as "
                        "returned from deepsparse.model_debug_analysis api"
                    ).format(prunable_node_id, node_id)
                )
                layer["canonical_name"] = prunable_node_id
21,428 | import logging
import os
import re
import tempfile
import time
from abc import ABC, abstractmethod
from collections import OrderedDict
from copy import deepcopy
from typing import Any, Callable, Dict, List, Tuple, Union
import numpy
import psutil
from onnx import ModelProto
from tqdm import auto
from sparseml.onnx.base import require_onnxruntime
from sparseml.onnx.utils.data import DataLoader
from sparseml.onnx.utils.graph_editor import override_model_batch_size
from sparseml.onnx.utils.helpers import (
extract_node_id,
get_node_by_id,
get_prunable_node_from_foldable,
is_foldable_node,
)
from sparsezoo import File, Model
from sparsezoo.utils import load_model
The provided code snippet includes necessary dependencies for implementing the `split_canonical_names` function. Write a Python function `def split_canonical_names(nm_result: Dict)` to solve the following problem:
Splits analysis layer results from grouped canonical names by individual nodes. Stores the original grouped canonical name in the 'meta_canonical_name' field. Will split on any canonical_name that includes ','. :param nm_result: the result from the deepsparse.model_debug_analysis api
Here is the function:
def split_canonical_names(nm_result: Dict):
    """
    Splits analysis layer results from grouped canonical names by individual
    nodes. Stores the original grouped canonical name in the
    'meta_canonical_name' field.
    Will split on any canonical_name that includes ','.

    :param nm_result: the result from the deepsparse.model_debug_analysis api
    """
    expanded = []
    for layer_info in nm_result["layer_info"]:
        grouped_name = layer_info["canonical_name"]
        if "," not in grouped_name:
            # already a single node; just record that it was not grouped
            layer_info["meta_canonical_name"] = None
            expanded.append(layer_info)
            continue
        # one copy of the layer info per individual node in the group
        for individual_name in grouped_name.split(","):
            split_info = deepcopy(layer_info)
            split_info["meta_canonical_name"] = grouped_name
            split_info["canonical_name"] = individual_name
            expanded.append(split_info)
    nm_result["layer_info"] = expanded
21,429 | import numpy
from scipy.stats import entropy
The provided code snippet includes necessary dependencies for implementing the `kl_divergence` function. Write a Python function `def kl_divergence( predicted: numpy.ndarray, expected: numpy.ndarray, zero_point: float = 0.0, min_value: float = 1.0, ) -> float` to solve the following problem:
Calculate the kl_divergence (entropy) between two input arrays. Shifts all values such that the zero_point is at one. If a value is lower, then sets it equal to 1. :param predicted: the first array to compare with :param expected: the second array to compare with :param zero_point: the zero point that should be used to shift values above 1 :param min_value: the minimum value that all values will be truncated to if they are below :return: the calculated KL divergence
Here is the function:
def kl_divergence(
    predicted: numpy.ndarray,
    expected: numpy.ndarray,
    zero_point: float = 0.0,
    min_value: float = 1.0,
) -> float:
    """
    Calculate the kl_divergence (entropy) between two input arrays.
    Shifts all values such that the zero_point is at one.
    If a value is lower, then sets it equal to 1.

    :param predicted: the first array to compare with
    :param expected: the second array to compare with
    :param zero_point: the zero point that should be used to shift values above 1
    :param min_value: the minimum value that all values will be truncated to
        if they are below
    :return: the calculated KL divergence
    :raises ValueError: if the two arrays differ in shape
    """
    if predicted.shape != expected.shape:
        raise ValueError(
            "predicted shape of {} must match expected shape of {}".format(
                predicted.shape, expected.shape
            )
        )

    # shift both distributions so zero_point lands at min_value, then clamp
    # anything below it: entropy requires strictly positive inputs
    shift = min_value - zero_point
    predicted = numpy.maximum(predicted.flatten() + shift, min_value)
    expected = numpy.maximum(expected.flatten() + shift, min_value)

    return entropy(predicted, expected)
21,430 | import logging
from collections import OrderedDict
from copy import deepcopy
from functools import reduce
from typing import Any, Dict, List, NamedTuple, Tuple, Union
import numpy
import onnx
from onnx import ModelProto, NodeProto, TensorProto, numpy_helper
from onnx.helper import get_attribute_value, make_empty_tensor_value_info
from sparseml.onnx.base import require_onnxruntime
from sparsezoo.utils import load_model, save_onnx
The provided code snippet includes necessary dependencies for implementing the `get_numpy_dtype` function. Write a Python function `def get_numpy_dtype(tensor: onnx.TensorProto) -> Union[None, numpy.dtype]` to solve the following problem:
Extract the NumPy dtype of an ONNX tensor. Returns None if there is not a direct mapping from the ONNX data type to a NumPy dtype. :param tensor: the tensor to get the dtype of :return: a NumPy dtype for the tensor if available otherwise None
Here is the function:
def get_numpy_dtype(tensor: onnx.TensorProto) -> Union[None, numpy.dtype]:
    """
    Extract the NumPy dtype of an ONNX tensor.
    Returns None if there is not a direct mapping from the ONNX data type
    to a NumPy dtype.

    :param tensor: the tensor to get the dtype of
    :return: a NumPy dtype for the tensor if available otherwise None
    """
    elem_type = tensor.type.tensor_type.elem_type  # type: int
    type_name = onnx.TensorProto.DataType.Name(elem_type).lower()  # type: str

    # ONNX names 32-bit floats "float"; numpy calls the same type float32
    if type_name == "float":
        type_name = "float32"

    return getattr(numpy, type_name, None)
21,431 | import logging
from collections import OrderedDict
from copy import deepcopy
from functools import reduce
from typing import Any, Dict, List, NamedTuple, Tuple, Union
import numpy
import onnx
from onnx import ModelProto, NodeProto, TensorProto, numpy_helper
from onnx.helper import get_attribute_value, make_empty_tensor_value_info
from sparseml.onnx.base import require_onnxruntime
from sparsezoo.utils import load_model, save_onnx
_LOGGER = logging.getLogger(__name__)
def extract_node_id(node: NodeProto) -> str:
    """
    Get the node id for a given node from an ONNX model.
    The first output id serves as the node id because the ONNX spec
    guarantees it to be unique to this node.

    :param node: the node to grab an id for
    :return: the id for the node
    """
    first_output = node.output[0]
    return str(first_output)
# Shape record for a single node: its id plus the inferred shapes of its
# inputs and outputs (either may be None when inference was inconclusive)
NodeShape = NamedTuple(
    "NodeShape",
    [
        ("id", str),
        ("input_shapes", Union[List[List[int]], None]),
        ("output_shapes", Union[List[List[int]], None]),
    ],
)
def extract_nodes_shapes_ort(model: ModelProto) -> Dict[str, List[List[int]]]:
    """
    Creates a modified model to expose intermediate outputs and runs an ONNX Runtime
    InferenceSession to obtain the output shape of each node.

    :param model: an ONNX model
    :return: a list of NodeArg with their shape exposed
    """
    import onnxruntime  # import protected by @require_onnxruntime()

    # work on a copy: every node gets an extra (empty-typed) graph output so
    # the session reports its shape alongside the real outputs
    model_copy = deepcopy(model)

    for node in model_copy.graph.node:
        intermediate_layer_value_info = make_empty_tensor_value_info(
            extract_node_id(node)
        )
        model_copy.graph.output.append(intermediate_layer_value_info)

    sess_options = onnxruntime.SessionOptions()
    # NOTE(review): presumably severity 3 suppresses info/warning logs from
    # loading the modified graph — confirm against onnxruntime docs
    sess_options.log_severity_level = 3
    providers = (
        ["CUDAExecutionProvider", "CPUExecutionProvider"]
        if onnxruntime.get_device() == "GPU"
        else ["CPUExecutionProvider"]
    )
    sess = onnxruntime.InferenceSession(
        model_copy.SerializeToString(),
        sess_options=sess_options,
        providers=providers,
    )

    output_shapes = {}
    for node in sess.get_outputs() + sess.get_inputs():
        # empty shapes are normalized to None
        output_shapes[node.name] = (
            node.shape if node.shape is not None and len(node.shape) > 0 else None
        )
    return output_shapes
def extract_nodes_shapes_shape_inference(
    model: ModelProto,
) -> Dict[str, List[Union[None, List[int]]]]:
    """
    Creates a modified model to expose intermediate outputs and runs an ONNX shape
    inference to obtain the output shape of each node.

    NOTE: The ONNX docs on shape inference have the following
    disclaimer on shape inference:
    Shape inference is not guaranteed to be complete.
    In particular, some dynamic behaviors block the flow of shape inference,
    for example a Reshape to a dynamically-provide shape.
    Also, all operators are not required to have a shape inference implementation.

    :param model: an ONNX model
    :return: a list of NodeProto with their shape exposed
    :raises ModuleNotFoundError: if the installed onnx version lacks
        shape_inference
    """
    # promote every node output to a graph output with undefined type so
    # inferred shapes become visible on model_copy.graph.output
    model_copy = deepcopy(model)
    for node in model_copy.graph.node:
        model_copy.graph.output.extend(
            [
                onnx.helper.make_tensor_value_info(
                    output, onnx.TensorProto.UNDEFINED, None
                )
                for output in node.output
            ]
        )

    if hasattr(onnx, "shape_inference"):
        model_copy = onnx.shape_inference.infer_shapes(model_copy)
    else:
        raise ModuleNotFoundError(
            "onnx.shape_inference not available for current version, "
            "please upgrade to use this functionality"
        )

    output_shapes = {}
    for node in model_copy.graph.output:
        # empty shapes are normalized to None
        node_shape = extract_shape(node)
        output_shapes[node.name] = (
            list(node_shape) if node_shape is not None and len(node_shape) > 0 else None
        )
    return output_shapes
The provided code snippet includes necessary dependencies for implementing the `extract_node_shapes` function. Write a Python function `def extract_node_shapes(model: ModelProto) -> Dict[str, NodeShape]` to solve the following problem:
Extracts the shape information for each node as a NodeShape object. :param model: the loaded onnx.ModelProto to extract node shape information from :return: a mapping of node id to a NodeShape object
Here is the function:
def extract_node_shapes(model: ModelProto) -> Dict[str, NodeShape]:
    """
    Extracts the shape information for each node as a NodeShape object.

    :param model: the loaded onnx.ModelProto to extract node shape information from
    :return: a mapping of node id to a NodeShape object
    """
    # Maps NodeArg to its inputs
    node_to_inputs = {}
    for node in model.graph.node:
        node_to_inputs[extract_node_id(node)] = node.input

    # Obtains output shapes for each model's node
    # try ONNX Runtime first, falling back to static shape inference; each
    # failure is logged rather than raised
    output_shapes = None
    try:
        output_shapes = extract_nodes_shapes_ort(model)
    except Exception as err:
        _LOGGER.warning(
            "Extracting shapes using ONNX Runtime session failed: {}".format(err)
        )

    if output_shapes is None:
        try:
            output_shapes = extract_nodes_shapes_shape_inference(model)
        except Exception as err:
            _LOGGER.warning(
                "Extracting shapes using ONNX shape_inference failed: {}".format(err)
            )

    # Obtains the input shapes for each node
    if output_shapes is None:
        output_shapes = {}

    input_shapes = {}

    for node in output_shapes.keys():
        if node not in node_to_inputs:
            continue
        # a node's input shapes are the output shapes of its producers
        input_shapes[node] = [
            output_shapes[input_node]
            for input_node in node_to_inputs[node]
            if input_node in output_shapes and output_shapes[input_node] is not None
        ]
        input_shapes[node] = input_shapes[node] if len(input_shapes[node]) > 0 else None

    # Combines shape information into mapping of node id to a NodeShape object
    node_shapes = {}
    for node in output_shapes.keys():
        node_shapes[node] = NodeShape(
            node,
            input_shapes[node] if node in input_shapes else None,
            [output_shapes[node]]
            if node in output_shapes and output_shapes[node] is not None
            else None,
        )

    def _fix_shapes(shapes: List[Union[List[Union[int, None, str]], None]]):
        # normalize each dimension in place to an int; symbolic/dynamic
        # dims (strings, None) become None
        if not shapes:
            return

        for shape in shapes:
            if not shape:
                continue

            for index, index_shape in enumerate(shape):
                try:
                    shape[index] = (
                        round(index_shape)
                        if isinstance(index_shape, float)
                        else int(index_shape)
                    )
                except Exception:
                    # not parsable as an int (none or string)
                    # set to None
                    shape[index] = None

    for node_id, node_shape in node_shapes.items():
        _fix_shapes(node_shape.input_shapes)
        _fix_shapes(node_shape.output_shapes)

    return node_shapes
21,432 | import logging
from collections import OrderedDict
from copy import deepcopy
from functools import reduce
from typing import Any, Dict, List, NamedTuple, Tuple, Union
import numpy
import onnx
from onnx import ModelProto, NodeProto, TensorProto, numpy_helper
from onnx.helper import get_attribute_value, make_empty_tensor_value_info
from sparseml.onnx.base import require_onnxruntime
from sparsezoo.utils import load_model, save_onnx
The provided code snippet includes necessary dependencies for implementing the `get_attr_float_val_for_node` function. Write a Python function `def get_attr_float_val_for_node(node: onnx.NodeProto, attr: str) -> Union[float, None]` to solve the following problem:
:param node: Node to get the attribute value of :param attr: Attribute name to match in the node :return: The value of the attribute if the attribute is found in the node and is a float type. Otherwise returns None
Here is the function:
def get_attr_float_val_for_node(node: onnx.NodeProto, attr: str) -> Union[float, None]:
    """
    :param node: Node to get the attribute value of
    :param attr: Attribute name to match in the node
    :return: The value of the attribute if the attribute is found in the node
        and is a float type. Otherwise returns None
    """
    target = attr.lower()
    # case-insensitive substring match against attribute names; first hit wins
    matched = next((att for att in node.attribute if target in att.name), None)
    if matched is None:
        return None
    return matched.f if matched.type == matched.FLOAT else None
21,433 | import logging
from collections import OrderedDict
from copy import deepcopy
from functools import reduce
from typing import Any, Dict, List, NamedTuple, Tuple, Union
import numpy
import onnx
from onnx import ModelProto, NodeProto, TensorProto, numpy_helper
from onnx.helper import get_attribute_value, make_empty_tensor_value_info
from sparseml.onnx.base import require_onnxruntime
from sparsezoo.utils import load_model, save_onnx
def extract_node_id(node: NodeProto) -> str:
    """
    Get the node id for a given node from an ONNX model.
    The id is the node's first output name, which the ONNX spec guarantees
    to be unique within the graph.

    :param node: the node to grab an id for
    :return: the id for the node
    """
    node_outputs = node.output
    return str(node_outputs[0])
def get_node_params(
    model: ModelProto, node: NodeProto, include_values: bool = True
) -> Tuple[NodeParam, Union[NodeParam, None]]:
    """
    Get the params (weight and bias) for a node in an ONNX ModelProto.
    Must be an op type of one of [conv, gemm, matmul]

    :param model: the model proto loaded from the ONNX file
    :param node: the conv node to get the params for
    :param include_values: True to include the param values as NumPy arrays
        in the returned NodeParam objects.
        False to not load the values -- in this event NodeParam.val will be None
    :return: a tuple containing the weight, bias (if it is present)
    :raises ValueError: if the node's op type is not conv, gemm, or matmul
    """
    node_id = extract_node_id(node)

    # dispatch to the op-type-specific extractor; comparison is
    # case-insensitive
    if str(node.op_type).lower() == "conv":
        return conv_node_params(model, node, include_values)

    if str(node.op_type).lower() == "gemm":
        return gemm_node_params(model, node, include_values)

    if str(node.op_type).lower() == "matmul":
        return matmul_node_params(model, node, include_values)

    raise ValueError(
        (
            "node_id of {} is not a supported node (conv, gemm, matmul) "
            "for params: {}"
        ).format(node_id, node)
    )
def get_prunable_nodes(model: Union[str, ModelProto]) -> List[Any]:
    """
    Get the prunable nodes in an ONNX model proto.
    Prunable nodes are defined as any conv, gemm, or matmul

    :param model: the model proto loaded from the ONNX file
    :return: a list of nodes from the model proto
    """
    # accept either a loaded proto or a path to one
    model = load_model(model)
    return [node for node in model.graph.node if is_prunable_node(model, node)]
# Per-node (or whole-model) parameter sparsity summary: total param count,
# zero-valued param count, and the derived sparsity/density fractions
SparsityMeasurement = NamedTuple(
    "SparsityMeasurement",
    [
        ("node_id", str),
        ("params_count", int),
        ("params_zero_count", int),
        ("sparsity", float),
        ("density", float),
    ],
)
The provided code snippet includes necessary dependencies for implementing the `onnx_nodes_sparsities` function. Write a Python function `def onnx_nodes_sparsities( model: Union[str, ModelProto], ) -> Tuple[SparsityMeasurement, Dict[str, SparsityMeasurement]]` to solve the following problem:
Retrieve the sparsities for each Conv or Gemm op in an ONNX graph for the associated weight inputs. :param model: ONNX model to use :return: a tuple containing the overall sparsity measurement for the model, each conv or gemm node found in the model
Here is the function:
def onnx_nodes_sparsities(
    model: Union[str, ModelProto],
) -> Tuple[SparsityMeasurement, Dict[str, SparsityMeasurement]]:
    """
    Retrieve the sparsities for each Conv or Gemm op in an ONNX graph
    for the associated weight inputs.

    :param model: ONNX model path or loaded ModelProto to inspect
    :return: a tuple containing the overall sparsity measurement for the model,
        each conv or gemm node found in the model
    """
    model = load_model(model)
    per_node = OrderedDict()  # type: Dict[str, SparsityMeasurement]
    total_params = 0
    total_zeros = 0
    for node in get_prunable_nodes(model):
        node_id = extract_node_id(node)
        weight, _ = get_node_params(model, node)
        size = weight.val.size
        zeros = size - numpy.count_nonzero(weight.val)
        sparsity = float(zeros) / float(size)
        # key encodes the op type, node id, and the weight input name
        key = "{}(id={})_inp={}".format(node.op_type, node_id, weight.name)
        per_node[key] = SparsityMeasurement(
            node_id, size, zeros, sparsity, 1.0 - sparsity
        )
        total_params += size
        total_zeros += zeros
    overall = SparsityMeasurement(
        "ModelProto",
        total_params,
        total_zeros,
        float(total_zeros) / float(total_params),
        float(total_params - total_zeros) / float(total_params),
    )
    return overall, per_node
21,434 | import logging
from collections import OrderedDict
from copy import deepcopy
from functools import reduce
from typing import Any, Dict, List, NamedTuple, Tuple, Union
import numpy
import onnx
from onnx import ModelProto, NodeProto, TensorProto, numpy_helper
from onnx.helper import get_attribute_value, make_empty_tensor_value_info
from sparseml.onnx.base import require_onnxruntime
from sparsezoo.utils import load_model, save_onnx
The provided code snippet includes necessary dependencies for implementing the `model_inputs` function. Write a Python function `def model_inputs(model: Union[str, ModelProto]) -> List` to solve the following problem:
Get the input to the model from an ONNX model :param model: the loaded model or a file path to the ONNX model to get the model inputs for :return: the input to the model
Here is the function:
def model_inputs(model: Union[str, ModelProto]) -> List:
    """
    Get the external (non-initializer) inputs of an ONNX model.

    :param model: the loaded model or a file path to the ONNX model
        to get the model inputs for
    :return: the input value_infos of the model that are not initializers
    """
    model = load_model(model)
    init_names = {init.name for init in model.graph.initializer}
    # graph.input lists both real inputs and (for some exporters) initializers;
    # keep only the true external inputs
    external = [inp for inp in model.graph.input if inp.name not in init_names]
    # sanity check: every non-initializer input name maps to exactly one input
    assert len({inp.name for inp in model.graph.input} - init_names) == len(external)
    return external
21,435 | import logging
from collections import OrderedDict
from copy import deepcopy
from functools import reduce
from typing import Any, Dict, List, NamedTuple, Tuple, Union
import numpy
import onnx
from onnx import ModelProto, NodeProto, TensorProto, numpy_helper
from onnx.helper import get_attribute_value, make_empty_tensor_value_info
from sparseml.onnx.base import require_onnxruntime
from sparsezoo.utils import load_model, save_onnx
The provided code snippet includes necessary dependencies for implementing the `model_outputs` function. Write a Python function `def model_outputs(model: Union[str, ModelProto]) -> List` to solve the following problem:
Get the output from an ONNX model :param model: the loaded model or a file path to the ONNX model to get the model outputs for :return: the output from the model
Here is the function:
def model_outputs(model: Union[str, ModelProto]) -> List:
    """
    Get the output from an ONNX model.

    :param model: the loaded model or a file path to the ONNX model
        to get the model outputs for
    :return: list of the model graph's output value_infos
    """
    loaded = load_model(model)
    return list(loaded.graph.output)
21,436 | import logging
from collections import OrderedDict
from copy import deepcopy
from functools import reduce
from typing import Any, Dict, List, NamedTuple, Tuple, Union
import numpy
import onnx
from onnx import ModelProto, NodeProto, TensorProto, numpy_helper
from onnx.helper import get_attribute_value, make_empty_tensor_value_info
from sparseml.onnx.base import require_onnxruntime
from sparsezoo.utils import load_model, save_onnx
The provided code snippet includes necessary dependencies for implementing the `get_kernel_shape` function. Write a Python function `def get_kernel_shape(attributes: Dict[str, Any]) -> Union[List[float], None]` to solve the following problem:
Get the kernel shape from a dictionary of a model's attributes :param attributes: a dictionary of a model's attributes :return: the kernel shape if attribute contains either the kernel or kernel_shape field, otherwise None
Here is the function:
def get_kernel_shape(attributes: Dict[str, Any]) -> Union[List[float], None]:
    """
    Look up the kernel shape from a node's attribute dictionary.

    :param attributes: a dictionary of a node's attributes
    :return: the value of the "kernel" or "kernel_shape" attribute if either
        is present ("kernel" takes precedence), otherwise None
    """
    for key in ("kernel", "kernel_shape"):
        if key in attributes:
            return attributes[key]
    return None
21,437 | import logging
from collections import OrderedDict
from copy import deepcopy
from functools import reduce
from typing import Any, Dict, List, NamedTuple, Tuple, Union
import numpy
import onnx
from onnx import ModelProto, NodeProto, TensorProto, numpy_helper
from onnx.helper import get_attribute_value, make_empty_tensor_value_info
from sparseml.onnx.base import require_onnxruntime
from sparsezoo.utils import load_model, save_onnx
def _calculate_flops_matmul(
    op_type: str,
    input_shape: Union[List[List], None] = None,
    output_shape: Union[List[List], None] = None,
    weight_shape: Union[List, None] = None,
) -> Union[float, None]:
    """
    Calculates flops in an ONNX MatMul operation.
    If input shape only contains 1 input, in other words the value of the
    first index is 1, then the matrix operation is treated as a Gemm operation.
    Otherwise the operation is treated like a NumPy operation.
    Will return none if any required value is set to None
    :param op_type: Operation type of flop calculation
    :param input_shape: List of input shapes of operation
    :param output_shape: List of output shapes of operation
    :param weight_shape: Shape of weights in operation if any, else None
    :return: The amount of floating point operations in the operation
    """
    flops = None
    if (
        input_shape is not None
        and output_shape is not None
        and len(input_shape) > 1
        and input_shape[0][-1] == input_shape[1][-2]
    ):
        # two matmul inputs with compatible inner dims: each output element
        # costs inner_dim multiplies plus inner_dim - 1 adds (2k - 1 ops),
        # repeated over the leading (batch) dims of the output
        matrix_ops = (
            input_shape[0][-2] * input_shape[1][-1] * (2 * input_shape[0][-1] - 1)
        )
        flops = numpy.prod(output_shape[0][:-2]) * matrix_ops
    elif input_shape is not None and len(input_shape) == 1:
        # single runtime input: weight is a constant, treat as Gemm
        # (~2 ops per weight element: one multiply + one add)
        flops = _numpy_prod_with_none_check(weight_shape)
        flops = flops * 2 if flops is not None else None
    return flops
def _numpy_prod_with_none_check(array: Union[List, None]) -> Union[float, None]:
"""
:param array: an array like list
:return: the product of the array if array is not None otherwise return None
"""
return numpy.prod(array) if array is not None else None
def _array_as_numeric(array: Union[List, None]) -> Union[List, None]:
    """
    :param array: an array like list
    :return: the array with any non numeric or None values replaced with 1
    if array itself is not None, otherwise return None
    """
    if array is None:
        return None
    # dtype=object tolerates ragged nesting and mixed entry types
    # (e.g. symbolic dim names mixed with ints in shape lists)
    array = numpy.array(array, dtype=object)
    # Check if the array datatype is a number
    if numpy.issubdtype(array.dtype, numpy.number):
        return array
    else:
        # elementwise fallback; _attempt_cast_as_float is defined elsewhere in
        # this module -- per the docstring it maps non-numeric entries to 1
        to_float = numpy.vectorize(_attempt_cast_as_float)
        return to_float(array)
The provided code snippet includes necessary dependencies for implementing the `calculate_flops` function. Write a Python function `def calculate_flops( op_type: str, input_shape: Union[List[List], None] = None, output_shape: Union[List[List], None] = None, weight_shape: Union[List, None] = None, kernel_shape: Union[List, None] = None, bias_shape: Union[List, None] = None, attributes: Union[None, Dict[str, Any]] = None, ) -> Union[float, None]` to solve the following problem:
Calculate flops based on operation type and shape of certain attributes. If any fields necessary in operation are set to None, will return None :param op_type: Operation type of flop calculation :param input_shape: List of input shapes of operation :param output_shape: List of output shapes of operation :param weight_shape: Shape of weights in operation if any, else None :param kernel_shape: Shape of kernel in operation if any, else None :param bias_shape: Shape of bias in operation if any, else None :param attributes: The node attributes if any, else None :return: The amount of floating point operations in the operation
Here is the function:
def calculate_flops(
    op_type: str,
    input_shape: Union[List[List], None] = None,
    output_shape: Union[List[List], None] = None,
    weight_shape: Union[List, None] = None,
    kernel_shape: Union[List, None] = None,
    bias_shape: Union[List, None] = None,
    attributes: Union[None, Dict[str, Any]] = None,
) -> Union[float, None]:
    """
    Calculate flops based on operation type and shape of certain attributes.
    If any fields necessary in operation are set to None, will return None
    :param op_type: Operation type of flop calculation
    :param input_shape: List of input shapes of operation
    :param output_shape: List of output shapes of operation
    :param weight_shape: Shape of weights in operation if any, else None
    :param kernel_shape: Shape of kernel in operation if any, else None
    :param bias_shape: Shape of bias in operation if any, else None
    :param attributes: The node attributes if any, else None
    :return: The amount of floating point operations in the operation
    """
    # sanitize all shapes: non-numeric entries (e.g. symbolic dims) become 1
    input_shape = _array_as_numeric(input_shape)
    output_shape = _array_as_numeric(output_shape)
    weight_shape = _array_as_numeric(weight_shape)
    kernel_shape = _array_as_numeric(kernel_shape)
    bias_shape = _array_as_numeric(bias_shape)
    # elementwise binary ops: one op per output element
    if (
        op_type == "Add"
        or op_type == "Mul"
        or op_type == "Div"
        or op_type == "Sub"
        or op_type == "Clip"
    ):
        flops = _numpy_prod_with_none_check(output_shape)
    # elementwise activations / batch norm: one op per output element
    elif (
        op_type == "Relu"
        or op_type == "LeakyRelu"
        or op_type == "Sigmoid"
        or op_type == "Tanh"
        or op_type == "BatchNormalization"
    ):
        flops = _numpy_prod_with_none_check(output_shape)
    # global pooling touches every input element once
    elif op_type == "GlobalAveragePool" or op_type == "GlobalMaxPool":
        flops = _numpy_prod_with_none_check(input_shape)
    # windowed pooling: kernel-sized reduction per output element
    elif op_type == "MaxPool" or op_type == "AveragePool":
        flops = (
            numpy.prod(output_shape) * numpy.prod(kernel_shape)
            if output_shape is not None and kernel_shape is not None
            else None
        )
    elif op_type == "MatMul":
        flops = _calculate_flops_matmul(
            op_type,
            input_shape=input_shape,
            output_shape=output_shape,
            weight_shape=weight_shape,
        )
    # Gemm: ~2 ops (multiply + accumulate) per weight element
    elif op_type == "Gemm":
        flops = _numpy_prod_with_none_check(weight_shape)
        flops = flops * 2 if flops is not None else None
    elif op_type == "Conv":
        # kernel ops * input channels per group (weight_shape[1]) * outputs
        flops = (
            numpy.prod(kernel_shape) * weight_shape[1] * numpy.prod(output_shape)
            if kernel_shape is not None
            and weight_shape is not None
            and output_shape is not None
            else None
        )
        if (
            flops
            and attributes
            and "group" in attributes
            and attributes["group"]
            and attributes["group"] > 1
        ):
            # adjust flops for group / depthwise convolutions
            flops = flops / attributes["group"]
    else:
        # unrecognized op type: flops unknown
        flops = None
    if flops is not None and bias_shape is not None:
        if op_type == "Conv":
            # bias added once per output spatial location
            # assumes output_shape[0] ends in (..., H, W) -- TODO confirm layout
            flops += numpy.prod(bias_shape) * output_shape[0][-1] * output_shape[0][-2]
        else:
            flops += numpy.prod(bias_shape)
    return flops
21,438 | import logging
from collections import OrderedDict
from copy import deepcopy
from functools import reduce
from typing import Any, Dict, List, NamedTuple, Tuple, Union
import numpy
import onnx
from onnx import ModelProto, NodeProto, TensorProto, numpy_helper
from onnx.helper import get_attribute_value, make_empty_tensor_value_info
from sparseml.onnx.base import require_onnxruntime
from sparsezoo.utils import load_model, save_onnx
The provided code snippet includes necessary dependencies for implementing the `get_tensor_shape` function. Write a Python function `def get_tensor_shape(tensor: onnx.TensorProto) -> List[int]` to solve the following problem:
:param tensor: ONNX tensor to get the shape of :return: shape of the tensor as a list
Here is the function:
def get_tensor_shape(tensor: onnx.TensorProto) -> List[int]:
    """
    Read the static shape of an ONNX tensor value_info.

    :param tensor: ONNX tensor to get the shape of
    :return: shape of the tensor as a list of per-dimension dim_value entries
    """
    shape = []
    for dim in tensor.type.tensor_type.shape.dim:
        shape.append(dim.dim_value)
    return shape
21,439 | import logging
from collections import OrderedDict
from copy import deepcopy
from functools import reduce
from typing import Any, Dict, List, NamedTuple, Tuple, Union
import numpy
import onnx
from onnx import ModelProto, NodeProto, TensorProto, numpy_helper
from onnx.helper import get_attribute_value, make_empty_tensor_value_info
from sparseml.onnx.base import require_onnxruntime
from sparsezoo.utils import load_model, save_onnx
The provided code snippet includes necessary dependencies for implementing the `get_tensor_dim_shape` function. Write a Python function `def get_tensor_dim_shape(tensor: onnx.TensorProto, dim: Union[int, str]) -> int` to solve the following problem:
:param tensor: ONNX tensor to get the shape of a dimension of :param dim: dimension index of the tensor to get the shape of :return: shape of the tensor at the given dimension
Here is the function:
def get_tensor_dim_shape(tensor: onnx.TensorProto, dim: Union[int, str]) -> int:
    """
    Read the shape of a single dimension of an ONNX tensor.

    :param tensor: ONNX tensor to get the shape of a dimension of
    :param dim: dimension index of the tensor to get the shape of
    :return: the dimension's concrete dim_value when set (non-zero), otherwise
        its symbolic dim_param
    """
    dim_proto = tensor.type.tensor_type.shape.dim[dim]
    return dim_proto.dim_value or dim_proto.dim_param
21,440 | import logging
from collections import OrderedDict
from copy import deepcopy
from functools import reduce
from typing import Any, Dict, List, NamedTuple, Tuple, Union
import numpy
import onnx
from onnx import ModelProto, NodeProto, TensorProto, numpy_helper
from onnx.helper import get_attribute_value, make_empty_tensor_value_info
from sparseml.onnx.base import require_onnxruntime
from sparsezoo.utils import load_model, save_onnx
def set_tensor_dim_shape(tensor: onnx.TensorProto, dim: int, value: Union[int, str]):
    """
    Sets the shape of the tensor at the given dimension to the given value.

    :param tensor: ONNX tensor to modify the shape of
    :param dim: dimension index of the tensor to modify the shape of
    :param value: new shape for the given dimension; a str sets the symbolic
        dim_param, an int sets the concrete dim_value
    """
    dim_proto = tensor.type.tensor_type.shape.dim[dim]
    if isinstance(value, str):
        dim_proto.dim_param = value
    else:
        dim_proto.dim_value = value
The provided code snippet includes necessary dependencies for implementing the `override_model_input_shape` function. Write a Python function `def override_model_input_shape(model: Union[str, onnx.ModelProto], shape: List[int])` to solve the following problem:
Set the shape of the first input of the given model to the given shape. If given a file, the file will be overwritten :param model: ONNX model or model path to overwrite :param shape: shape as list of integers to override with. must match existing dimensions
Here is the function:
def override_model_input_shape(model: Union[str, onnx.ModelProto], shape: List[int]):
    """
    Set the shape of the first input of the given model to the given shape.
    If given a file path, the file will be overwritten in place.

    :param model: ONNX model or model path to overwrite
    :param shape: shape as list of integers to override with; must match the
        number of existing dimensions
    """
    model_path = None
    if not isinstance(model, onnx.ModelProto):
        # loaded from disk -- remember the path so we can write back
        model_path = model
        model = onnx.load(model)
    first_input = model.graph.input[0]
    for dim_idx, dim_size in enumerate(shape):
        set_tensor_dim_shape(first_input, dim_idx, dim_size)
    if model_path:
        save_onnx(model, model_path)
21,441 | from typing import Tuple, Union
import numpy as np
import onnx
from sparseml.onnx.utils.graph_editor import (
ONNXGraph,
remove_node_and_params_from_graph,
swap_node_output,
update_model_param,
)
from sparseml.onnx.utils.helpers import (
BatchNormParams,
NodeParam,
conv_node_params,
get_batch_norm_params,
get_quantize_parent_for_dequantize_node,
)
def _fold_conv_bn(
    model: onnx.ModelProto,
    conv_node: onnx.NodeProto,
    bn_node: onnx.NodeProto,
) -> bool:
    """
    Folds the linear operations in bn_node into conv_node in place.

    :param model: The model to fold the node in
    :param conv_node: The conv node to fold in to
    :param bn_node: The batch norm node to fold
    :return: True if the fold succeeded, False otherwise
    """
    conv_params = conv_node_params(model, conv_node)
    bn_params = get_batch_norm_params(model, bn_node)
    folded_weight, folded_bias = _get_folded_conv_params(conv_params, bn_params)
    if folded_weight is None:
        # folding could not produce a valid weight; leave the graph untouched
        return False
    # write the folded weight back into the conv op
    update_model_param(model, folded_weight.name, folded_weight.val)
    if folded_bias is not None:
        bias_name = folded_bias.name
        if bias_name is None:
            # conv had no bias input; derive a name from the weight param
            # and attach it as a new input
            bias_name = folded_weight.name.split("weight")[0] + "bias"
            conv_node.input.append(bias_name)
        update_model_param(model, bias_name, folded_bias.val)
    # forward the folded conv outputs and drop the now-redundant bn op
    swap_node_output(conv_node, bn_node.output[0])
    remove_node_and_params_from_graph(model, bn_node)
    return True
The provided code snippet includes necessary dependencies for implementing the `fold_conv_bns` function. Write a Python function `def fold_conv_bns(onnx_file: Union[str, onnx.ModelProto]) -> onnx.ModelProto` to solve the following problem:
When a batch norm op is the only child operator of a conv op, this function will fold the batch norm into the conv and return the processed graph :param onnx_file: file path to ONNX model to process or in-memory ModelProto to be modified in-place :return: A loaded ONNX model with BatchNormalization ops folded into Conv ops where possible
Here is the function:
def fold_conv_bns(onnx_file: Union[str, onnx.ModelProto]) -> onnx.ModelProto:
    """
    When a batch norm op is the only child operator of a conv op, this function
    will fold the batch norm into the conv and return the processed graph.

    :param onnx_file: file path to ONNX model to process or in-memory ModelProto
        to be modified in-place
    :return: the model with BatchNormalization ops folded into Conv ops where
        possible, or None if no fold was performed
    """
    model = onnx_file if isinstance(onnx_file, onnx.ModelProto) else onnx.load(onnx_file)
    graph_modified = False
    for conv_node in [n for n in model.graph.node if n.op_type == "Conv"]:
        conv_output = conv_node.output[0]
        consumers = [n for n in model.graph.node if conv_output in n.input]
        # fold only when the batch norm is the sole consumer of the conv output
        if len(consumers) != 1 or consumers[0].op_type != "BatchNormalization":
            continue
        if _fold_conv_bn(model, conv_node, consumers[0]):
            graph_modified = True
    return model if graph_modified else None
21,442 | from typing import Tuple, Union
import numpy as np
import onnx
from sparseml.onnx.utils.graph_editor import (
ONNXGraph,
remove_node_and_params_from_graph,
swap_node_output,
update_model_param,
)
from sparseml.onnx.utils.helpers import (
BatchNormParams,
NodeParam,
conv_node_params,
get_batch_norm_params,
get_quantize_parent_for_dequantize_node,
)
class ONNXGraph(object):
"""
Class for quick look-up of ONNX graph nodes and initializers. If graph state
changes outside of ONNXGraph class functions, update() should be called.
:param model: the ONNX graph to represent
"""
def __init__(self, model: ModelProto):
self._model = model
self._output_id_to_node = {}
self._input_id_to_nodes = defaultdict(list)
self._name_to_initializer = {}
self.update()
def nodes(self) -> Iterable[NodeProto]:
"""
:return: ordered collection of nodes in this graph
"""
return self._model.graph.node
def update(self, model: Optional[ModelProto] = None):
"""
Update the graph state based on the model this graph represents or
the given model.
:param model: model to represent. defaults to current loaded model state
"""
self._model = model or self._model
# nodes
self._output_id_to_node = {}
self._input_id_to_nodes = defaultdict(list)
for node in self._model.graph.node:
self._store_node_edges(node)
# initializers
self._name_to_initializer = {
init.name: init for init in self._model.graph.initializer
}
def get_init_by_name(
self,
name: str,
allow_optional: bool = True,
) -> Optional[TensorProto]:
"""
:param name: name of initializer
:param allow_optional: if True and the given name is not found as an
initializer, None will be returned. Otherwise a KeyError will be raised
:return: tensor of initializer with given name, returns None if the name does
not exist in the cached graph
"""
init = self._name_to_initializer.get(name, None)
if not allow_optional and init is None:
raise KeyError(f"Unable to find initializer {name} in ONNX model")
return init
def get_node_by_output_id(self, id: str) -> Optional[TensorProto]:
"""
:param id: name of output id of node
:return: the associated node if it is present in the graph, None otherwise
"""
return self._output_id_to_node.get(id)
def get_node_parents(
self, node: NodeProto
) -> List[Union[NodeProto, TensorProto, None]]:
"""
:param node: node to get the input objects for
:return: input nodes or tensors of this node in order. if an input does not
exist, None will be returned in its place
"""
inputs = []
for input_id in node.input:
inp = None
if input_id in self._output_id_to_node:
inp = self._output_id_to_node[input_id]
elif input_id in self._name_to_initializer:
inp = self._name_to_initializer[input_id]
inputs.append(inp)
return inputs
def get_node_single_parent(
self, node: NodeProto, index: int
) -> Union[NodeProto, None]:
"""
:param node: the node to get the parent node of
:param index: choose which input to search
:return: parent of node if it only has one parent, otherwise None
"""
input_id = node.input[index]
if input_id not in self._output_id_to_node:
return None
return self._output_id_to_node[input_id]
def get_node_children(self, node: NodeProto) -> List[NodeProto]:
"""
:param node: the node to get the children node of
:return: list of nodes that include this node as an output
"""
children = []
for output_id in node.output:
children.extend(self._input_id_to_nodes[output_id])
return children
def get_node_single_child(self, node: NodeProto) -> Union[NodeProto, None]:
"""
:param node: the node to get the child node of
:return: child of node if it only has one child, otherwise None
"""
children = self.get_node_children(node)
return children[0] if len(children) == 1 else None
def add_node(self, node: NodeProto):
"""
Adds the given node to the model and graph state
:param node: node to add to the model
"""
self._model.graph.node.append(node)
self._store_node_edges(node)
def update_node_input(
self, node: NodeProto, input_id: str, input_idx: Optional[int] = None
):
"""
:param node: node to update the inputs of
:param input_id: new input_id to attach to the node
:param input_idx: optional index of the node input list to update,
if none is given, the new input id will be appended to the input list
"""
if input_idx is not None:
if node in self._input_id_to_nodes[node.input[input_idx]]:
self._input_id_to_nodes[node.input[input_idx]].remove(node)
node.input[input_idx] = input_id
else:
node.input.append(input_id)
self._input_id_to_nodes[input_id].append(node)
def delete_node(self, node: NodeProto):
"""
deletes the given node from the graph
:param node: node to delete
"""
self._model.graph.node.remove(node)
self._delete_node_edges(node)
def delete_nodes(self, nodes: List[NodeProto]):
"""
deletes the given nodes from the graph
:param nodes: list of nodes to delete
"""
node_output_ids_to_delete = {node.output[0] for node in nodes}
nodes_to_keep = []
for node in self._model.graph.node:
if node.output[0] in node_output_ids_to_delete:
self._delete_node_edges(node)
else:
nodes_to_keep.append(node)
self._model.graph.ClearField("node")
self._model.graph.node.extend(nodes_to_keep)
def delete_initializers(self, initializers: List[Union[str, TensorProto]]):
"""
deletes the given initializers from the model
:param initializers: list of initializers or initializer names to delete
"""
inits_to_delete = {
init if isinstance(init, str) else init.name for init in initializers
}
inits_to_keep = []
for init in self._model.graph.initializer:
if init.name in inits_to_delete:
# keep edge reference if nodes in the graph still point to the
# initializer name
if not self._input_id_to_nodes[init.name]:
del self._input_id_to_nodes[init.name]
del self._name_to_initializer[init.name]
else:
inits_to_keep.append(init)
self._model.graph.ClearField("initializer")
self._model.graph.initializer.extend(inits_to_keep)
def delete_unused_initializers(self):
"""
deletes tensors in the initializer list that are not listed as inputs to any
node in the current graph state or directly passed as model outputs
"""
output_names = {out.name for out in self._model.graph.output}
self.delete_initializers(
[
init
for init in self._model.graph.initializer
if not self._input_id_to_nodes[init.name]
and (init.name not in output_names)
]
) # delete inits that have no edge
def find_orphaned_nodes(self, node: NodeProto) -> List[NodeProto]:
"""
Given a node, that is to be removed from the graph, find all nodes that
will be orphaned as a result of the removal. Orphaned nodes are nodes
that will have no inputs after the removal of the given node.
The method traverses the graph upwards from the given node until
a node with multiple outputs is found. All nodes that are traversed
are considered orphaned and will be removed.
:param node: The node to remove
:return: A tuple of the model and a list of orphaned nodes
"""
nodes_to_delete = [node]
# start queue with previous positions input node
queue = [node]
while queue:
current_node = queue.pop(0)
if not isinstance(current_node, NodeProto):
continue
node_parents = self.get_node_parents(current_node)
# if node parent has only one output (current child)
# than it is orphaned and will be removed.
# continue traversing the graph upwards until
# a node with output that is not current child is found
for parent in node_parents:
if not isinstance(parent, NodeProto):
# if parent is not a node, it is a graph input
# and should not be removed
continue
elif parent.op_type == "Constant":
# if constant node is found,
# automatically remove it and continue traversing
nodes_to_delete.append(parent)
parent_output_node_names = set(
n.name for n in self.get_node_parents(node=parent)
)
if len(parent_output_node_names) == 1:
# if parent has only one output, it is orphaned
queue.append(parent)
nodes_to_delete.append(parent)
elif not parent_output_node_names.difference(
set(n.name for n in nodes_to_delete)
):
# if parent has multiple outputs, but they are all already in the
# nodes_to_delete list, it is orphaned
queue.append(parent)
nodes_to_delete.append(parent)
return nodes_to_delete
def sort_nodes_topologically(self):
"""
Sorts the order of the graph Node repeated field in place in topological
order as per the ONNX Model proto specifications
"""
if len(self._model.graph.node) == 1:
return
# build toposort DAG input and sort
model_dag = defaultdict(set) # node_id -> dependencies
for parent_node_id, child_nodes in self._input_id_to_nodes.items():
if parent_node_id not in self._output_id_to_node:
continue # parent is an initializer, not node
# standardize all references to nodes by their first output id
parent_node_id = self._output_id_to_node[parent_node_id].output[0]
for child_node in child_nodes:
model_dag[child_node.output[0]].add(parent_node_id)
sorted_node_ids = toposort_flatten(model_dag)
# deduplicate any nodes from the sorted list
updated_node_list = []
seen_ids = set()
for node_id in sorted_node_ids:
if node_id in seen_ids:
continue # a node could have multiple ids, all ids will be updated
node = self._output_id_to_node[node_id]
updated_node_list.append(node)
seen_ids.update(node.output)
# update model node list with topo sorted list
assert len(updated_node_list) == len(self._model.graph.node)
self._model.graph.ClearField("node")
self._model.graph.node.extend(updated_node_list)
def get_orphaned_nodes(
self,
graph_output_ids: Optional[Iterable[str]] = None,
) -> List[NodeProto]:
"""
:param graph_output_ids: iterable of output ids in graph. if not supplied,
will be read from the model
:return: list of all nodes in the graph that are not inputs to
other nodes or outputs of the graph
"""
if graph_output_ids is None:
graph_output_ids = {output.name for output in self._model.graph.output}
orphaned_nodes = []
for node in self.nodes:
node_is_orphaned = True
# iterate over possible output ids, in practice, there is almost
# always only 1
for out_id in node.output:
if out_id in self._input_id_to_nodes or out_id in graph_output_ids:
node_is_orphaned = False
if node_is_orphaned:
orphaned_nodes.append(node)
return orphaned_nodes
def delete_orphaned_node_branches(self):
"""
Deletes all nodes in the graph that are not inputs to other nodes or outputs of
the graph. Additionally deletes all nodes that would become orphaned
after the node deletion until the graph contains no orphaned nodes
"""
graph_output_ids = {output.name for output in self._model.graph.output}
orphaned_nodes = self.get_orphaned_nodes(graph_output_ids=graph_output_ids)
while orphaned_nodes:
# no need to refresh self, delete nodes should update internal graph edges
self.delete_nodes(orphaned_nodes)
self.update()
self.delete_unused_initializers()
# update now orphaned nodes, can only run up to len(nodes) times
orphaned_nodes = self.get_orphaned_nodes(graph_output_ids=graph_output_ids)
def _store_node_edges(self, node: NodeProto):
for output_id in node.output:
self._output_id_to_node[output_id] = node
for input_id in node.input:
self._input_id_to_nodes[input_id].append(node)
def _delete_node_edges(self, node: NodeProto):
    """Remove every cached edge entry that references ``node``."""
    # drop producer entries for each of the node's outputs
    for out_id in node.output:
        del self._output_id_to_node[out_id]
    # drop this node from the consumer list of each of its inputs
    for in_id in node.input:
        self._input_id_to_nodes[in_id].remove(node)
def get_quantize_parent_for_dequantize_node(
    quantized_model: ModelProto, dequantize_node: NodeProto
) -> Union[NodeProto, None]:
    """
    Walk up the first-input ancestor chain of ``dequantize_node`` until a
    QuantizeLinear node is found.

    :param quantized_model: the model the de-quantize node belongs to
    :param dequantize_node: node to find an associated quantize node for
    :return: the first QuantizeLinear ancestor, or None if the chain ends
        without finding one
    """
    node = dequantize_node
    while node is not None:
        if node.op_type == "QuantizeLinear":
            return node
        # follow only the first input edge at every step
        parents = get_node_input_nodes(quantized_model, node)
        node = parents[0] if parents else None
    return None
The provided code snippet includes necessary dependencies for implementing the `quantize_resnet_identity_add_inputs` function. Write a Python function `def quantize_resnet_identity_add_inputs(quantized_model: onnx.ModelProto) -> bool` to solve the following problem:
To avoid storing the identity value of a ResNet block in fp32, this optimization will pass the identity value through the same quantize operation as the ResNet block and add a de-quantize operation for the identity before the add. Function will match to any add operation whose inputs are the output of a relu or add op and a quantize -> de-quantize block that takes the same relu as input. Performs this optimization in place. :param quantized_model: A loaded quantized model to perform this optimization on :return: True if an in-place optimization was made
Here is the function:
def quantize_resnet_identity_add_inputs(quantized_model: onnx.ModelProto) -> bool:
    """
    To avoid storing the identity value of a ResNet block in fp32, this
    optimization passes the identity value through the same quantize operation
    as the ResNet block and adds a de-quantize operation for the identity
    before the add. Matches any Add whose inputs are (a) the output of a Relu
    or Add op and (b) a quantize -> de-quantize block fed by that same op.
    Performs the optimization in place.

    :param quantized_model: a loaded quantized model to optimize in place
    :return: True if at least one in-place optimization was made
    """
    add_nodes = [node for node in quantized_model.graph.node if node.op_type == "Add"]
    optimization_made = False
    for add_node in add_nodes:
        # rebuild the graph view every iteration: nodes are appended below,
        # so a cached view would go stale
        graph = ONNXGraph(quantized_model)
        add_inputs = [
            i for i in graph.get_node_parents(add_node) if isinstance(i, onnx.NodeProto)
        ]
        if len(add_inputs) != 2:
            continue
        # extract dequantize input and relu/add input
        dequantize_node = [i for i in add_inputs if i.op_type == "DequantizeLinear"]
        other_input_node = [i for i in add_inputs if i.op_type in ["Add", "Relu"]]
        if not dequantize_node or not other_input_node:  # pattern not matched
            continue
        dequantize_node = dequantize_node[0]  # unwrap
        other_input_node = other_input_node[0]  # unwrap
        quantize_node = get_quantize_parent_for_dequantize_node(
            quantized_model, dequantize_node
        )
        # check that the quantize block takes input from the same relu/add
        if (
            quantize_node is None
            or quantize_node.input[0] != other_input_node.output[0]
        ):
            continue
        # create de-quantize node for the identity; reusing the quantize
        # node's scale/zero-point inputs keeps both paths on the same
        # quantization parameters
        identity_dequantize_inputs = [quantize_node.output[0]] + quantize_node.input[1:]
        dequantize_identity_output_name = "{}_identity_dequantized".format(
            other_input_node.output[0]
        )
        dequantize_identity_node_name = "{}_identity_dequantized".format(
            other_input_node.output[0]
        )
        identity_dequantize_node = onnx.helper.make_node(
            "DequantizeLinear",
            identity_dequantize_inputs,
            [dequantize_identity_output_name],
            dequantize_identity_node_name,
        )
        quantized_model.graph.node.append(identity_dequantize_node)
        # swap the relu input for the de-quantized identity in the add
        relu_input_idx = [
            i
            for i, inp in enumerate(add_node.input)
            if inp == other_input_node.output[0]
        ][0]
        add_node.input[relu_input_idx] = dequantize_identity_output_name
        optimization_made = True
    return optimization_made
21,443 | from typing import Tuple, Union
import numpy as np
import onnx
from sparseml.onnx.utils.graph_editor import (
ONNXGraph,
remove_node_and_params_from_graph,
swap_node_output,
update_model_param,
)
from sparseml.onnx.utils.helpers import (
BatchNormParams,
NodeParam,
conv_node_params,
get_batch_norm_params,
get_quantize_parent_for_dequantize_node,
)
def _make_dequant_node_for_quant(quant_node: onnx.NodeProto) -> onnx.NodeProto:
    """
    Build a DequantizeLinear node that undoes ``quant_node``, reusing its
    scale/zero-point inputs and consuming its output.
    """
    quant_output = quant_node.output[0]
    # inputs: quantized tensor followed by the quantize node's scale/zero-point
    dequant_inputs = [quant_output, *quant_node.input[1:]]
    return onnx.helper.make_node(
        "DequantizeLinear",
        dequant_inputs,
        [f"{quant_output}_dequantized"],  # output name
        f"{quant_node.name or quant_output}_dequantized",  # node name
    )
21,444 | from collections import defaultdict
from typing import Iterable, List, Optional, Union
import numpy
import onnx
from onnx import ModelProto, NodeProto, TensorProto, numpy_helper
from toposort import toposort_flatten
from sparseml.onnx.utils.helpers import get_node_params
def _override_tensor_batch_dim(model, tensor, batch_size):
for init in model.graph.initializer:
if init.name == tensor.name:
# This tensor is actually an initializer => skip
return
shape = tensor.type.tensor_type.shape
# skip tensors with variable batch sizes
if not shape.dim[0].dim_param and shape.dim[0].dim_value > 0:
shape.dim[0].dim_value = batch_size
The provided code snippet includes necessary dependencies for implementing the `override_model_batch_size` function. Write a Python function `def override_model_batch_size(model: ModelProto, batch_size: int) -> ModelProto` to solve the following problem:
Rewrites any positive batch dimensions in the model inputs or outputs to the given batch_size :param model: Model to modify :param batch_size: Batch size to enforce :return: the given model with inputs and outputs set to batch_size if the batch dimensions are not -1.
Here is the function:
def override_model_batch_size(model: ModelProto, batch_size: int) -> ModelProto:
    """
    Rewrite any positive, concrete batch dimensions in the model's inputs and
    outputs to ``batch_size``. The model is modified in place.

    :param model: model to modify
    :param batch_size: batch size to enforce
    :return: the same ``model`` instance, with inputs and outputs set to
        batch_size wherever their batch dimension was a positive constant
    """
    for tensor in model.graph.input:
        # This may not work for ONNX graphs that have hard-coded reshape nodes
        _override_tensor_batch_dim(model, tensor, batch_size)
    # Do the same for outputs
    for tensor in model.graph.output:
        # Ignore augmented _Reduce nodes
        if "_Reduce" not in tensor.name:
            _override_tensor_batch_dim(model, tensor, batch_size)
    # bug fix: the signature and docstring promise the model back, but the
    # original fell off the end and returned None
    return model
21,445 | from collections import defaultdict
from typing import Iterable, List, Optional, Union
import numpy
import onnx
from onnx import ModelProto, NodeProto, TensorProto, numpy_helper
from toposort import toposort_flatten
from sparseml.onnx.utils.helpers import get_node_params
def update_model_param(
    model: ModelProto,
    param_name: str,
    val: numpy.ndarray,
) -> None:
    """
    Replace the initializer named ``param_name`` in ``model`` with ``val``.

    The first initializer with a matching name (if any) is removed, then a
    new initializer built from ``val`` is appended under the same name.

    :param model: the model to update
    :param param_name: the parameter name in the model to update
    :param val: the new value of the parameter
    """
    existing = next(
        (param for param in model.graph.initializer if param.name == param_name),
        None,
    )
    if existing is not None:
        model.graph.initializer.remove(existing)
    model.graph.initializer.append(numpy_helper.from_array(val, param_name))
def prune_unstructured(array: numpy.ndarray, sparsity: float) -> numpy.ndarray:
    """
    Magnitude-prune a numpy array to the given unstructured sparsity:
    the lowest-absolute-value elements are set to zero. Elements whose
    magnitude equals the threshold are kept.

    :param array: the array to prune; a copy is made, the input is untouched
    :param sparsity: target sparsity as a decimal in [0, 1]
    :return: the pruned copy of the array
    """
    pruned = numpy.array(array)  # copy: onnx-backed arrays are read-only
    threshold_index = int(round(sparsity * pruned.size) - 1)
    if threshold_index < 0:
        # sparsity rounds to zero elements -- nothing to prune
        return pruned
    magnitudes = numpy.abs(pruned.flatten())
    # partition places the k-th order statistic at index k (same value as a
    # full sort would give there)
    threshold = numpy.partition(magnitudes, threshold_index)[threshold_index]
    pruned[numpy.abs(pruned) < threshold] = 0
    return pruned
def get_node_params(
    model: ModelProto, node: NodeProto, include_values: bool = True
) -> Tuple[NodeParam, Union[NodeParam, None]]:
    """
    Get the params (weight and optional bias) for a node in an ONNX
    ModelProto. The node's op_type must be Conv, Gemm, or MatMul
    (case-insensitive).

    :param model: the model proto loaded from the ONNX file
    :param node: the node to get the params for
    :param include_values: True to include the param values as NumPy arrays
        in the returned NodeParam objects; False leaves NodeParam.val as None
    :return: a tuple of (weight, bias-or-None)
    :raises ValueError: if the node's op_type is unsupported
    """
    node_id = extract_node_id(node)
    op_type = str(node.op_type).lower()
    param_getters = {
        "conv": conv_node_params,
        "gemm": gemm_node_params,
        "matmul": matmul_node_params,
    }
    getter = param_getters.get(op_type)
    if getter is None:
        raise ValueError(
            (
                "node_id of {} is not a supported node (conv, gemm, matmul) "
                "for params: {}"
            ).format(node_id, node)
        )
    return getter(model, node, include_values)
The provided code snippet includes necessary dependencies for implementing the `prune_model_one_shot` function. Write a Python function `def prune_model_one_shot( model: ModelProto, nodes: List[NodeProto], sparsity: Union[float, List[float]] )` to solve the following problem:
Prune a model in-place with one shot pruning (no retraining) according to magnitude pruning. Does so in an unstructured way currently :param model: the model to apply pruning to :param nodes: the nodes within the model to prune to the desired sparsities :param sparsity: the sparsity level to prune all nodes to if a float, or the sparsity level to prune each node to if a list of floats :return: the new, pruned model
Here is the function:
def prune_model_one_shot(
    model: ModelProto, nodes: List[NodeProto], sparsity: Union[float, List[float]]
):
    """
    Prune a model in place with one-shot pruning (no retraining) according to
    unstructured magnitude pruning. The model's initializers are updated in
    place; nothing is returned (the original docstring's claim of a returned
    model was wrong -- the function has always mutated in place).

    :param model: the model to apply pruning to
    :param nodes: the nodes within the model to prune
    :param sparsity: one sparsity applied to every node if a float, or a
        per-node list of sparsities
    :raises ValueError: if a sparsity list's length does not match ``nodes``
    """
    if not isinstance(sparsity, Iterable):
        # broadcast a scalar sparsity across every node
        sparsity = [float(sparsity)] * len(nodes)
    if len(nodes) != len(sparsity):
        raise ValueError(
            "len(nodes) {} does not match len(sparsity) {}".format(
                len(nodes), len(sparsity)
            )
        )
    # fix: the loop previously rebound `sparsity` itself each iteration,
    # shadowing the list being zipped over
    for node, node_sparsity in zip(nodes, sparsity):
        weight, _ = get_node_params(model, node)
        pruned_weight_val = prune_unstructured(weight.val, node_sparsity)
        update_model_param(model, weight.name, pruned_weight_val)
21,446 | from collections import defaultdict
from typing import Iterable, List, Optional, Union
import numpy
import onnx
from onnx import ModelProto, NodeProto, TensorProto, numpy_helper
from toposort import toposort_flatten
from sparseml.onnx.utils.helpers import get_node_params
def update_model_param(
    model: ModelProto,
    param_name: str,
    val: numpy.ndarray,
) -> None:
    """
    Replace the initializer named ``param_name`` in ``model`` with ``val``.

    The first initializer with a matching name (if any) is removed, then a
    new initializer built from ``val`` is appended under the same name.

    :param model: the model to update
    :param param_name: the parameter name in the model to update
    :param val: the new value of the parameter
    """
    existing = next(
        (param for param in model.graph.initializer if param.name == param_name),
        None,
    )
    if existing is not None:
        model.graph.initializer.remove(existing)
    model.graph.initializer.append(numpy_helper.from_array(val, param_name))
def prune_unstructured(array: numpy.ndarray, sparsity: float) -> numpy.ndarray:
    """
    Magnitude-prune a numpy array to the given unstructured sparsity:
    the lowest-absolute-value elements are set to zero. Elements whose
    magnitude equals the threshold are kept.

    :param array: the array to prune; a copy is made, the input is untouched
    :param sparsity: target sparsity as a decimal in [0, 1]
    :return: the pruned copy of the array
    """
    pruned = numpy.array(array)  # copy: onnx-backed arrays are read-only
    threshold_index = int(round(sparsity * pruned.size) - 1)
    if threshold_index < 0:
        # sparsity rounds to zero elements -- nothing to prune
        return pruned
    magnitudes = numpy.abs(pruned.flatten())
    # partition places the k-th order statistic at index k (same value as a
    # full sort would give there)
    threshold = numpy.partition(magnitudes, threshold_index)[threshold_index]
    pruned[numpy.abs(pruned) < threshold] = 0
    return pruned
def get_node_params(
    model: ModelProto, node: NodeProto, include_values: bool = True
) -> Tuple[NodeParam, Union[NodeParam, None]]:
    """
    Get the params (weight and optional bias) for a node in an ONNX
    ModelProto. The node's op_type must be Conv, Gemm, or MatMul
    (case-insensitive).

    :param model: the model proto loaded from the ONNX file
    :param node: the node to get the params for
    :param include_values: True to include the param values as NumPy arrays
        in the returned NodeParam objects; False leaves NodeParam.val as None
    :return: a tuple of (weight, bias-or-None)
    :raises ValueError: if the node's op_type is unsupported
    """
    node_id = extract_node_id(node)
    op_type = str(node.op_type).lower()
    param_getters = {
        "conv": conv_node_params,
        "gemm": gemm_node_params,
        "matmul": matmul_node_params,
    }
    getter = param_getters.get(op_type)
    if getter is None:
        raise ValueError(
            (
                "node_id of {} is not a supported node (conv, gemm, matmul) "
                "for params: {}"
            ).format(node_id, node)
        )
    return getter(model, node, include_values)
The provided code snippet includes necessary dependencies for implementing the `prune_model_one_shot_iter` function. Write a Python function `def prune_model_one_shot_iter( model: ModelProto, nodes: List[NodeProto], sparsity: Union[float, List[float]] )` to solve the following problem:
Iteratively prune a model in-place with one shot pruning (no retraining) according to magnitude pruning. Does so in an unstructured way currently :param model: the model to apply pruning to :param nodes: the nodes within the model to prune to the desired sparsities :param sparsity: the sparsity level to prune all nodes to if a float, or the sparsity level to prune each node to if a list of floats
Here is the function:
def prune_model_one_shot_iter(
    model: ModelProto, nodes: List[NodeProto], sparsity: Union[float, List[float]]
):
    """
    Iteratively prune a model in place with one-shot pruning (no retraining)
    according to unstructured magnitude pruning, yielding progress after each
    node so callers can drive a progress bar.

    :param model: the model to apply pruning to
    :param nodes: the nodes within the model to prune
    :param sparsity: one sparsity applied to every node if a float, or a
        per-node list of sparsities
    :return: generator yielding the completed fraction in (0, 1] per node
    :raises ValueError: if a sparsity list's length does not match ``nodes``
    """
    if not isinstance(sparsity, Iterable):
        # broadcast a scalar sparsity across every node
        sparsity = [float(sparsity)] * len(nodes)
    if len(nodes) != len(sparsity):
        raise ValueError(
            "len(nodes) {} does not match len(sparsity) {}".format(
                len(nodes), len(sparsity)
            )
        )
    # fix: the loop previously rebound `sparsity` itself each iteration,
    # shadowing the list being zipped over
    num_nodes = len(nodes)
    for index, (node, node_sparsity) in enumerate(zip(nodes, sparsity)):
        weight, _ = get_node_params(model, node)
        pruned_weight_val = prune_unstructured(weight.val, node_sparsity)
        update_model_param(model, weight.name, pruned_weight_val)
        yield (index + 1) / num_nodes
21,447 | import logging
from typing import Any, Dict, Iterable, List, Optional, Tuple
import numpy as np
import torch
from tqdm import tqdm
from sparseml.core.model.base import ModifiableModel
from sparseml.core.state import State
from sparseml.modifiers.pruning.wanda.base import WandaPruningModifier
from sparseml.modifiers.pruning.wanda.utils.wanda_wrapper import WandaWrapper
from sparseml.modifiers.utils.layer_compressor import LayerCompressor
from sparseml.modifiers.utils.pytorch_helpers import run_calibration_forward
from sparseml.utils.pytorch.module import get_prunable_layers
def _get_activations(model, data_loader, nsamples=128):
    """
    Collect per-layer input-activation statistics for every non-lm_head
    Linear layer in ``model`` by running ``data_loader`` through it with
    forward-pre-hooks attached.

    :param model: torch module to probe; put into eval mode
    :param data_loader: iterable of dict batches, fed as ``model(**batch)``
    :param nsamples: normalization constant for the running statistic.
        NOTE(review): it does not cap how many batches are consumed -- the
        entire data_loader is iterated; confirm callers pre-slice it.
    :return: dict mapping layer name -> accumulated activation-norm tensor
    """
    import functools

    model.eval()
    acts = {}

    def save_acts(module, input, name):
        # forward-pre-hooks receive (module, input); `name` is bound as an
        # extra argument via functools.partial below
        if isinstance(input, tuple):
            input = input[0]
        # assumes input is (batch, seq, hidden): summing dims (0, 1) leaves
        # one value per hidden unit -- TODO confirm for non-3D inputs
        if name not in acts:
            acts[name] = 1.0 / nsamples * input.detach().pow(2).sum(dim=(0, 1)).sqrt()
        else:
            acts[name] += 1.0 / nsamples * input.detach().pow(2).sum(dim=(0, 1)).sqrt()

    hooks = []
    for name, mod in model.named_modules():
        if isinstance(mod, torch.nn.Linear) and "lm_head" not in name:
            hooks.append(
                mod.register_forward_pre_hook(functools.partial(save_acts, name=name))
            )
    device = next(model.parameters()).device
    for batch in tqdm(data_loader):
        batch = {k: v.to(device) for k, v in batch.items()}
        model(**batch)
        batch = None  # drop the reference before clearing the CUDA cache
        torch.cuda.empty_cache()
    # detach all hooks so the model is left unmodified
    for h in hooks:
        h.remove()
    return acts
21,448 | import math
import re
from dataclasses import dataclass
from typing import Any, Callable, Dict
from sparseml.core import Event, State
class PruningCreateSettings:
start: float
end: float
update: float
init_sparsity: float
final_sparsity: float
args: Dict[str, Any]
SchedulerCalculationType = Callable[[Event, State], float]
def create_custom_scheduler(
    scheduler_type: str, settings: PruningCreateSettings
) -> SchedulerCalculationType:
    """
    Build a sparsity scheduler from a ``calc(<expression>)`` string. The
    inner expression is evaluated on every call with the schedule settings
    (start, end, update, init_sparsity, final_sparsity, any extra args) and
    the event's current index bound as local names, plus ``math`` available.

    :param scheduler_type: string of the form ``calc(<python expression>)``
    :param settings: schedule settings exposed to the expression
    :return: callable computing the sparsity for a given event/state
    :raises ValueError: if scheduler_type is not a valid calc(...) string
    """
    # the expression must be wrapped as calc(...); capture the inner text
    pattern = re.compile(r"calc\(([^()]*)\)")
    match = pattern.search(scheduler_type)
    if not match:
        raise ValueError(f"invalid calc string {scheduler_type}")
    inner_expr = match.group(1)

    def _schedule(event: Event, state: State):
        # SECURITY: eval of recipe-provided text. Only "math" is exposed as a
        # global, but this must never be fed untrusted input.
        return eval(
            inner_expr,
            {"math": math},
            {
                "start": settings.start,
                "end": settings.end,
                "update": settings.update,
                "init_sparsity": settings.init_sparsity,
                "final_sparsity": settings.final_sparsity,
                # user-supplied args can shadow the names above
                **(settings.args if settings.args else {}),
                "index": event.current_index,
            },
        )

    return _schedule
21,449 | import math
import re
from dataclasses import dataclass
from typing import Any, Callable, Dict
from sparseml.core import Event, State
class PruningCreateSettings:
start: float
end: float
update: float
init_sparsity: float
final_sparsity: float
args: Dict[str, Any]
SchedulerCalculationType = Callable[[Event, State], float]
def linear_scheduler(settings: PruningCreateSettings) -> SchedulerCalculationType:
    """
    Linear sparsity schedule: interpolates from ``init_sparsity`` at
    ``start`` to ``final_sparsity`` at ``end`` based on the event's
    current index.
    """

    def _schedule(event: Event, state: State) -> float:
        progress = (event.current_index - settings.start) / (
            settings.end - settings.start
        )
        sparsity_span = settings.final_sparsity - settings.init_sparsity
        return settings.init_sparsity + sparsity_span * progress

    return _schedule
21,450 | import math
import re
from dataclasses import dataclass
from typing import Any, Callable, Dict
from sparseml.core import Event, State
class PruningCreateSettings:
start: float
end: float
update: float
init_sparsity: float
final_sparsity: float
args: Dict[str, Any]
SchedulerCalculationType = Callable[[Event, State], float]
def polynomial_decay_scheduler(
    settings: PruningCreateSettings,
) -> SchedulerCalculationType:
    """
    Polynomial-decay sparsity schedule driven by ``args["exponent"]``
    (default 2): interpolation factor is ``(progress - 1) ** exponent + 1``,
    which has zero slope at ``end``.

    NOTE(review): for even exponents this factor is 2 at progress == 0
    (starting sparsity overshoots init_sparsity) -- confirm odd exponents
    (e.g. the cubic schedule) are the intended use.
    """
    exponent = (settings.args or {}).get("exponent", 2)

    def _schedule(event: Event, state: State) -> float:
        progress = (event.current_index - settings.start) / (
            settings.end - settings.start
        )
        decay = pow(progress - 1, exponent) + 1
        return (
            settings.init_sparsity
            + (settings.final_sparsity - settings.init_sparsity) * decay
        )

    return _schedule
def cubic_scheduler(settings: PruningCreateSettings) -> SchedulerCalculationType:
    """
    Cubic sparsity schedule: polynomial decay with exponent 3.

    NOTE(review): this overwrites ``settings.args`` with {"exponent": 3},
    discarding any caller-supplied args -- confirm that is intended.
    """
    settings.args = {"exponent": 3}
    return polynomial_decay_scheduler(settings)
21,451 | import math
import re
from dataclasses import dataclass
from typing import Any, Callable, Dict
from sparseml.core import Event, State
class PruningCreateSettings:
SchedulerCalculationType = Callable[[Event, State], float]
def polynomial_scheduler(settings: PruningCreateSettings) -> SchedulerCalculationType:
    """
    Polynomial sparsity schedule: interpolation factor is
    ``progress ** exponent`` (``args["exponent"]``, default 2), so sparsity
    ramps slowly at first and accelerates toward ``end``.
    """
    exponent = (settings.args or {}).get("exponent", 2)

    def _schedule(event: Event, state: State) -> float:
        progress = (event.current_index - settings.start) / (
            settings.end - settings.start
        )
        return (
            settings.init_sparsity
            + (settings.final_sparsity - settings.init_sparsity)
            * progress**exponent
        )

    return _schedule
21,452 | import math
import re
from dataclasses import dataclass
from typing import Any, Callable, Dict
from sparseml.core import Event, State
class PruningCreateSettings:
SchedulerCalculationType = Callable[[Event, State], float]
def multi_step_scheduler(settings: PruningCreateSettings) -> SchedulerCalculationType:
    """
    Step sparsity schedule: sparsity jumps to a new value at each
    ``(index, sparsity)`` milestone in ``args["steps"]``; before the first
    milestone it is ``init_sparsity``.
    """
    milestones = sorted(
        (settings.args or {}).get("steps", []), key=lambda step: step[0]
    )

    def _schedule(event: Event, state: State) -> float:
        sparsity = settings.init_sparsity
        # milestones are sorted ascending, so the last one at-or-before the
        # current index wins
        for milestone_index, milestone_sparsity in milestones:
            if event.current_index >= milestone_index:
                sparsity = milestone_sparsity
        return sparsity

    return _schedule
21,453 | from dataclasses import dataclass
from typing import Dict
import torch
from pydantic import BaseModel
from torch.nn import Module, Parameter
from torch.utils.hooks import RemovableHandle
from sparseml.core import ModelParameterizedLayer
The provided code snippet includes necessary dependencies for implementing the `param_mask_name` function. Write a Python function `def param_mask_name() -> str` to solve the following problem:
Name to use for mask buffer on a sparse layer
Here is the function:
def param_mask_name() -> str:
    """Return the canonical buffer name used for a pruning mask on a layer."""
    return "mask"
21,454 | from dataclasses import dataclass
from typing import Dict
import torch
from pydantic import BaseModel
from torch.nn import Module, Parameter
from torch.utils.hooks import RemovableHandle
from sparseml.core import ModelParameterizedLayer
try:
import torch
_PARSED_TORCH_VERSION = version.parse(torch.__version__)
if _PARSED_TORCH_VERSION.major >= 2:
torch_compile_func = torch.compile
def raise_torch_compile_warning(*args, **kwargs):
warnings.warn("torch.compile is not supported by sparseml for torch 2.0.x")
return torch_compile_func(*args, **kwargs)
torch.compile = raise_torch_compile_warning
_BYPASS = bool(int(os.environ.get("NM_BYPASS_TORCH_VERSION", "0")))
if _PARSED_TORCH_VERSION.major == 1 and _PARSED_TORCH_VERSION.minor in [10, 11]:
if not _BYPASS:
raise RuntimeError(
"sparseml does not support torch==1.10.* or 1.11.*. "
f"Found torch version {torch.__version__}.\n\n"
"To bypass this error, set environment variable "
"`NM_BYPASS_TORCH_VERSION` to '1'.\n\n"
"Bypassing may result in errors or "
"incorrect behavior, so set at your own risk."
)
else:
warnings.warn(
"sparseml quantized onnx export does not work "
"with torch==1.10.* or 1.11.*"
)
except ImportError:
pass
def setup_mask_for_param(param: Parameter, mask: torch.Tensor) -> torch.Tensor:
    """
    Validate a pruning mask for a parameter and materialize it with the
    parameter's tensor properties (device/layout via ``new_tensor``).

    :param param: parameter the mask will be applied to
    :param mask: boolean tensor matching ``param``'s shape
    :return: a boolean mask tensor built from ``param.data``
    :raises ValueError: if the mask is missing, mis-shaped, or not boolean
    """
    if mask is None:
        raise ValueError("Mask cannot be None")
    if mask.shape != param.data.shape:
        raise ValueError(
            f"Mask shape {mask.shape} does not match param shape {param.data.shape}"
        )
    if mask.dtype != torch.bool:
        raise ValueError("Mask must be a boolean tensor")
    return param.data.new_tensor(mask, dtype=torch.bool)
21,455 | import re
from dataclasses import dataclass
from typing import Callable, Optional
import torch
from torch import Tensor
from torch.nn.parameter import Parameter
class PruningMaskCreatorArgs:
try:
import torch
_PARSED_TORCH_VERSION = version.parse(torch.__version__)
if _PARSED_TORCH_VERSION.major >= 2:
torch_compile_func = torch.compile
def raise_torch_compile_warning(*args, **kwargs):
torch.compile = raise_torch_compile_warning
_BYPASS = bool(int(os.environ.get("NM_BYPASS_TORCH_VERSION", "0")))
if _PARSED_TORCH_VERSION.major == 1 and _PARSED_TORCH_VERSION.minor in [10, 11]:
if not _BYPASS:
raise RuntimeError(
"sparseml does not support torch==1.10.* or 1.11.*. "
f"Found torch version {torch.__version__}.\n\n"
"To bypass this error, set environment variable "
"`NM_BYPASS_TORCH_VERSION` to '1'.\n\n"
"Bypassing may result in errors or "
"incorrect behavior, so set at your own risk."
)
else:
warnings.warn(
"sparseml quantized onnx export does not work "
"with torch==1.10.* or 1.11.*"
)
except ImportError:
pass
def unstructured_pruning(mask_structure: str):
    """
    Build an unstructured (elementwise) mask creator.

    :param mask_structure: must be exactly "unstructured"
    :return: callable mapping PruningMaskCreatorArgs to a boolean mask where
        False marks the lowest-score (pruned) elements
    :raises ValueError: if mask_structure is not "unstructured"
    """
    if mask_structure != "unstructured":
        raise ValueError(f"Invalid mask_structure: {mask_structure}")

    def _create_mask(args: PruningMaskCreatorArgs) -> Tensor:
        # number of elements to remove at the requested sparsity
        prune_elements = int(args.sparsity * args.scores.numel())
        # NOTE(review): this value is only used for its shape in the
        # else-branch below; prev_mask does not otherwise affect the result
        mask = (
            args.prev_mask
            if args.prev_mask is not None
            else torch.ones_like(args.parameter.data, dtype=torch.bool)
        )
        if prune_elements > 0:
            # threshold[-1] is the prune_elements-th smallest score
            threshold, _ = torch.topk(
                args.scores.view(-1), prune_elements, largest=False
            )
            # strict > : elements tied with the threshold score are pruned
            mask = (args.scores > threshold[-1]).to(dtype=torch.bool)
        else:
            # sparsity rounds to zero elements -- keep everything
            mask = torch.ones_like(mask, dtype=torch.bool)
        return mask

    return _create_mask
21,456 | import re
from dataclasses import dataclass
from typing import Callable, Optional
import torch
from torch import Tensor
from torch.nn.parameter import Parameter
class PruningMaskCreatorArgs:
parameter: Parameter
sparsity: float
scores: Tensor
prev_mask: Optional[Tensor] = None
try:
import torch
_PARSED_TORCH_VERSION = version.parse(torch.__version__)
if _PARSED_TORCH_VERSION.major >= 2:
torch_compile_func = torch.compile
def raise_torch_compile_warning(*args, **kwargs):
warnings.warn("torch.compile is not supported by sparseml for torch 2.0.x")
return torch_compile_func(*args, **kwargs)
torch.compile = raise_torch_compile_warning
_BYPASS = bool(int(os.environ.get("NM_BYPASS_TORCH_VERSION", "0")))
if _PARSED_TORCH_VERSION.major == 1 and _PARSED_TORCH_VERSION.minor in [10, 11]:
if not _BYPASS:
raise RuntimeError(
"sparseml does not support torch==1.10.* or 1.11.*. "
f"Found torch version {torch.__version__}.\n\n"
"To bypass this error, set environment variable "
"`NM_BYPASS_TORCH_VERSION` to '1'.\n\n"
"Bypassing may result in errors or "
"incorrect behavior, so set at your own risk."
)
else:
warnings.warn(
"sparseml quantized onnx export does not work "
"with torch==1.10.* or 1.11.*"
)
except ImportError:
pass
def channel_pruning(mask_structure: str, aggregate: str = "sum"):
    """
    Build a channel-granularity mask creator: whole slices along dim 0 of
    the scores are kept or pruned together.

    :param mask_structure: must be exactly "channel"
    :param aggregate: tensor reduction method name used to score each
        channel (e.g. "sum" or "mean")
    :return: callable mapping PruningMaskCreatorArgs to a boolean mask
    :raises ValueError: if mask_structure is not "channel"
    """
    if mask_structure != "channel":
        raise ValueError(f"Invalid mask_structure: {mask_structure}")

    def _aggregate(tensor, method="sum"):
        # collapse all dims except dim 0 into one score per slice; the mask
        # write below requires a rank-4 tensor (e.g. a conv weight)
        return getattr(tensor, method)(dim=(1, 2, 3))

    def _create_mask(args: PruningMaskCreatorArgs) -> Tensor:
        prune_channels = int(args.sparsity * args.scores.size(0))
        aggregated_scores = _aggregate(args.scores, aggregate)
        # lowest-scoring slices; topk with k == 0 yields an empty index
        # tensor, so low sparsities are a safe no-op
        _, top_indices = torch.topk(aggregated_scores, prune_channels, largest=False)
        mask = torch.ones_like(args.scores, dtype=torch.bool)
        mask[top_indices, :, :, :] = 0
        return mask

    return _create_mask
21,457 | import re
from dataclasses import dataclass
from typing import Callable, Optional
import torch
from torch import Tensor
from torch.nn.parameter import Parameter
class PruningMaskCreatorArgs:
parameter: Parameter
sparsity: float
scores: Tensor
prev_mask: Optional[Tensor] = None
try:
import torch
_PARSED_TORCH_VERSION = version.parse(torch.__version__)
if _PARSED_TORCH_VERSION.major >= 2:
torch_compile_func = torch.compile
def raise_torch_compile_warning(*args, **kwargs):
warnings.warn("torch.compile is not supported by sparseml for torch 2.0.x")
return torch_compile_func(*args, **kwargs)
torch.compile = raise_torch_compile_warning
_BYPASS = bool(int(os.environ.get("NM_BYPASS_TORCH_VERSION", "0")))
if _PARSED_TORCH_VERSION.major == 1 and _PARSED_TORCH_VERSION.minor in [10, 11]:
if not _BYPASS:
raise RuntimeError(
"sparseml does not support torch==1.10.* or 1.11.*. "
f"Found torch version {torch.__version__}.\n\n"
"To bypass this error, set environment variable "
"`NM_BYPASS_TORCH_VERSION` to '1'.\n\n"
"Bypassing may result in errors or "
"incorrect behavior, so set at your own risk."
)
else:
warnings.warn(
"sparseml quantized onnx export does not work "
"with torch==1.10.* or 1.11.*"
)
except ImportError:
pass
def filter_pruning(mask_structure: str, aggregate: str = "sum"):
    """
    Build a filter-granularity mask creator: whole slices along dim 1 of
    the scores are kept or pruned together.

    :param mask_structure: must be exactly "filter"
    :param aggregate: tensor reduction method name used to score each
        slice (e.g. "sum" or "mean")
    :return: callable mapping PruningMaskCreatorArgs to a boolean mask
    :raises ValueError: if mask_structure is not "filter"
    """
    if mask_structure != "filter":
        raise ValueError(f"Invalid mask_structure: {mask_structure}")

    def _aggregate(tensor, method="sum"):
        # collapse all dims except dim 1 into one score per slice; the mask
        # write below requires a rank-4 tensor (e.g. a conv weight)
        return getattr(tensor, method)(dim=(0, 2, 3))

    def _create_mask(args: PruningMaskCreatorArgs) -> Tensor:
        prune_filters = int(args.sparsity * args.scores.size(1))
        aggregated_scores = _aggregate(args.scores, aggregate)
        # lowest-scoring slices; topk with k == 0 yields an empty index
        # tensor, so low sparsities are a safe no-op
        _, top_indices = torch.topk(aggregated_scores, prune_filters, largest=False)
        mask = torch.ones_like(args.scores, dtype=torch.bool)
        mask[:, top_indices, :, :] = 0
        return mask

    return _create_mask
21,458 | import re
from dataclasses import dataclass
from typing import Callable, Optional
import torch
from torch import Tensor
from torch.nn.parameter import Parameter
class PruningMaskCreatorArgs:
parameter: Parameter
sparsity: float
scores: Tensor
prev_mask: Optional[Tensor] = None
try:
import torch
_PARSED_TORCH_VERSION = version.parse(torch.__version__)
if _PARSED_TORCH_VERSION.major >= 2:
torch_compile_func = torch.compile
def raise_torch_compile_warning(*args, **kwargs):
warnings.warn("torch.compile is not supported by sparseml for torch 2.0.x")
return torch_compile_func(*args, **kwargs)
torch.compile = raise_torch_compile_warning
_BYPASS = bool(int(os.environ.get("NM_BYPASS_TORCH_VERSION", "0")))
if _PARSED_TORCH_VERSION.major == 1 and _PARSED_TORCH_VERSION.minor in [10, 11]:
if not _BYPASS:
raise RuntimeError(
"sparseml does not support torch==1.10.* or 1.11.*. "
f"Found torch version {torch.__version__}.\n\n"
"To bypass this error, set environment variable "
"`NM_BYPASS_TORCH_VERSION` to '1'.\n\n"
"Bypassing may result in errors or "
"incorrect behavior, so set at your own risk."
)
else:
warnings.warn(
"sparseml quantized onnx export does not work "
"with torch==1.10.* or 1.11.*"
)
except ImportError:
pass
def block_pruning(mask_structure: str, aggregate: str = "sum"):
    """
    Build a block-granularity mask creator: elements are kept or pruned in
    contiguous blocks whose sizes along the leading dimensions are given by
    the structure string.

    :param mask_structure: structure string "block_X,Y,..." where X,Y,...
        are the block sizes along the scores' leading dimensions (which must
        be divisible by them)
    :param aggregate: tensor reduction method name used to score each block
        (e.g. "sum" or "mean")
    :return: callable mapping PruningMaskCreatorArgs to a boolean mask
    :raises ValueError: if mask_structure is not a valid block structure
    """
    pattern = re.compile(r"^block_(.*)")
    match = pattern.search(mask_structure)
    if not match:
        raise ValueError(f"invalid block mask type {mask_structure}")
    block_dims = list(map(int, match.group(1).split(",")))

    def _create_mask(args: PruningMaskCreatorArgs) -> Tensor:
        # fold each leading dim into blocks; every unfold appends one
        # block-size dim, so the trailing len(block_dims) dims index
        # within-block positions
        block_view = args.scores
        for dim, size in enumerate(block_dims):
            block_view = block_view.unfold(dimension=dim, size=size, step=size)
        # bug fix: the aggregation previously reduced the whole view to a
        # single scalar (no dim argument); reduce only the within-block dims
        # so one score per block remains
        within_block_dims = tuple(
            range(block_view.dim() - len(block_dims), block_view.dim())
        )
        block_scores = getattr(block_view, aggregate)(dim=within_block_dims)
        prune_blocks = int(args.sparsity * block_scores.numel())
        if prune_blocks <= 0:
            # bug fix: topk with k == 0 made threshold[-1] raise IndexError;
            # at zero blocks to prune, keep everything
            return torch.ones_like(args.scores, dtype=torch.bool)
        threshold, _ = torch.topk(
            block_scores.view(-1), prune_blocks, largest=False
        )
        keep = block_scores > threshold[-1]
        # bug fix: expand the per-block decision back to element resolution
        # along each blocked dim (previously expanded only along a trailing
        # unsqueezed dim, which cannot rebuild multi-dim blocks)
        mask = keep
        for dim, size in enumerate(block_dims):
            mask = mask.repeat_interleave(size, dim=dim)
        return mask.to(dtype=torch.bool)

    return _create_mask
21,459 | import re
from typing import Callable, Dict, Sequence, Tuple, Union
import torch
import torch.nn.functional as TF
from torch import Tensor
from torch.nn import Module
from sparseml.core import State
TensorOrCollectionType = Union[Tensor, Sequence[Tensor], Dict[str, Tensor]]
def identity_transform(name: str, **kwargs):
    """
    Build the no-op "identity" output transform.

    :param name: must be "identity"; validated so creation through a registry
        of transform names fails loudly on a mismatch
    :param kwargs: extra registry kwargs, accepted and ignored
    :return: a transform returning its input unchanged
    :raises ValueError: if ``name`` is not "identity"
    """
    if name != "identity":
        raise ValueError(f"Invalid transform name: {name}")

    def _passthrough(val: TensorOrCollectionType) -> TensorOrCollectionType:
        # no-op: hand the value straight back
        return val

    return _passthrough
21,460 | import re
from typing import Callable, Dict, Sequence, Tuple, Union
import torch
import torch.nn.functional as TF
from torch import Tensor
from torch.nn import Module
from sparseml.core import State
TensorOrCollectionType = Union[Tensor, Sequence[Tensor], Dict[str, Tensor]]
def recursive_apply(
    val: TensorOrCollectionType,
    func: Callable[[Tensor], Tensor],
) -> TensorOrCollectionType:
    """
    Apply ``func`` to every tensor contained in ``val``.

    :param val: a Tensor, a sequence of values, or a dict of values;
        sequences and dicts may nest arbitrarily deep
    :param func: callable applied to each Tensor leaf
    :return: the same structure as ``val`` (sequences become lists) with
        every tensor replaced by ``func(tensor)``
    :raises ValueError: if a leaf is not a Tensor, Sequence, or dict
    """
    if isinstance(val, Tensor):
        return func(val)
    if isinstance(val, dict):
        return {key: recursive_apply(entry, func) for key, entry in val.items()}
    if isinstance(val, Sequence):
        return [recursive_apply(entry, func) for entry in val]
    raise ValueError(f"Unsupported type for recursive_apply: {type(val)}")
# Module import-time guard: validate/patch the installed torch version.
# NOTE(review): relies on `version` (packaging.version), `warnings`, and `os`
# being imported earlier in the file — not visible in this chunk; confirm.
try:
    import torch

    _PARSED_TORCH_VERSION = version.parse(torch.__version__)
    if _PARSED_TORCH_VERSION.major >= 2:
        # Wrap torch.compile so every use emits a compatibility warning
        # before delegating to the original implementation.
        torch_compile_func = torch.compile

        def raise_torch_compile_warning(*args, **kwargs):
            warnings.warn("torch.compile is not supported by sparseml for torch 2.0.x")
            return torch_compile_func(*args, **kwargs)

        torch.compile = raise_torch_compile_warning

    # Escape hatch: NM_BYPASS_TORCH_VERSION=1 downgrades the hard error below
    # to a warning for torch 1.10.x / 1.11.x.
    _BYPASS = bool(int(os.environ.get("NM_BYPASS_TORCH_VERSION", "0")))
    if _PARSED_TORCH_VERSION.major == 1 and _PARSED_TORCH_VERSION.minor in [10, 11]:
        if not _BYPASS:
            raise RuntimeError(
                "sparseml does not support torch==1.10.* or 1.11.*. "
                f"Found torch version {torch.__version__}.\n\n"
                "To bypass this error, set environment variable "
                "`NM_BYPASS_TORCH_VERSION` to '1'.\n\n"
                "Bypassing may result in errors or "
                "incorrect behavior, so set at your own risk."
            )
        else:
            warnings.warn(
                "sparseml quantized onnx export does not work "
                "with torch==1.10.* or 1.11.*"
            )
except ImportError:
    # torch not installed — skip the version checks entirely.
    pass
def softmax_transform(name: str, temperature: float = 1.0, dim: int = -1, **kwargs):
    """
    Build the "softmax" output transform.

    :param name: must be "softmax"
    :param temperature: divisor applied to values before the softmax; higher
        temperatures flatten the resulting distribution
    :param dim: dimension the softmax normalizes over
    :param kwargs: extra registry kwargs, accepted and ignored
    :return: a transform applying a temperature-scaled softmax to every tensor
    :raises ValueError: if ``name`` is not "softmax"
    """
    if name != "softmax":
        raise ValueError(f"Invalid transform name: {name}")

    def _scaled_softmax(tensor: Tensor) -> Tensor:
        return torch.softmax(tensor / temperature, dim=dim)

    def _create_transform(val: TensorOrCollectionType) -> TensorOrCollectionType:
        return recursive_apply(val, _scaled_softmax)

    return _create_transform
21,461 | import re
from typing import Callable, Dict, Sequence, Tuple, Union
import torch
import torch.nn.functional as TF
from torch import Tensor
from torch.nn import Module
from sparseml.core import State
TensorOrCollectionType = Union[Tensor, Sequence[Tensor], Dict[str, Tensor]]
# NOTE(review): corrupted span — this copy of ``recursive_apply`` lost its
# real body (the Tensor/Sequence/dict dispatch present in the intact copies
# elsewhere in this file) and a truncated torch-version guard was spliced in
# instead. Preserved as-is for reference; do not call this definition.
def recursive_apply(
    val: TensorOrCollectionType,
    func: Callable[[Tensor], Tensor],
) -> TensorOrCollectionType:
    try:
        import torch

        _PARSED_TORCH_VERSION = version.parse(torch.__version__)
        if _PARSED_TORCH_VERSION.major >= 2:
            torch_compile_func = torch.compile

            def raise_torch_compile_warning(*args, **kwargs):
                # NOTE(review): the warn-and-delegate body present in the
                # other copies of this guard is missing here.
                torch.compile = raise_torch_compile_warning

        _BYPASS = bool(int(os.environ.get("NM_BYPASS_TORCH_VERSION", "0")))
        if _PARSED_TORCH_VERSION.major == 1 and _PARSED_TORCH_VERSION.minor in [10, 11]:
            if not _BYPASS:
                raise RuntimeError(
                    "sparseml does not support torch==1.10.* or 1.11.*. "
                    f"Found torch version {torch.__version__}.\n\n"
                    "To bypass this error, set environment variable "
                    "`NM_BYPASS_TORCH_VERSION` to '1'.\n\n"
                    "Bypassing may result in errors or "
                    "incorrect behavior, so set at your own risk."
                )
            else:
                warnings.warn(
                    "sparseml quantized onnx export does not work "
                    "with torch==1.10.* or 1.11.*"
                )
    except ImportError:
        pass
def log_softmax_transform(name: str, temperature: float = 1.0, dim: int = -1, **kwargs):
    """
    Build the "log_softmax" output transform.

    :param name: must be "log_softmax"
    :param temperature: divisor applied to values before the log-softmax
    :param dim: dimension the log-softmax normalizes over
    :param kwargs: extra registry kwargs, accepted and ignored
    :return: a transform applying a temperature-scaled log-softmax to every
        tensor
    :raises ValueError: if ``name`` is not "log_softmax"
    """
    if name != "log_softmax":
        raise ValueError(f"Invalid transform name: {name}")

    def _scaled_log_softmax(tensor: Tensor) -> Tensor:
        return torch.log_softmax(tensor / temperature, dim=dim)

    def _create_transform(val: TensorOrCollectionType) -> TensorOrCollectionType:
        return recursive_apply(val, _scaled_log_softmax)

    return _create_transform
21,462 | import re
from typing import Callable, Dict, Sequence, Tuple, Union
import torch
import torch.nn.functional as TF
from torch import Tensor
from torch.nn import Module
from sparseml.core import State
TensorOrCollectionType = Union[Tensor, Sequence[Tensor], Dict[str, Tensor]]
def recursive_apply(
    val: TensorOrCollectionType,
    func: Callable[[Tensor], Tensor],
) -> TensorOrCollectionType:
    """
    Apply ``func`` to every tensor contained in ``val``.

    :param val: a Tensor, a sequence of values, or a dict of values;
        sequences and dicts may nest arbitrarily deep
    :param func: callable applied to each Tensor leaf
    :return: the same structure as ``val`` (sequences become lists) with
        every tensor replaced by ``func(tensor)``
    :raises ValueError: if a leaf is not a Tensor, Sequence, or dict
    """
    if isinstance(val, Tensor):
        return func(val)
    if isinstance(val, dict):
        return {key: recursive_apply(entry, func) for key, entry in val.items()}
    if isinstance(val, Sequence):
        return [recursive_apply(entry, func) for entry in val]
    raise ValueError(f"Unsupported type for recursive_apply: {type(val)}")
def normalize_transform(
    name: str,
    p: float = 1,
    dim: int = -1,
    eps: float = 1e-12,
    mean: bool = False,
    std: bool = False,
    **kwargs,
):
    """
    Build the "normalize" output transform (Lp normalization with optional
    mean-centering and std-scaling).

    :param name: must be "normalize"
    :param p: exponent of the Lp norm used by ``torch.nn.functional.normalize``
    :param dim: dimension to normalize over
    :param eps: small value to avoid division by zero in the norm
    :param mean: if True, subtract the mean along ``dim`` after normalizing
    :param std: if True, divide by the std along ``dim`` after normalizing
    :param kwargs: extra registry kwargs, accepted and ignored
    :return: a transform normalizing every tensor in the input
    :raises ValueError: if ``name`` is not "normalize"
    """
    if name != "normalize":
        raise ValueError(f"Invalid transform name: {name}")

    def _norm(tensor: Tensor) -> Tensor:
        normalized = TF.normalize(tensor, p=p, dim=dim, eps=eps)
        if mean:
            normalized = normalized - normalized.mean(dim=dim, keepdim=True)
        if std:
            normalized = normalized / normalized.std(dim=dim, keepdim=True)
        return normalized

    def _create_transform(val: TensorOrCollectionType) -> TensorOrCollectionType:
        return recursive_apply(val, _norm)

    return _create_transform
21,463 | import re
from typing import Callable, Dict, Sequence, Tuple, Union
import torch
import torch.nn.functional as TF
from torch import Tensor
from torch.nn import Module
from sparseml.core import State
TensorOrCollectionType = Union[Tensor, Sequence[Tensor], Dict[str, Tensor]]
def recursive_combine(
    val_one: TensorOrCollectionType,
    val_two: TensorOrCollectionType,
    func: Callable[[Tensor, Tensor], Tensor],
):
    """
    Combine two identically-structured collections of tensors element-wise.

    :param val_one: a Tensor, sequence, or dict; must have the same type
        as ``val_two`` at every nesting level
    :param val_two: the structure to combine with ``val_one``
    :param func: callable applied to each pair of Tensor leaves
    :return: the same structure with each tensor pair replaced by
        ``func(tensor_one, tensor_two)``
    :raises ValueError: if the two values differ in type or a leaf is not a
        Tensor, Sequence, or dict
    """
    if type(val_one) != type(val_two):
        raise ValueError(
            f"val_one type of {type(val_one)} must match "
            f"val_two type of {type(val_two)}"
        )
    if isinstance(val_one, Tensor):
        return func(val_one, val_two)
    if isinstance(val_one, dict):
        return {
            key: recursive_combine(val_one[key], val_two[key], func)
            for key in val_one
        }
    if isinstance(val_one, Sequence):
        return [
            recursive_combine(entry_one, entry_two, func)
            for entry_one, entry_two in zip(val_one, val_two)
        ]
    raise ValueError(f"Unsupported type for recursive_combine: {type(val_one)}")
# Module import-time guard: validate/patch the installed torch version.
# NOTE(review): relies on `version` (packaging.version), `warnings`, and `os`
# being imported earlier in the file — not visible in this chunk; confirm.
try:
    import torch

    _PARSED_TORCH_VERSION = version.parse(torch.__version__)
    if _PARSED_TORCH_VERSION.major >= 2:
        # Wrap torch.compile so every use emits a compatibility warning
        # before delegating to the original implementation.
        torch_compile_func = torch.compile

        def raise_torch_compile_warning(*args, **kwargs):
            warnings.warn("torch.compile is not supported by sparseml for torch 2.0.x")
            return torch_compile_func(*args, **kwargs)

        torch.compile = raise_torch_compile_warning

    # Escape hatch: NM_BYPASS_TORCH_VERSION=1 downgrades the hard error below
    # to a warning for torch 1.10.x / 1.11.x.
    _BYPASS = bool(int(os.environ.get("NM_BYPASS_TORCH_VERSION", "0")))
    if _PARSED_TORCH_VERSION.major == 1 and _PARSED_TORCH_VERSION.minor in [10, 11]:
        if not _BYPASS:
            raise RuntimeError(
                "sparseml does not support torch==1.10.* or 1.11.*. "
                f"Found torch version {torch.__version__}.\n\n"
                "To bypass this error, set environment variable "
                "`NM_BYPASS_TORCH_VERSION` to '1'.\n\n"
                "Bypassing may result in errors or "
                "incorrect behavior, so set at your own risk."
            )
        else:
            warnings.warn(
                "sparseml quantized onnx export does not work "
                "with torch==1.10.* or 1.11.*"
            )
except ImportError:
    # torch not installed — skip the version checks entirely.
    pass
def l1_comparison(name: str, dim: int = -1, **kwargs):
    """
    Build the "l1_distance" comparison between two outputs.

    :param name: must be "l1_distance"
    :param dim: dimension the absolute differences are summed over
    :param kwargs: extra registry kwargs, accepted and ignored
    :return: a comparison returning the L1 distance for each tensor pair
    :raises ValueError: if ``name`` is not "l1_distance"
    """
    if name != "l1_distance":
        raise ValueError(f"Invalid comparison name: {name}")

    def _l1_distance(first: Tensor, second: Tensor) -> Tensor:
        return (first - second).abs().sum(dim=dim)

    def _create_comparison(
        val_one: TensorOrCollectionType, val_two: TensorOrCollectionType
    ) -> TensorOrCollectionType:
        return recursive_combine(val_one, val_two, _l1_distance)

    return _create_comparison
21,464 | import re
from typing import Callable, Dict, Sequence, Tuple, Union
import torch
import torch.nn.functional as TF
from torch import Tensor
from torch.nn import Module
from sparseml.core import State
TensorOrCollectionType = Union[Tensor, Sequence[Tensor], Dict[str, Tensor]]
def recursive_combine(
    val_one: TensorOrCollectionType,
    val_two: TensorOrCollectionType,
    func: Callable[[Tensor, Tensor], Tensor],
):
    """
    Combine two identically-structured collections of tensors element-wise.

    :param val_one: a Tensor, sequence, or dict; must have the same type
        as ``val_two`` at every nesting level
    :param val_two: the structure to combine with ``val_one``
    :param func: callable applied to each pair of Tensor leaves
    :return: the same structure with each tensor pair replaced by
        ``func(tensor_one, tensor_two)``
    :raises ValueError: if the two values differ in type or a leaf is not a
        Tensor, Sequence, or dict
    """
    if type(val_one) != type(val_two):
        raise ValueError(
            f"val_one type of {type(val_one)} must match "
            f"val_two type of {type(val_two)}"
        )
    if isinstance(val_one, Tensor):
        return func(val_one, val_two)
    if isinstance(val_one, dict):
        return {
            key: recursive_combine(val_one[key], val_two[key], func)
            for key in val_one
        }
    if isinstance(val_one, Sequence):
        return [
            recursive_combine(entry_one, entry_two, func)
            for entry_one, entry_two in zip(val_one, val_two)
        ]
    raise ValueError(f"Unsupported type for recursive_combine: {type(val_one)}")
# Module import-time guard: validate/patch the installed torch version.
# NOTE(review): relies on `version` (packaging.version), `warnings`, and `os`
# being imported earlier in the file — not visible in this chunk; confirm.
try:
    import torch

    _PARSED_TORCH_VERSION = version.parse(torch.__version__)
    if _PARSED_TORCH_VERSION.major >= 2:
        # Wrap torch.compile so every use emits a compatibility warning
        # before delegating to the original implementation.
        torch_compile_func = torch.compile

        def raise_torch_compile_warning(*args, **kwargs):
            warnings.warn("torch.compile is not supported by sparseml for torch 2.0.x")
            return torch_compile_func(*args, **kwargs)

        torch.compile = raise_torch_compile_warning

    # Escape hatch: NM_BYPASS_TORCH_VERSION=1 downgrades the hard error below
    # to a warning for torch 1.10.x / 1.11.x.
    _BYPASS = bool(int(os.environ.get("NM_BYPASS_TORCH_VERSION", "0")))
    if _PARSED_TORCH_VERSION.major == 1 and _PARSED_TORCH_VERSION.minor in [10, 11]:
        if not _BYPASS:
            raise RuntimeError(
                "sparseml does not support torch==1.10.* or 1.11.*. "
                f"Found torch version {torch.__version__}.\n\n"
                "To bypass this error, set environment variable "
                "`NM_BYPASS_TORCH_VERSION` to '1'.\n\n"
                "Bypassing may result in errors or "
                "incorrect behavior, so set at your own risk."
            )
        else:
            warnings.warn(
                "sparseml quantized onnx export does not work "
                "with torch==1.10.* or 1.11.*"
            )
except ImportError:
    # torch not installed — skip the version checks entirely.
    pass
def l2_comparison(name: str, dim: int = -1, **kwargs):
    """
    Build the "l2_distance" comparison between two outputs.

    :param name: must be "l2_distance"
    :param dim: dimension the squared differences are summed over
    :param kwargs: extra registry kwargs, accepted and ignored
    :return: a comparison returning the squared L2 distance for each
        tensor pair
    :raises ValueError: if ``name`` is not "l2_distance"
    """
    if name != "l2_distance":
        raise ValueError(f"Invalid comparison name: {name}")

    def _l2_distance(first: Tensor, second: Tensor) -> Tensor:
        return (first - second).pow(2).sum(dim=dim)

    def _create_comparison(
        val_one: TensorOrCollectionType, val_two: TensorOrCollectionType
    ) -> TensorOrCollectionType:
        return recursive_combine(val_one, val_two, _l2_distance)

    return _create_comparison
21,465 | import re
from typing import Callable, Dict, Sequence, Tuple, Union
import torch
import torch.nn.functional as TF
from torch import Tensor
from torch.nn import Module
from sparseml.core import State
TensorOrCollectionType = Union[Tensor, Sequence[Tensor], Dict[str, Tensor]]
def recursive_combine(
    val_one: TensorOrCollectionType,
    val_two: TensorOrCollectionType,
    func: Callable[[Tensor, Tensor], Tensor],
):
    """
    Combine two identically-structured collections of tensors element-wise.

    :param val_one: a Tensor, sequence, or dict; must have the same type
        as ``val_two`` at every nesting level
    :param val_two: the structure to combine with ``val_one``
    :param func: callable applied to each pair of Tensor leaves
    :return: the same structure with each tensor pair replaced by
        ``func(tensor_one, tensor_two)``
    :raises ValueError: if the two values differ in type or a leaf is not a
        Tensor, Sequence, or dict
    """
    if type(val_one) != type(val_two):
        raise ValueError(
            f"val_one type of {type(val_one)} must match "
            f"val_two type of {type(val_two)}"
        )
    if isinstance(val_one, Tensor):
        return func(val_one, val_two)
    if isinstance(val_one, dict):
        return {
            key: recursive_combine(val_one[key], val_two[key], func)
            for key in val_one
        }
    if isinstance(val_one, Sequence):
        return [
            recursive_combine(entry_one, entry_two, func)
            for entry_one, entry_two in zip(val_one, val_two)
        ]
    raise ValueError(f"Unsupported type for recursive_combine: {type(val_one)}")
# Module import-time guard: validate/patch the installed torch version.
# NOTE(review): relies on `version` (packaging.version), `warnings`, and `os`
# being imported earlier in the file — not visible in this chunk; confirm.
try:
    import torch

    _PARSED_TORCH_VERSION = version.parse(torch.__version__)
    if _PARSED_TORCH_VERSION.major >= 2:
        # Wrap torch.compile so every use emits a compatibility warning
        # before delegating to the original implementation.
        torch_compile_func = torch.compile

        def raise_torch_compile_warning(*args, **kwargs):
            warnings.warn("torch.compile is not supported by sparseml for torch 2.0.x")
            return torch_compile_func(*args, **kwargs)

        torch.compile = raise_torch_compile_warning

    # Escape hatch: NM_BYPASS_TORCH_VERSION=1 downgrades the hard error below
    # to a warning for torch 1.10.x / 1.11.x.
    _BYPASS = bool(int(os.environ.get("NM_BYPASS_TORCH_VERSION", "0")))
    if _PARSED_TORCH_VERSION.major == 1 and _PARSED_TORCH_VERSION.minor in [10, 11]:
        if not _BYPASS:
            raise RuntimeError(
                "sparseml does not support torch==1.10.* or 1.11.*. "
                f"Found torch version {torch.__version__}.\n\n"
                "To bypass this error, set environment variable "
                "`NM_BYPASS_TORCH_VERSION` to '1'.\n\n"
                "Bypassing may result in errors or "
                "incorrect behavior, so set at your own risk."
            )
        else:
            warnings.warn(
                "sparseml quantized onnx export does not work "
                "with torch==1.10.* or 1.11.*"
            )
except ImportError:
    # torch not installed — skip the version checks entirely.
    pass
def inner_product_comparison(name: str, dim: int = -1, **kwargs):
    """
    Build the "inner_product" comparison between two outputs.

    :param name: must be "inner_product"
    :param dim: dimension the element-wise products are summed over
    :param kwargs: extra registry kwargs, accepted and ignored
    :return: a comparison returning the inner product for each tensor pair
    :raises ValueError: if ``name`` is not "inner_product"
    """
    if name != "inner_product":
        raise ValueError(f"Invalid comparison name: {name}")

    def _inner_product(first: Tensor, second: Tensor) -> Tensor:
        return (first * second).sum(dim=dim)

    def _create_comparison(
        val_one: TensorOrCollectionType, val_two: TensorOrCollectionType
    ) -> TensorOrCollectionType:
        return recursive_combine(val_one, val_two, _inner_product)

    return _create_comparison
21,466 | import re
from typing import Callable, Dict, Sequence, Tuple, Union
import torch
import torch.nn.functional as TF
from torch import Tensor
from torch.nn import Module
from sparseml.core import State
TensorOrCollectionType = Union[Tensor, Sequence[Tensor], Dict[str, Tensor]]
def recursive_combine(
    val_one: TensorOrCollectionType,
    val_two: TensorOrCollectionType,
    func: Callable[[Tensor, Tensor], Tensor],
):
    """
    Combine two identically-structured collections of tensors element-wise.

    :param val_one: a Tensor, sequence, or dict; must have the same type
        as ``val_two`` at every nesting level
    :param val_two: the structure to combine with ``val_one``
    :param func: callable applied to each pair of Tensor leaves
    :return: the same structure with each tensor pair replaced by
        ``func(tensor_one, tensor_two)``
    :raises ValueError: if the two values differ in type or a leaf is not a
        Tensor, Sequence, or dict
    """
    if type(val_one) != type(val_two):
        raise ValueError(
            f"val_one type of {type(val_one)} must match "
            f"val_two type of {type(val_two)}"
        )
    if isinstance(val_one, Tensor):
        return func(val_one, val_two)
    if isinstance(val_one, dict):
        return {
            key: recursive_combine(val_one[key], val_two[key], func)
            for key in val_one
        }
    if isinstance(val_one, Sequence):
        return [
            recursive_combine(entry_one, entry_two, func)
            for entry_one, entry_two in zip(val_one, val_two)
        ]
    raise ValueError(f"Unsupported type for recursive_combine: {type(val_one)}")
# Module import-time guard: validate/patch the installed torch version.
# NOTE(review): relies on `version` (packaging.version), `warnings`, and `os`
# being imported earlier in the file — not visible in this chunk; confirm.
try:
    import torch

    _PARSED_TORCH_VERSION = version.parse(torch.__version__)
    if _PARSED_TORCH_VERSION.major >= 2:
        # Wrap torch.compile so every use emits a compatibility warning
        # before delegating to the original implementation.
        torch_compile_func = torch.compile

        def raise_torch_compile_warning(*args, **kwargs):
            warnings.warn("torch.compile is not supported by sparseml for torch 2.0.x")
            return torch_compile_func(*args, **kwargs)

        torch.compile = raise_torch_compile_warning

    # Escape hatch: NM_BYPASS_TORCH_VERSION=1 downgrades the hard error below
    # to a warning for torch 1.10.x / 1.11.x.
    _BYPASS = bool(int(os.environ.get("NM_BYPASS_TORCH_VERSION", "0")))
    if _PARSED_TORCH_VERSION.major == 1 and _PARSED_TORCH_VERSION.minor in [10, 11]:
        if not _BYPASS:
            raise RuntimeError(
                "sparseml does not support torch==1.10.* or 1.11.*. "
                f"Found torch version {torch.__version__}.\n\n"
                "To bypass this error, set environment variable "
                "`NM_BYPASS_TORCH_VERSION` to '1'.\n\n"
                "Bypassing may result in errors or "
                "incorrect behavior, so set at your own risk."
            )
        else:
            warnings.warn(
                "sparseml quantized onnx export does not work "
                "with torch==1.10.* or 1.11.*"
            )
except ImportError:
    # torch not installed — skip the version checks entirely.
    pass
def cosine_similarity_comparison(name: str, dim: int = -1, **kwargs):
    """
    Build the "cosine_similarity" comparison between two outputs.

    :param name: must be "cosine_similarity"
    :param dim: dimension the similarity is computed over
    :param kwargs: extra registry kwargs, accepted and ignored
    :return: a comparison returning dot(a, b) / (||a|| * ||b||) along ``dim``
        for each tensor pair
    :raises ValueError: if ``name`` is not "cosine_similarity"
    """
    if name != "cosine_similarity":
        raise ValueError(f"Invalid comparison name: {name}")

    def _cosine_similarity(first: Tensor, second: Tensor) -> Tensor:
        dot_product = torch.sum(first * second, dim=dim)
        return dot_product / (
            torch.norm(first, dim=dim) * torch.norm(second, dim=dim)
        )

    def _create_comparison(
        val_one: TensorOrCollectionType, val_two: TensorOrCollectionType
    ) -> TensorOrCollectionType:
        return recursive_combine(val_one, val_two, _cosine_similarity)

    return _create_comparison
21,467 | import re
from typing import Callable, Dict, Sequence, Tuple, Union
import torch
import torch.nn.functional as TF
from torch import Tensor
from torch.nn import Module
from sparseml.core import State
TensorOrCollectionType = Union[Tensor, Sequence[Tensor], Dict[str, Tensor]]
def recursive_combine(
    val_one: TensorOrCollectionType,
    val_two: TensorOrCollectionType,
    func: Callable[[Tensor, Tensor], Tensor],
):
    """
    Combine two identically-structured collections of tensors element-wise.

    :param val_one: a Tensor, sequence, or dict; must have the same type
        as ``val_two`` at every nesting level
    :param val_two: the structure to combine with ``val_one``
    :param func: callable applied to each pair of Tensor leaves
    :return: the same structure with each tensor pair replaced by
        ``func(tensor_one, tensor_two)``
    :raises ValueError: if the two values differ in type or a leaf is not a
        Tensor, Sequence, or dict
    """
    if type(val_one) != type(val_two):
        raise ValueError(
            f"val_one type of {type(val_one)} must match "
            f"val_two type of {type(val_two)}"
        )
    if isinstance(val_one, Tensor):
        return func(val_one, val_two)
    if isinstance(val_one, dict):
        return {
            key: recursive_combine(val_one[key], val_two[key], func)
            for key in val_one
        }
    if isinstance(val_one, Sequence):
        return [
            recursive_combine(entry_one, entry_two, func)
            for entry_one, entry_two in zip(val_one, val_two)
        ]
    raise ValueError(f"Unsupported type for recursive_combine: {type(val_one)}")
# Module import-time guard: validate/patch the installed torch version.
# NOTE(review): relies on `version` (packaging.version), `warnings`, and `os`
# being imported earlier in the file — not visible in this chunk; confirm.
try:
    import torch

    _PARSED_TORCH_VERSION = version.parse(torch.__version__)
    if _PARSED_TORCH_VERSION.major >= 2:
        # Wrap torch.compile so every use emits a compatibility warning
        # before delegating to the original implementation.
        torch_compile_func = torch.compile

        def raise_torch_compile_warning(*args, **kwargs):
            warnings.warn("torch.compile is not supported by sparseml for torch 2.0.x")
            return torch_compile_func(*args, **kwargs)

        torch.compile = raise_torch_compile_warning

    # Escape hatch: NM_BYPASS_TORCH_VERSION=1 downgrades the hard error below
    # to a warning for torch 1.10.x / 1.11.x.
    _BYPASS = bool(int(os.environ.get("NM_BYPASS_TORCH_VERSION", "0")))
    if _PARSED_TORCH_VERSION.major == 1 and _PARSED_TORCH_VERSION.minor in [10, 11]:
        if not _BYPASS:
            raise RuntimeError(
                "sparseml does not support torch==1.10.* or 1.11.*. "
                f"Found torch version {torch.__version__}.\n\n"
                "To bypass this error, set environment variable "
                "`NM_BYPASS_TORCH_VERSION` to '1'.\n\n"
                "Bypassing may result in errors or "
                "incorrect behavior, so set at your own risk."
            )
        else:
            warnings.warn(
                "sparseml quantized onnx export does not work "
                "with torch==1.10.* or 1.11.*"
            )
except ImportError:
    # torch not installed — skip the version checks entirely.
    pass
def kl_divergence_comparison(
    name: str, dim: int = -1, temperature: float = 1.0, **kwargs
):
    """
    Build the "kl_divergence" comparison between two outputs.

    :param name: must be "kl_divergence"
    :param dim: dimension the divergence terms are summed over
    :param temperature: divisor applied to both inputs before the divergence
    :param kwargs: extra registry kwargs, accepted and ignored
    :return: a comparison returning sum(p * log(p / q)) along ``dim`` for each
        tensor pair (inputs are assumed to be valid distributions; no
        normalization or clamping is applied here)
    :raises ValueError: if ``name`` is not "kl_divergence"
    """
    if name != "kl_divergence":
        raise ValueError(f"Invalid comparison name: {name}")

    def _kl_divergence(first: Tensor, second: Tensor) -> Tensor:
        first = first / temperature
        second = second / temperature
        return (first * (first / second).log()).sum(dim=dim)

    def _create_comparison(
        val_one: TensorOrCollectionType, val_two: TensorOrCollectionType
    ) -> TensorOrCollectionType:
        return recursive_combine(val_one, val_two, _kl_divergence)

    return _create_comparison
21,468 | import re
from typing import Callable, Dict, Sequence, Tuple, Union
import torch
import torch.nn.functional as TF
from torch import Tensor
from torch.nn import Module
from sparseml.core import State
TensorOrCollectionType = Union[Tensor, Sequence[Tensor], Dict[str, Tensor]]
def recursive_combine(
    val_one: TensorOrCollectionType,
    val_two: TensorOrCollectionType,
    func: Callable[[Tensor, Tensor], Tensor],
):
    """
    Combine two identically-structured collections of tensors element-wise.

    :param val_one: a Tensor, sequence, or dict; must have the same type
        as ``val_two`` at every nesting level
    :param val_two: the structure to combine with ``val_one``
    :param func: callable applied to each pair of Tensor leaves
    :return: the same structure with each tensor pair replaced by
        ``func(tensor_one, tensor_two)``
    :raises ValueError: if the two values differ in type or a leaf is not a
        Tensor, Sequence, or dict
    """
    if type(val_one) != type(val_two):
        raise ValueError(
            f"val_one type of {type(val_one)} must match "
            f"val_two type of {type(val_two)}"
        )
    if isinstance(val_one, Tensor):
        return func(val_one, val_two)
    if isinstance(val_one, dict):
        return {
            key: recursive_combine(val_one[key], val_two[key], func)
            for key in val_one
        }
    if isinstance(val_one, Sequence):
        return [
            recursive_combine(entry_one, entry_two, func)
            for entry_one, entry_two in zip(val_one, val_two)
        ]
    raise ValueError(f"Unsupported type for recursive_combine: {type(val_one)}")
def cross_entropy_comparison(
    name: str, temperature: float = 1.0, reduction: str = "none", **kwargs
):
    """
    Build the "cross_entropy" comparison between two outputs.

    :param name: must be "cross_entropy"
    :param temperature: divisor applied to both inputs before the loss
    :param reduction: reduction forwarded to
        ``torch.nn.functional.cross_entropy``
    :param kwargs: extra registry kwargs, accepted and ignored
    :return: a comparison applying temperature-scaled cross entropy to each
        tensor pair
    :raises ValueError: if ``name`` is not "cross_entropy"
    """
    if name != "cross_entropy":
        # fixed: previously raised "Invalid projection name", inconsistent
        # with the other comparison factories in this file
        raise ValueError(f"Invalid comparison name: {name}")

    def _cross_entropy(val_one: Tensor, val_two: Tensor) -> Tensor:
        val_one = val_one / temperature
        val_two = val_two / temperature
        return TF.cross_entropy(val_one, val_two, reduction=reduction)

    def _create_comparison(
        val_one: "TensorOrCollectionType", val_two: "TensorOrCollectionType"
    ) -> "TensorOrCollectionType":
        return recursive_combine(val_one, val_two, _cross_entropy)

    return _create_comparison
21,469 | import re
from typing import Callable, Dict, Sequence, Tuple, Union
import torch
import torch.nn.functional as TF
from torch import Tensor
from torch.nn import Module
from sparseml.core import State
TensorOrCollectionType = Union[Tensor, Sequence[Tensor], Dict[str, Tensor]]
def recursive_combine(
    val_one: TensorOrCollectionType,
    val_two: TensorOrCollectionType,
    func: Callable[[Tensor, Tensor], Tensor],
):
    """
    Combine two identically-structured collections of tensors element-wise.

    :param val_one: a Tensor, sequence, or dict; must have the same type
        as ``val_two`` at every nesting level
    :param val_two: the structure to combine with ``val_one``
    :param func: callable applied to each pair of Tensor leaves
    :return: the same structure with each tensor pair replaced by
        ``func(tensor_one, tensor_two)``
    :raises ValueError: if the two values differ in type or a leaf is not a
        Tensor, Sequence, or dict
    """
    if type(val_one) != type(val_two):
        raise ValueError(
            f"val_one type of {type(val_one)} must match "
            f"val_two type of {type(val_two)}"
        )
    if isinstance(val_one, Tensor):
        return func(val_one, val_two)
    if isinstance(val_one, dict):
        return {
            key: recursive_combine(val_one[key], val_two[key], func)
            for key in val_one
        }
    if isinstance(val_one, Sequence):
        return [
            recursive_combine(entry_one, entry_two, func)
            for entry_one, entry_two in zip(val_one, val_two)
        ]
    raise ValueError(f"Unsupported type for recursive_combine: {type(val_one)}")
# Module import-time guard: validate/patch the installed torch version.
# NOTE(review): relies on `version` (packaging.version), `warnings`, and `os`
# being imported earlier in the file — not visible in this chunk; confirm.
try:
    import torch

    _PARSED_TORCH_VERSION = version.parse(torch.__version__)
    if _PARSED_TORCH_VERSION.major >= 2:
        # Wrap torch.compile so every use emits a compatibility warning
        # before delegating to the original implementation.
        torch_compile_func = torch.compile

        def raise_torch_compile_warning(*args, **kwargs):
            warnings.warn("torch.compile is not supported by sparseml for torch 2.0.x")
            return torch_compile_func(*args, **kwargs)

        torch.compile = raise_torch_compile_warning

    # Escape hatch: NM_BYPASS_TORCH_VERSION=1 downgrades the hard error below
    # to a warning for torch 1.10.x / 1.11.x.
    _BYPASS = bool(int(os.environ.get("NM_BYPASS_TORCH_VERSION", "0")))
    if _PARSED_TORCH_VERSION.major == 1 and _PARSED_TORCH_VERSION.minor in [10, 11]:
        if not _BYPASS:
            raise RuntimeError(
                "sparseml does not support torch==1.10.* or 1.11.*. "
                f"Found torch version {torch.__version__}.\n\n"
                "To bypass this error, set environment variable "
                "`NM_BYPASS_TORCH_VERSION` to '1'.\n\n"
                "Bypassing may result in errors or "
                "incorrect behavior, so set at your own risk."
            )
        else:
            warnings.warn(
                "sparseml quantized onnx export does not work "
                "with torch==1.10.* or 1.11.*"
            )
except ImportError:
    # torch not installed — skip the version checks entirely.
    pass
def square_head_comparison(name: str, **kwargs):
    """
    Build the "square_head" comparison: total squared error between the two
    values normalized by the total squared magnitude of the second (target)
    value.

    :param name: must be "square_head"
    :param kwargs: extra registry kwargs, accepted and ignored
    :return: a comparison returning a scalar ratio for each tensor pair
    :raises ValueError: if ``name`` is not "square_head"
    """
    if name != "square_head":
        # fixed: previously raised "Invalid projection name", inconsistent
        # with the other comparison factories in this file
        raise ValueError(f"Invalid comparison name: {name}")

    def _square_head(val_one: Tensor, val_two: Tensor) -> Tensor:
        numerator = torch.sum(torch.square(val_two - val_one))
        denominator = torch.sum(torch.square(val_two))
        return numerator / denominator

    def _create_comparison(
        val_one: "TensorOrCollectionType", val_two: "TensorOrCollectionType"
    ) -> "TensorOrCollectionType":
        return recursive_combine(val_one, val_two, _square_head)

    return _create_comparison
21,470 | import logging
import operator
from collections import defaultdict
from math import ceil
from typing import List, Optional
import torch
from torch.nn.modules.sparse import Embedding
# module-scoped logger used by the perplexity evaluation helpers below
_LOGGER = logging.getLogger(__name__)
def ppl_eval_general(
    eval_logits, model, dataloader, dev, nsamples=None, max_samples_per_iteration=128
):
    """
    Evaluate perplexity of ``model`` over ``dataloader`` in batched iterations.

    :param eval_logits: callable ``(model, samples, dev) -> list of logits``;
        one logits tensor of shape (batch, seq_len, vocab) per sample
    :param model: model forwarded to ``eval_logits``
    :param dataloader: sliceable collection of token-id tensors of shape
        (batch, seq_len)
    :param dev: device the labels (and loss) are computed on
    :param nsamples: number of samples to evaluate; defaults to the whole
        dataloader
    :param max_samples_per_iteration: samples evaluated per outer iteration
    :return: perplexity as a float

    Fixes over the previous implementation:
    - the final slice was open-ended (``dataloader[start:]``), so when
      ``nsamples`` was smaller than the dataloader, every remaining sample was
      evaluated instead of exactly ``nsamples``
    - ``.view`` on the position-sliced logits/labels fails for batch sizes > 1
      because the slice is non-contiguous; ``.reshape`` handles both cases
    - the perplexity was computed twice, once logged as a raw tensor; the
      final log now uses ``:.3f`` (the old ``:3f`` was a width, not a
      precision, specifier)
    """
    _LOGGER.info("Evaluating perplexity...")
    if nsamples is None:
        nsamples = len(dataloader)
    number_iterations = int(ceil(nsamples / max_samples_per_iteration))
    neg_log_likelihood = 0.0
    number_tokens = 0
    for iteration in range(number_iterations):
        start = iteration * max_samples_per_iteration
        # cap the final slice at nsamples so at most nsamples are evaluated
        end = min(start + max_samples_per_iteration, nsamples)
        samples = dataloader[start:end]
        logits = eval_logits(model, samples, dev)
        vocabulary_size = logits[0].shape[-1]
        # positions 0..T-2 predict tokens 1..T-1, so drop the final logit
        logits = [logit[:, :-1, :].reshape(-1, vocabulary_size) for logit in logits]
        logits = torch.cat(logits, dim=0).contiguous().to(torch.float32)
        labels = [sample[:, 1:].reshape(-1) for sample in samples]
        labels = torch.cat(labels, dim=0).to(dev)
        # sum (not mean) so perplexity is normalized by total token count
        neg_log_likelihood += torch.nn.functional.cross_entropy(
            logits,
            labels,
            reduction="sum",
        )
        number_tokens += labels.numel()
    ppl = torch.exp(neg_log_likelihood / number_tokens)
    _LOGGER.info(f"Perplexity: {ppl.item():.3f}")
    return ppl.item()
21,471 | from copy import deepcopy
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
import torch.nn.intrinsic as nni
from packaging import version
from torch import quantization as torch_quantization
from torch.nn import BatchNorm2d, Conv2d, Embedding, Module, ReLU
from sparseml.modifiers.quantization.utils.quantization_scheme import (
QuantizationArgs,
QuantizationScheme,
get_observer,
)
from sparseml.pytorch.nn import ReLU as ReLU_nm
from sparseml.pytorch.utils import get_layer
class _BNWrapper(Module):
"""
Wraps BatchNormalization module to expose methods needed to enable
freezing/unfreezing of statistics
:param module: BatchNormalization module to be wrapped
"""
def __init__(self, module: Module):
super().__init__()
self.bn = module
self.freeze_bn = False
def running_mean(self):
return self.bn.running_mean
def running_mean(self, value):
self.bn.running_mean = value
def running_var(self):
return self.bn.running_var
def running_var(self, value):
self.bn.running_var = value
def weight(self):
return self.bn.weight
def weight(self, value):
self.bn.weight = value
def bias(self):
return self.bn.bias
def bias(self, value):
self.bn.bias = value
def gamma(self):
return self.bn.gamma
def gamma(self, value):
self.bn.gamma = value
def beta(self):
return self.bn.beta
def beta(self, value):
self.bn.beta = value
def num_batches_tracked(self):
return self.bn.num_batches_tracked
def num_batches_tracked(self, value):
self.bn.num_batches_tracked = value
def eps(self):
return self.bn.eps
def eps(self, value):
self.bn.eps = value
def momentum(self):
return self.bn.momentum
def momentum(self, value):
self.bn.momentum = value
def forward(self, x):
return self.bn(x)
def freeze_bn_stats(self):
self.freeze_bn = True
self.bn.training = False
return self
def reset_running_stats(self):
self.bn.reset_running_stats()
def train(self, mode=True):
if not self.freeze_bn:
self.bn.train(mode)
return self
def update_bn_stats(self):
self.freeze_bn = False
self.bn.training = True
return self
The provided code snippet includes necessary dependencies for implementing the `configure_module_bn_wrappers` function. Write a Python function `def configure_module_bn_wrappers(module: Module)` to solve the following problem:
Wrap any BatchNormalization modules that are not fused with convolutions with BNWrapper to enable freezing/unfreezing of BN statistics :param module: module to potentially wrap the submodules of
Here is the function:
def configure_module_bn_wrappers(module: Module):
    """
    Wrap any BatchNormalization modules that are not fused with convolutions
    with BNWrapper to enable freezing/unfreezing of BN statistics

    :param module: module to potentially wrap the submodules of
    """
    # modules that already expose freeze_bn_stats (fused or wrapped) are
    # left untouched, along with their submodules
    if hasattr(module, "freeze_bn_stats"):
        return
    batch_norm_types = (
        torch.nn.BatchNorm1d,
        torch.nn.BatchNorm2d,
        torch.nn.BatchNorm3d,
    )
    for child_name, child_module in module.named_children():
        if type(child_module) in batch_norm_types:
            setattr(module, child_name, _BNWrapper(child_module))
        # recurse on child module
        configure_module_bn_wrappers(child_module)
21,472 | from copy import deepcopy
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
import torch.nn.intrinsic as nni
from packaging import version
from torch import quantization as torch_quantization
from torch.nn import BatchNorm2d, Conv2d, Embedding, Module, ReLU
from sparseml.modifiers.quantization.utils.quantization_scheme import (
QuantizationArgs,
QuantizationScheme,
get_observer,
)
from sparseml.pytorch.nn import ReLU as ReLU_nm
from sparseml.pytorch.utils import get_layer
@dataclass
class QConfigProperties:
    """
    Dataclass that stores properties needed to define qconfig objects.
    Default values set here.

    Fixed: restored the ``@dataclass`` decorator (the docstring declares this
    a dataclass and the fields use ``field(default_factory=dict)``, which is
    inert without it) and the ``@property`` / ``.setter`` decorators on the
    symmetric_activations / symmetric_weights accessors, whose duplicate
    undecorated ``def``s shadowed each other.

    :param symmetric_activations: if True, activations will have a symmetric
        quantization range with a pre-specified zero point
        (0 if activation_dtype=torch.qint8, 128 if activation_dtype=torch.quint8).
        Default is False.
    :param symmetric_weights: if True, weights will have a symmetric
        quantization range with a pre-specified zero point
        (0 if weight_dtype=torch.qint8, 128 if weight_dtype=torch.quint8).
        Default is True.
    :param reduce_range: if True, the quantization range will be reduced by one bit.
        This may prevent overflow issues with model execution on certain hardware.
        Default is False.
    :param activation_qconfig_kwargs: Additional kwargs for quantization of
        activations.
    :param weight_qconfig_kwargs: Additional kwargs for quantization of
        weights.
    :param activation_dtype: quantized activation data type.
        Default is torch.quint8.
    :param weight_dtype: quantized weights data type.
        Default is torch.qint8.
    :param activation_bits: number of bits for activations. Default is 8.
    :param weight_bits: number of bits for weights. Default is 8.
    :param activation_strategy: "tensor" to quantize over the whole activation tensor,
        or "channel" to quantize per channel. Default is "tensor"
    :param weight_strategy: "tensor" to quantize over the whole weight tensor, or
        "channel" to quantize per channel. Default is "tensor"
    :param tensorrt: if True sets quantization configuration for compatibility with
        explict quantization as supported by TensorRT 8.2.
    """

    _symmetric_activations: bool = False
    _symmetric_weights: bool = True
    reduce_range: bool = False
    activation_dtype: torch.dtype = torch.quint8
    weight_dtype: torch.dtype = torch.qint8
    activation_bits: int = 8
    weight_bits: int = 8
    activation_strategy: str = "tensor"
    weight_strategy: str = "tensor"
    activation_qconfig_kwargs: Dict[str, Any] = field(default_factory=dict)
    weight_qconfig_kwargs: Dict[str, Any] = field(default_factory=dict)
    tensorrt: bool = False

    @property
    def symmetric_activations(self) -> bool:
        # always use symmetric activations in tensorrt mode
        return self.tensorrt or self._symmetric_activations

    @symmetric_activations.setter
    def symmetric_activations(self, value: bool):
        self._symmetric_activations = value

    @property
    def symmetric_weights(self) -> bool:
        # always use symmetric weights in tensorrt mode
        return self.tensorrt or self._symmetric_weights

    @symmetric_weights.setter
    def symmetric_weights(self, value: bool):
        self._symmetric_weights = value
class QATWrapper(Module):
"""
Wraps inputs and outputs of a Module or function with QuantStubs for
Quantization-Aware-Training (QAT)
:param forward_fn: function to be wrapped, should generally accept and return
torch Tensor(s)
:param num_inputs: number of inputs of the forward function to add a QuantStub
to. Will wrap the first num_inputs ordered inputs of the function. Default
is 1
:param kwarg_input_names: list of names of key word arguments to the forward pass
that should be wrapped with a fake quantize operation. Defaults to empty
:param num_outputs: number of outputs of the forward function to add a QuantStub
to. Will wrap the first num_inputs ordered outputs of the function. Default
is 1. Will also add a DeQuantStub for FP32 conversion if
torch.quantization.convert is invoked
:param input_qconfigs: QConfig to use for calibrating the input QuantStubs. Can
be a single QConfig that will be copied to each QuantStub or a list of one
QConfig for each input. Instead of a QConfig objects, the string 'asymmetric'
or 'symmetric' may be used to use default UINT8 asymmetric and symmetric
quantization respectively
:param output_qconfigs: QConfig to use for calibrating the output QuantStubs. Can
be a single QConfig that will be copied to each QuantStub or a list of one
QConfig for each output. Instead of a QConfig objects, the string 'asymmetric'
or 'symmetric' may be used to use default UINT8 asymmetric and symmetric
quantization respectively
:param qproperties: properties used to define QConfig. may also be a quantization
scheme
"""
def from_module(
module: Module,
qproperties: Union[QConfigProperties, QuantizationScheme],
) -> "QATWrapper":
"""
:param module: torch Module to create a QATWrapper for
:return: QATWrapper object created using the given Module as the forward
function. Will attempt to find any other named parameter of the QATWrapper
constructor from the attributes of the given Module
"""
qat_wrapper_kwargs = (
module.qat_wrapper_kwargs or {}
if hasattr(module, "qat_wrapper_kwargs")
else {}
)
# Remove qconfig from wrapped layer to avoid duplicate quantization
module.qconfig = None
return QATWrapper(
forward_fn=module, qproperties=qproperties, **qat_wrapper_kwargs
)
def __init__(
    self,
    forward_fn: Callable[[Any], Any],
    qproperties: Union[QConfigProperties, QuantizationScheme],
    num_inputs: int = 1,
    kwarg_input_names: Optional[List[str]] = None,
    num_outputs: int = 1,
    input_qconfigs: Union[
        "torch.quantization.QConfig", str, List["torch.quantization.QConfig"]
    ] = "asymmetric",
    output_qconfigs: Union[
        "torch.quantization.QConfig", str, List["torch.quantization.QConfig"]
    ] = "asymmetric",
):
    """
    Wrap ``forward_fn`` with QuantStubs on its inputs and
    QuantStub + DeQuantStub pairs on its outputs (see class docstring for
    full parameter documentation).
    """
    super().__init__()
    # guard: torch.quantization is unavailable on very old torch versions
    if torch_quantization is None:
        raise RuntimeError(
            "Unable to import package torch.quantization. "
            "Try upgrading your PyTorch version to >= 1.7.0."
        )
    if not callable(forward_fn):
        raise ValueError(
            "forward_fn of QATWrapper must be callable. "
            f"Received {type(forward_fn)}"
        )
    self.kwarg_input_names = kwarg_input_names or []
    # one quant stub per positional input plus one per wrapped kwarg
    num_input_quant_stubs = num_inputs + len(self.kwarg_input_names)
    self.forward_fn = forward_fn
    # Add weight qconfig to forward_fn (in case it has weights)
    qconfig_ = (
        get_qat_qconfig(qproperties)
        if isinstance(qproperties, QConfigProperties)
        else qproperties.get_qconfig()  # QuantizationScheme
    )
    # activations are handled by the explicit stubs below, so the module
    # level qconfig only fake-quantizes weights (Identity activation)
    qconfig = torch_quantization.QConfig(
        activation=torch.nn.Identity,
        weight=qconfig_.weight,
    )
    self.forward_fn.qconfig = qconfig
    # normalize user-supplied qconfig specs into one QConfig per stub
    self.input_qconfigs = self._load_qconfigs(
        name="input_qconfigs",
        expected_len=num_input_quant_stubs,
        qconfigs=input_qconfigs,
        qproperties=qproperties,
    )
    self.output_qconfigs = self._load_qconfigs(
        name="output_qconfigs",
        expected_len=num_outputs,
        qconfigs=output_qconfigs,
        qproperties=qproperties,
    )
    self.input_quant_stubs = torch.nn.ModuleList(
        [torch_quantization.QuantStub() for _ in range(num_input_quant_stubs)]
    )
    # each output gets a QuantStub plus a DeQuantStub for FP32 conversion
    # when torch.quantization.convert is invoked
    self.output_quant_stubs = torch.nn.ModuleList(
        [torch_quantization.QuantStub() for _ in range(num_outputs)]
    )
    self.output_dequant_stubs = torch.nn.ModuleList(
        [torch_quantization.DeQuantStub() for _ in range(num_outputs)]
    )
def forward(self, *args, **kwargs) -> Any:
    """
    Run the wrapped forward function with fake-quantization stubs applied.

    :param args: arguments to forward function; the first num_inputs of these args
        will be wrapped by a QuantStub
    :param kwargs: key word arguments to pass to the wrapped forward function
    :return: outputs of the forward function with a QuantStub applied to the first
        num_outputs outputs
    :raises ValueError: if a declared kwarg to wrap is missing, or the wrapped
        function returns a single Tensor when multiple outputs were expected
    """
    if any(kwarg not in kwargs for kwarg in self.kwarg_input_names):
        raise ValueError(
            f"QATWrapper expected kwargs {self.kwarg_input_names} to be included "
            f"in forward function kwargs. Found {list(kwargs.keys())}. missing "
            f"{[kwarg for kwarg in self.kwarg_input_names if kwarg not in kwargs]}"
        )
    qat_args = []
    # fake quantize positional arguments
    num_args_stubs = len(self.input_quant_stubs) - len(self.kwarg_input_names)
    for idx, arg in enumerate(args):
        if idx < num_args_stubs:
            arg = self.input_quant_stubs[idx](arg)
        qat_args.append(arg)
    # fake quantize key word arguments
    for idx, kwarg in enumerate(self.kwarg_input_names):
        kwargs[kwarg] = self.input_quant_stubs[num_args_stubs + idx](kwargs[kwarg])
    # wrapped forward pass
    outputs = self.forward_fn(*qat_args, **kwargs)
    if len(self.output_quant_stubs) == 0:
        # no output wrapping
        return outputs
    if isinstance(outputs, torch.Tensor):
        if len(self.output_quant_stubs) > 1:
            raise ValueError(
                f"QATWrapper expected {len(self.output_quant_stubs)} outputs in "
                "forward pass. Found one output"
            )
        # output is a single Tensor
        qat_output = self.output_quant_stubs[0](outputs)
        return self.output_dequant_stubs[0](qat_output)
    qat_outputs = []
    for idx, output in enumerate(outputs):
        if idx < len(self.output_quant_stubs):
            output = self.output_quant_stubs[idx](output)
            # BUGFIX: was `self._output_deuant_stubs[idx]`, a nonexistent
            # attribute (typo for output_dequant_stubs) that raised
            # AttributeError for every multi-output forward function
            output = self.output_dequant_stubs[idx](output)
        qat_outputs.append(output)
    return qat_outputs
def configure_qconfig(self):
    """
    Assign the pre-initialized QConfigs onto the input and output QuantStubs,
    carrying along any attached ``quantization_stub`` reference.
    """
    stub_config_pairs = list(zip(self.input_quant_stubs, self.input_qconfigs))
    stub_config_pairs += list(zip(self.output_quant_stubs, self.output_qconfigs))
    for stub, stub_qconfig in stub_config_pairs:
        stub.qconfig = stub_qconfig
        if hasattr(stub_qconfig, "quantization_stub"):
            stub.quantization_stub = stub_qconfig.quantization_stub
def _load_qconfigs(
    name: str,
    expected_len: int,
    qconfigs: Union["QConfig", str, List["QConfig"]],  # noqa: F821
    qproperties: QConfigProperties,
):
    # NOTE(review): no ``self``/``cls`` parameter -- this looks like a
    # @staticmethod whose decorator is outside this view; confirm.
    """
    Normalize ``qconfigs`` (a QConfig, the strings 'asymmetric'/'symmetric',
    or a list of either) into a list of exactly ``expected_len`` QConfigs.

    :param name: argument name, used only in error messages
    :param expected_len: number of stubs the configs must cover
    :param qconfigs: user-supplied qconfig specification
    :param qproperties: QConfigProperties or QuantizationScheme used to build
        QConfigs from the string aliases
    :raises ValueError: on unsupported types, wrong list length, or unknown
        string aliases
    """
    if not isinstance(qconfigs, (str, torch_quantization.QConfig, List)):
        raise ValueError(
            f"QATWrapper {name} must be a string, torch.quantization.QConfig, "
            f"or a List of them. Received a {type(qconfigs)}"
        )
    if isinstance(qconfigs, (str, torch_quantization.QConfig)):
        # single spec: replicate one deep copy per stub
        qconfigs = [deepcopy(qconfigs) for _ in range(expected_len)]
    if len(qconfigs) != expected_len:
        raise ValueError(
            f"QATWrapper {name} should have exactly one qconfig or one for every "
            f"argument ({expected_len}). Given {len(qconfigs)}"
        )
    valid_qconfig_strs = ["asymmetric", "symmetric"]
    # convert string aliases in place into concrete QConfig objects
    for idx, qconfig in enumerate(qconfigs):
        if not isinstance(qconfig, str):
            continue
        if qconfig not in valid_qconfig_strs:
            raise ValueError(
                "QATWrapper qconfig names can either be "
                "torch.quantization.QConfig objects or a string "
                f"in {valid_qconfig_strs} that will be converted to a QConfig. "
                f"Found string with value {qconfig} in {name}"
            )
        qconfig_idx = None
        if isinstance(qproperties, QConfigProperties):
            qproperties_idx = deepcopy(qproperties)
            qproperties_idx.symmetric_activations = qconfig == "symmetric"
            qconfig_idx = get_qat_qconfig(qproperties_idx)
        else:
            # qproperties is a QuantizationScheme
            scheme_idx = deepcopy(qproperties)
            symmetric = qconfig == "symmetric"
            # always use output_activations of scheme because the activations
            # of the QuantStub() are the ones tracked
            if scheme_idx.output_activations is not None:
                # NOTE(review): mutates input_activations despite the comment
                # above referring to output_activations -- verify intent
                scheme_idx.input_activations.symmetric = symmetric
            else:
                scheme_idx.output_activations = QuantizationArgs(
                    symmetric=symmetric
                )
            qconfig_idx = scheme_idx.get_qconfig()
            qconfig_idx.quantization_scheme = scheme_idx
        qconfigs[idx] = qconfig_idx
    return qconfigs
The provided code snippet includes necessary dependencies for implementing the `configure_module_qat_wrappers` function. Write a Python function `def configure_module_qat_wrappers( module: Module, qproperties: QConfigProperties, )` to solve the following problem:
if any submodule of the given module has the attribute wrap_qat == True, then it will be replaced by a QATWrapper of it created by QATWrapper.from_module. Other named kwargs to the QATWrapper constructor must be contained in a dictionary under an attribute named `qat_wrapper_kwargs` :param module: module to potentially wrap the submodules of :param qproperties: properties used to define QConfig.
Here is the function:
def configure_module_qat_wrappers(
    module: Module,
    qproperties: QConfigProperties,
):
    """
    Recursively replace any submodule whose ``wrap_qat`` attribute is True
    with a QATWrapper built via QATWrapper.from_module. Extra QATWrapper
    constructor kwargs may be supplied on the submodule through a dict
    attribute named ``qat_wrapper_kwargs``.

    :param module: module to potentially wrap the submodules of
    :param qproperties: properties used to define QConfig.
    """
    for child_name, child in module.named_children():
        if getattr(child, "wrap_qat", False):
            wrapped = QATWrapper.from_module(module=child, qproperties=qproperties)
            setattr(module, child_name, wrapped)
        # recurse into the original (pre-wrap) child module
        configure_module_qat_wrappers(module=child, qproperties=qproperties)
21,473 |
from copy import deepcopy
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
import torch.nn.intrinsic as nni
from packaging import version
from torch import quantization as torch_quantization
from torch.nn import BatchNorm2d, Conv2d, Embedding, Module, ReLU
from sparseml.modifiers.quantization.utils.quantization_scheme import (
QuantizationArgs,
QuantizationScheme,
get_observer,
)
from sparseml.pytorch.nn import ReLU as ReLU_nm
from sparseml.pytorch.utils import get_layer
# Module types eligible for QuantWrapper wrapping: conv/linear layers and
# their torch.nn.intrinsic fused variants. Falls back to None when nni is
# unavailable.
_QUANTIZABLE_MODULE_TYPES = (
    {
        # Conv based layers
        torch.nn.Conv1d,
        torch.nn.Conv2d,
        torch.nn.Conv3d,
        nni.ConvBn1d,
        nni.ConvBn2d,
        nni.ConvBn3d,
        nni.ConvReLU1d,
        nni.ConvReLU2d,
        nni.ConvReLU3d,
        nni.ConvBnReLU1d,
        nni.ConvBnReLU2d,
        nni.ConvBnReLU3d,
        # Linear Layers
        torch.nn.Linear,
        nni.LinearReLU,
    }
    if nni  # nni will always import if torch.quantization is available
    else None
)
The provided code snippet includes necessary dependencies for implementing the `add_quant_dequant` function. Write a Python function `def add_quant_dequant( module: torch.nn.Module, name=None, parent_module=None, layer_class_names=None )` to solve the following problem:
Wraps all Conv and Linear submodule with a qconfig with a QuantWrapper :param module: the module to modify :param name: name of the module to modify; default to None :param parent_module: parent module containing the module to modify; default to None :param layer_class_names: list of module class names to be added to the list of quantizable modules :return: the modified module
Here is the function:
def add_quant_dequant(
    module: torch.nn.Module, name=None, parent_module=None, layer_class_names=None
):
    """
    Wraps all Conv and Linear submodule with a qconfig with a QuantWrapper
    :param module: the module to modify
    :param name: name of the module to modify; default to None
    :param parent_module: parent module containing the module to modify; default to None
    :param layer_class_names: list of module class names to be added to the
        list of quantizable modules
    :return: the modified module
    """
    # NOTE: named_children is a generator; it is consumed at most once --
    # either by the len(list(...)) leaf check or by the recursion loop below
    named_children = module.named_children()
    is_quantizable = type(module) in _QUANTIZABLE_MODULE_TYPES
    if layer_class_names:
        is_quantizable = (
            is_quantizable or module.__class__.__name__ in layer_class_names
        )
    if is_quantizable and hasattr(module, "qconfig") and module.qconfig:
        # quantizable module with a qconfig set -> wrap it
        module = torch_quantization.QuantWrapper(module)
        # only re-attach to the parent when this module is a leaf
        if parent_module is not None and len(list(named_children)) <= 0:
            if "." in name:
                # unwrap name under parent module, nested through multiple submodules
                name_parts = name.split(".")
                for name_part in name_parts[:-1]:
                    parent_module = getattr(parent_module, name_part)
                name = name_parts[-1]
            # set parent module child to the newly wrapped module
            setattr(parent_module, name, module)
    else:
        # not quantizable itself: recurse into children
        for name, child in named_children:
            setattr(
                module,
                name,
                add_quant_dequant(child, layer_class_names=layer_class_names),
            )
    return module
21,474 |
from copy import deepcopy
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
import torch.nn.intrinsic as nni
from packaging import version
from torch import quantization as torch_quantization
from torch.nn import BatchNorm2d, Conv2d, Embedding, Module, ReLU
from sparseml.modifiers.quantization.utils.quantization_scheme import (
QuantizationArgs,
QuantizationScheme,
get_observer,
)
from sparseml.pytorch.nn import ReLU as ReLU_nm
from sparseml.pytorch.utils import get_layer
The provided code snippet includes necessary dependencies for implementing the `remove_activation_qat_by_layer_name` function. Write a Python function `def remove_activation_qat_by_layer_name(module: Module, layer_class_names: List[str])` to solve the following problem:
Disables fake quantization of activations for all submodules of the given module with class name layer_class_names :param module: module to remove activation fake quantization for certain layers :param layer_class_names: list of layer class names that should be affected. e.x. ["Linear"]
Here is the function:
def remove_activation_qat_by_layer_name(module: Module, layer_class_names: List[str]):
    """
    Disable activation fake quantization for every submodule of ``module``
    whose class name appears in ``layer_class_names``, leaving the existing
    weight observer of each matched submodule untouched.

    :param module: module to remove activation fake quantization for certain layers
    :param layer_class_names: list of layer class names that should be affected.
        e.x. ["Linear"]
    """
    targets = set(layer_class_names)
    for sub in module.modules():
        if sub.__class__.__name__ not in targets or not hasattr(sub, "qconfig"):
            continue
        # an Identity activation observer means no activation quantization
        sub.qconfig = torch_quantization.QConfig(
            activation=torch.nn.Identity,
            weight=sub.qconfig.weight,
        )
21,475 |
from copy import deepcopy
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
import torch.nn.intrinsic as nni
from packaging import version
from torch import quantization as torch_quantization
from torch.nn import BatchNorm2d, Conv2d, Embedding, Module, ReLU
from sparseml.modifiers.quantization.utils.quantization_scheme import (
QuantizationArgs,
QuantizationScheme,
get_observer,
)
from sparseml.pytorch.nn import ReLU as ReLU_nm
from sparseml.pytorch.utils import get_layer
def freeze_bn_stats(module: Module):
    """Call ``freeze_bn_stats`` on the module when it exposes one (no-op otherwise)."""
    if not hasattr(module, "freeze_bn_stats"):
        return
    module.freeze_bn_stats()
21,476 |
from copy import deepcopy
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
import torch.nn.intrinsic as nni
from packaging import version
from torch import quantization as torch_quantization
from torch.nn import BatchNorm2d, Conv2d, Embedding, Module, ReLU
from sparseml.modifiers.quantization.utils.quantization_scheme import (
QuantizationArgs,
QuantizationScheme,
get_observer,
)
from sparseml.pytorch.nn import ReLU as ReLU_nm
from sparseml.pytorch.utils import get_layer
_PARSED_TORCH_VERSION = version.parse(torch.__version__)
def _delete_get_block_hooks(
    module: Module,
    fuse_blocks: List[List[str]],
) -> List[Tuple[Any, Any]]:
    """
    Detach and return the forward pre/post hooks of every layer in each fuse
    block. torch.quantization fusion errors when a module carries more than
    one hook, so hooks are stripped here and re-attached after fusion.

    :param module: root module containing the named layers
    :param fuse_blocks: lists of layer names, one list per block to fuse
    :return: one (pre_hooks, post_hooks) tuple per block
    """
    extracted = []
    for block_names in fuse_blocks:
        pre, post = [], []
        for layer_name in block_names:
            # resolve the Module object for this layer name
            layer = get_layer(layer_name, module)
            # save then de-register this layer's hooks
            pre.extend(layer._forward_pre_hooks.values())
            post.extend(layer._forward_hooks.values())
            layer._forward_pre_hooks.clear()
            layer._forward_hooks.clear()
        extracted.append((pre, post))
    return extracted
def _add_fused_block_hooks(module: Module, block_hooks: List[Tuple[Any, Any]]):
    """
    Re-attach previously extracted hooks onto the fused modules, in order.

    :param module: root module after fusion
    :param block_hooks: (pre_hooks, post_hooks) bundles saved before fusion
    :raises RuntimeError: if the number of fused modules found does not match
        the number of saved hook bundles
    """
    fused = [m for m in module.modules() if isinstance(m, _FUSED_MODULE_TYPES)]
    if len(fused) != len(block_hooks):
        raise RuntimeError(
            f"Number of fused modules ({len(fused)}) after layer fusion in "
            f"module {module.__class__.__name__}. does not match expected "
            f"({len(block_hooks)}). Module may have already been fused or block "
            "skipped during torch.quantization.fuse_modules"
        )
    for fused_module, (pre_hooks, post_hooks) in zip(fused, block_hooks):
        for hook in pre_hooks:
            fused_module.register_forward_pre_hook(hook)
        for hook in post_hooks:
            fused_module.register_forward_hook(hook)
def _set_submodule(root_module: Module, sub_module_path, sub_module: Module):
sub_module.training = root_module.training
current_module = root_module
sub_module_path = sub_module_path.split(".")
for child_module in sub_module_path[:-1]:
current_module = getattr(current_module, child_module)
setattr(current_module, sub_module_path[-1], sub_module)
def _wrap_bn_sub_class(bn_subclass, override_forward=True):
batch_norm = BatchNorm2d(bn_subclass.num_features)
batch_norm.__dict__ = bn_subclass.__dict__
if override_forward:
batch_norm.forward = bn_subclass.forward
del bn_subclass
return batch_norm
The provided code snippet includes necessary dependencies for implementing the `fuse_module_conv_bn_relus` function. Write a Python function `def fuse_module_conv_bn_relus( module: Module, inplace: bool = True, override_bn_subclasses_forward: Union[bool, str] = True, ) -> Module` to solve the following problem:
Performs fusion of Conv2d, BatchNorm2d, and ReLU layers found in the given module. To be fused, these layers must appear sequentially in module.named_modules() and be in the same submodule. Fuses either Conv2d -> BatchNorm2d, Conv2d -> ReLU, or Conv2d -> BatchNorm2d -> ReLU blocks If this function does not fuse the model in the desired way, implement an in place fusing function for the model. :param module: the module to fuse :param inplace: set True to perform fusions in-place. default is True :param override_bn_subclasses_forward: if True, modules that are subclasses of BatchNorm2d will be modified to be BatchNorm2d but with the forward pass and state variables copied from the subclass. This is so these BN modules can pass PyTorch type checking when fusing. Can set to "override-only" and only parameters will be overwritten, not the forward pass. Default is True :return: the fused module
Here is the function:
def fuse_module_conv_bn_relus(
    module: Module,
    inplace: bool = True,
    override_bn_subclasses_forward: Union[bool, str] = True,
) -> Module:
    """
    Performs fusion of Conv2d, BatchNorm2d, and ReLU layers found in the
    given module. To be fused, these layers must appear sequentially in
    module.named_modules() and be in the same submodule.
    Fuses either Conv2d -> BatchNorm2d, Conv2d -> ReLU, or
    Conv2d -> BatchNorm2d -> ReLU blocks
    If this function does not fuse the model in the desired way, implement an
    in place fusing function for the model.
    :param module: the module to fuse
    :param inplace: set True to perform fusions in-place. default is True
    :param override_bn_subclasses_forward: if True, modules that are subclasses of
        BatchNorm2d will be modified to be BatchNorm2d but with the forward
        pass and state variables copied from the subclass. This is so these
        BN modules can pass PyTorch type checking when fusing. Can set to
        "override-only" and only parameters will be overwritten, not the
        forward pass. Default is True
    :return: the fused module
    """
    if torch_quantization is None:
        raise RuntimeError(
            "Unable to import package torch.quantization. "
            "Try upgrading your PyTorch version."
        )
    if not inplace:
        module = deepcopy(module)
    # accumulate candidate fuse blocks as lists of submodule names
    conv_blocks = []
    current_block = []
    current_block_submodule_name = ""
    for name, layer in module.named_modules():
        # parent path of this layer; layers only fuse within one submodule
        submodule_name = ".".join(name.split(".")[:-1])
        if (
            len(current_block) == 1  # [Conv2d]
            and isinstance(layer, BatchNorm2d)
            and submodule_name == current_block_submodule_name
        ) or (
            len(current_block) in [1, 2]  # [Conv2d] or [Conv2d, BatchNorm2d]
            and isinstance(layer, ReLU)
            and not isinstance(current_block[-1], ReLU)
            and submodule_name == current_block_submodule_name
        ):
            if isinstance(layer, ReLU_nm):
                # swap sparseml ReLU for a plain torch ReLU so fusion accepts it
                _set_submodule(module, name, ReLU(inplace=layer.inplace))
            if isinstance(layer, BatchNorm2d) and not type(layer) is BatchNorm2d:
                if not override_bn_subclasses_forward:
                    raise RuntimeError(
                        "Detected a Conv-BN block that uses a subclass of BatchNorm2d. "
                        "This will cause a type error when fusing with PyTorch, "
                        "set override_bn_subclasses_forward to True or 'override-only "
                        "to modify this BN subclass to be a BatchNorm2d object"
                    )
                # swap BN subclass with overwritten BN class that will pass torch
                # type checking
                overwritten_bn = _wrap_bn_sub_class(
                    layer,
                    override_forward=override_bn_subclasses_forward != "override-only",
                )
                # NOTE(review): trailing comma makes this a one-tuple
                # expression; harmless but looks unintentional
                _set_submodule(module, name, overwritten_bn),
            current_block.append(name)
        else:
            # current layer does not extend the block: flush it
            if current_block:
                if len(current_block) > 1:  # cannot fuse single module
                    conv_blocks.append(current_block)
                current_block = []
                current_block_submodule_name = ""
            if isinstance(layer, Conv2d):
                # a Conv2d starts a new candidate block
                current_block.append(name)
                current_block_submodule_name = submodule_name
    # flush the trailing block, if any
    if len(current_block) > 1:
        conv_blocks.append(current_block)
    if conv_blocks:
        # manually save and move hooks surrounding fused blocks
        # into new fused modules due to torch.quantization
        # error when a module has more than one hook
        block_hooks = _delete_get_block_hooks(module, conv_blocks)
        # run torch fusion; API location moved in torch 1.10
        if _PARSED_TORCH_VERSION < version.parse("1.10.0"):
            torch_quantization.fuse_modules(module, conv_blocks, inplace=True)
        else:
            if module.training:
                torch.ao.quantization.fuse_modules_qat(
                    module, conv_blocks, inplace=True
                )
            else:
                torch.ao.quantization.fuse_modules(module, conv_blocks, inplace=True)
        # add hooks back
        _add_fused_block_hooks(module, block_hooks)
    return module
21,477 |
from typing import Dict, List, Optional
import torch
from packaging import version
from torch.nn import Identity, Module
from sparseml.modifiers.quantization.utils.constants import (
FUSED_MODULE_NAMES,
NON_QUANTIZABLE_MODULE_NAMES,
)
from sparseml.modifiers.quantization.utils.fake_quant_wrapper import FakeQuantizeWrapper
from sparseml.modifiers.quantization.utils.helpers import (
QATWrapper,
configure_module_default_qconfigs,
prepare_embeddings_qat,
)
from sparseml.modifiers.quantization.utils.quantization_scheme import QuantizationScheme
from sparseml.pytorch.utils import get_layer
from sparseml.utils.fsdp.context import fix_fsdp_module_name
def is_qat_helper_module(module: Module) -> bool:
    """
    :param module: module to check
    :return: True if module is an instance of a torch QAT helper class
    """
    # prefer FakeQuantizeBase which was introduced around torch 1.9
    base_fake_quant = getattr(
        torch_quantization, "FakeQuantizeBase", torch_quantization.FakeQuantize
    )
    helper_types = (
        base_fake_quant,
        FakeQuantizeWrapper,
        torch_quantization.ObserverBase,
        torch_quantization.DeQuantStub,
        torch_quantization.QuantStub,
        Identity,
    )
    return isinstance(module, helper_types)
def is_quantizable_module(
    module: Module,
    exclude_module_types: Optional[List[str]] = None,
) -> bool:
    """
    :param module: module to check
    :param exclude_module_types: string names of modules to not include for
        quantization. Default None
    :return: True if the module should be quantized: its type name is not in
        exclude_module_types or NON_QUANTIZABLE_MODULE_NAMES, and it is a
        torch fused module, a QuantWrapper, or a leaf-level module (any
        children are QAT helper modules only)
    """
    excluded = set(exclude_module_types or [])
    excluded.update(NON_QUANTIZABLE_MODULE_NAMES)
    type_name = module.__class__.__name__
    if type_name in excluded:
        return False
    if type_name in FUSED_MODULE_NAMES:
        return True
    if isinstance(module, torch_quantization.QuantWrapper):
        return True
    # no children (leaf modules) evaluate to all([]) - (True)
    return all(is_qat_helper_module(child) for child in module.children())
def raise_if_already_quantized(module_name: str, module: Module):
    """
    :param module_name: name of module to check for quantization
    :param module: module to check for quantization
    :raises RuntimeError: if module is already quantized, it cannot be
        re-quantized
    """
    if not is_module_quantized(module):
        return
    raise RuntimeError(
        f"Unable to quantize module {module_name}, as it has already been "
        "quantized. Ensure your input recipe does not contain multiple "
        "QuantizationModifiers that act on the same module. "
    )
def _match_submodule_name_or_type(
    submodule: Module, submodule_name: str, names_or_types: List[str]
) -> Optional[str]:
    """
    Match a submodule against a list of module type names and submodule-name
    prefixes. Matching preference:
    1. exact module type name (returned immediately)
    2. the longest matching submodule-name prefix
    Returns None when nothing matches.
    """
    best_prefix = ""
    for candidate in names_or_types:
        # normalize FSDP-mangled names and strip a leading "module." wrapper
        normalized = fix_fsdp_module_name(submodule_name[:])
        if normalized.startswith("module."):
            normalized = normalized[7:]
        if candidate == submodule.__class__.__name__:
            # type match wins outright
            return candidate
        if normalized.startswith(candidate) and len(candidate) > len(best_prefix):
            # keep the most specific (longest) submodule-name match
            best_prefix = candidate
    return best_prefix or None
def _inject_qat_wrapper(
    root_module: Module,
    target_submodule_name: str,
    quantization_scheme: QuantizationScheme,
):
    """Replace the named submodule of root_module with a QATWrapper around it."""
    *parent_parts, leaf_name = target_submodule_name.split(".")
    parent_module = get_layer(".".join(parent_parts), root_module)
    target = getattr(parent_module, leaf_name)
    setattr(
        parent_module,
        leaf_name,
        QATWrapper.from_module(target, quantization_scheme),
    )
def _validate_set_module_schemes(
    model: Module,
    scheme_overrides: Optional[Dict[str, QuantizationScheme]] = None,
    ignore: Optional[List[str]] = None,
):
    # Validates that every entry in scheme_overrides / ignore matches at least
    # one submodule name prefix or module type in the model; raises ValueError
    # listing the unmatched entries otherwise.
    def _get_unmatched_types_or_names(types_or_names):
        # collect entries that match no submodule name prefix or class name
        unmatched = []
        for type_or_name in types_or_names:
            matched = False
            for submodule_name, submodule in model.named_modules():
                # normalize FSDP-mangled names and strip a leading "module."
                name_to_compare = submodule_name[:]
                name_to_compare = fix_fsdp_module_name(name_to_compare)
                if name_to_compare.startswith("module."):
                    name_to_compare = name_to_compare[7:]
                if name_to_compare.startswith(type_or_name) or (
                    submodule.__class__.__name__ == type_or_name
                ):
                    matched = True
                    break
            if not matched:
                unmatched.append(type_or_name)
        return unmatched

    def _build_error_str(property_name, unmatched_values):
        return (
            f"{property_name} contains submodule names or module types "
            "that do not match to any submodules in the model. "
            f"unmatched values: {unmatched_values}"
        )

    unmatched_scheme_overrides = _get_unmatched_types_or_names(scheme_overrides)
    if unmatched_scheme_overrides:
        raise ValueError(
            _build_error_str("scheme_overrides", unmatched_scheme_overrides)
        )
    unmatched_ignore = _get_unmatched_types_or_names(ignore)
    if unmatched_ignore:
        raise ValueError(_build_error_str("ignore", unmatched_ignore))
class QuantizationScheme(BaseModel):
    """
    Class composed of QuantizationArgs to build QConfig and QuantWrapper objects for
    quantizing models. Provides a simple user interface for defining how inputs,
    weights, and outputs should be quantized
    """

    def __init__(self, *args, **kwargs):
        # support for loading from yaml str
        # (yaml serializes None fields as the string "null"; map them back)
        args = [arg if arg != "null" else None for arg in args]
        for key, val in kwargs.items():
            if val == "null":
                kwargs[key] = None
        super().__init__(*args, **kwargs)

    input_activations: Optional[QuantizationArgs] = Field(
        default_factory=QuantizationArgs.default_activation_args,
        description=(
            "target quantization setting for input activations. Set to None to "
            "not quantize input activations. Default is 8 bits asymmetric"
        ),
    )
    weights: Optional[QuantizationArgs] = Field(
        default_factory=QuantizationArgs.default_weight_args,
        description=(
            "target quantization setting for model weights. Set to None to "
            "not quantize weights. Default is 8 bits symmetric"
        ),
    )
    output_activations: Optional[QuantizationArgs] = Field(
        default=None,
        description=(
            "target quantization setting for output activations. Set to None to "
            "not quantize output activations. Default is None"
        ),
    )
    target_hardware: Optional[str] = Field(
        default=None,
        description=(
            "target deployment runtime/hardware name to be set by default "
            "classmethods. Default is None"
        ),
    )

    # NOTE(review): load/deepsparse/tensorrt take ``cls`` -- they appear to be
    # @classmethods whose decorators are not visible in this view; confirm.
    def load(
        cls,
        scheme: QuantizationSchemeLoadable,
        default: Optional["QuantizationScheme"] = None,
    ) -> "QuantizationScheme":
        """
        :param scheme: QuantizationScheme, dict representation of scheme,
            or string alias of a scheme to load. Valid strings:
            ['default', 'deepsparse', 'tensorrt']
        :param default: default QuantizationScheme to override 'default' scheme
            with
        :return: constructed QuantizationScheme object from the given scheme;
            if given a dict, returns QuantizationScheme.parse_obj(scheme), string
            input will return the default QuantizationScheme if set to 'default'.
        """
        if isinstance(scheme, cls):
            return scheme
        elif scheme is None or scheme == "default":
            # if no default override, defaults to QuantizationScheme()
            return deepcopy(default) or cls()
        elif isinstance(scheme, str):
            if scheme == "deepsparse":
                return cls.deepsparse()
            elif scheme == "tensorrt":
                return cls.tensorrt()
            raise ValueError(
                f"Unrecognized QuantizationScheme string alias {scheme}. "
                "Valid strings: ['default', 'deepsparse', 'tensorrt']"
            )
        elif isinstance(scheme, dict):
            # default to dict
            scheme = {key: _parse_quantization_arg(arg) for key, arg in scheme.items()}
            return cls.parse_obj(scheme)
        else:
            raise ValueError(
                f"Unrecognized type {type(scheme)} for QuantizationScheme.load, "
                "expected one of: [QuantizationScheme, Dict, str, None]"
            )

    def deepsparse(cls) -> "QuantizationScheme":
        """
        :return: QuantizationScheme for deepsparse targeted deployments -
            int8, symmetric weights, asymmetric inputs, no output quantization
        """
        return cls(
            input_activations=QuantizationArgs(num_bits=8, symmetric=False),
            weights=QuantizationArgs(num_bits=8, symmetric=True),
            output_activations=None,
            target_hardware="deepsparse",
        )

    def tensorrt(cls) -> "QuantizationScheme":
        """
        :return: QuantizationScheme for tensorrt targeted deployments -
            compatibility with explict quantization as supported by TensorRT 8.2:
            int8, symmetric for both weights and inputs, no output quantization
        """
        return cls(
            input_activations=QuantizationArgs(num_bits=8, symmetric=True),
            weights=QuantizationArgs(num_bits=8, symmetric=True),
            output_activations=None,
            target_hardware="tensorrt",
        )

    def get_qconfig(self) -> "torch.quantization.QConfig":
        """
        :return: QConfig for Modules (output activations used,
            use QuantWrapper for inputs)
        """
        qconfig = _get_qconfig(self.output_activations, self.weights)
        # add reference to this quantization scheme for reference
        qconfig.quantization_scheme = self
        return qconfig

    def get_wrapper_qconfig(self) -> "torch.quantization.QConfig":
        """
        :return: QConfig for QuantWrapper objects (input activations used)
        """
        qconfig = _get_qconfig(self.input_activations, None)
        # add reference to this quantization scheme for reference
        qconfig.quantization_scheme = self
        return qconfig

    def __str__(self) -> str:
        """
        :return: YAML friendly string serialization
        """
        dict_repr = self.dict()
        # serialize None fields as the string "null" for yaml round-tripping
        dict_repr = {
            key: val if val is not None else "null" for key, val in dict_repr.items()
        }
        return str(dict_repr)
The provided code snippet includes necessary dependencies for implementing the `set_quantization_schemes` function. Write a Python function `def set_quantization_schemes( model: Module, scheme: QuantizationScheme, scheme_overrides: Optional[Dict[str, QuantizationScheme]] = None, ignore: Optional[List[str]] = None, strict: bool = True, )` to solve the following problem:
Sets an appropriate `quantization_scheme` to targeted quantizable submodules :param model: module to attach QuantizationSchemes to :param scheme: default scheme to add to a target module unless overwritten by another scheme :param scheme_overrides: dictionary of module type names or submodule names mapped to a quantization scheme to override with. If a submodule matches to multiple submodule overrides and/or a module type, module type will take the highest priority followed by the longest matched submodule name :param ignore: string names of modules type names or submodule names to not include for quantization. Default None :param strict: if True, will raise an error if any module types or submodules in scheme_overrides or ignore are not found in the given module. Default True
Here is the function:
def set_quantization_schemes(
    model: Module,
    scheme: QuantizationScheme,
    scheme_overrides: Optional[Dict[str, QuantizationScheme]] = None,
    ignore: Optional[List[str]] = None,
    strict: bool = True,
):
    """
    Sets an appropriate `quantization_scheme` to targeted quantizable submodules

    :param model: module to attach QuantizationSchemes to
    :param scheme: default scheme to add to a target module unless overwritten
        by another scheme
    :param scheme_overrides: dictionary of module type names or submodule names
        mapped to a quantization scheme to override with. If a submodule matches
        to multiple submodule overrides and/or a module type, module type will
        take the highest priority followed by the longest matched submodule name
    :param ignore: string names of module types or submodule names to not include
        for quantization. Default None
    :param strict: if True, will raise an error if any module types or submodules in
        scheme_overrides or ignore are not found in the given module. Default True
    """
    scheme_overrides = scheme_overrides or {}

    if strict:
        _validate_set_module_schemes(model, scheme_overrides, ignore)

    # collect wrap_qat targets first: injecting QATWrappers while iterating
    # named_modules would mutate the module tree mid-iteration
    qat_wrap_targets: Dict[str, QuantizationScheme] = {}

    for name, layer in model.named_modules():
        if ignore and _match_submodule_name_or_type(layer, name, ignore):
            # layer type or graph section explicitly ignored, skip
            continue

        if isinstance(layer, torch_quantization.QuantWrapper):
            # special case to catch QuantizableMatMul children
            if ignore and _match_submodule_name_or_type(layer.module, name, ignore):
                continue

        if is_qat_helper_module(layer):
            # child of an already quantized module; any clash was caught at the
            # parent, nothing more to do here
            continue

        # resolve the scheme for this layer: overrides win over the default
        matched_key = _match_submodule_name_or_type(layer, name, scheme_overrides)
        if matched_key is None:
            layer_scheme = scheme
        else:
            layer_scheme = scheme_overrides[matched_key]
        type_override = matched_key == layer.__class__.__name__

        if getattr(layer, "wrap_qat", False):
            # wrap_qat overrides default scheme behavior
            qat_wrap_targets[name] = layer_scheme
        elif type_override or is_quantizable_module(layer):
            # base quantizable module or user specifically targeted module type
            raise_if_already_quantized(name, layer)
            layer.quantization_scheme = layer_scheme

    # inject any targeted QATWrappers
    for wrapped_name, wrapped_scheme in qat_wrap_targets.items():
        _inject_qat_wrapper(model, wrapped_name, wrapped_scheme)
21,478 | from typing import Dict, List, Optional
import torch
from packaging import version
from torch.nn import Identity, Module
from sparseml.modifiers.quantization.utils.constants import (
FUSED_MODULE_NAMES,
NON_QUANTIZABLE_MODULE_NAMES,
)
from sparseml.modifiers.quantization.utils.fake_quant_wrapper import FakeQuantizeWrapper
from sparseml.modifiers.quantization.utils.helpers import (
QATWrapper,
configure_module_default_qconfigs,
prepare_embeddings_qat,
)
from sparseml.modifiers.quantization.utils.quantization_scheme import QuantizationScheme
from sparseml.pytorch.utils import get_layer
from sparseml.utils.fsdp.context import fix_fsdp_module_name
try:
from torch import quantization as torch_quantization
from torch.nn import intrinsic as torch_intrinsic
except Exception:
torch_quantization = None
torch_intrinsic = None
def set_qconfigs_from_quantization_schemes(module: Module):
    """
    Sets `qconfig` properties to the given module and its submodule
    based on any potentially assigned quantization schemes

    :param module: module to set qconfig properties for
    """
    for child in module.modules():
        if not hasattr(child, "quantization_scheme"):
            continue
        # the scheme may have been serialized to a dict or str; re-load it
        scheme = QuantizationScheme.load(child.quantization_scheme)
        if isinstance(child, torch_quantization.QuantWrapper):
            # wrappers quantize their inputs; propagate the qconfig to the
            # wrapper's quant stub as well
            child.qconfig = scheme.get_wrapper_qconfig()
            child.quant.qconfig = child.qconfig
        else:
            child.qconfig = scheme.get_qconfig()
def add_input_activation_quant_wrappers(module: Module) -> Module:
    """
    Adds QuantWrapper objects to wrap submodules that include quantization
    schemes targeting input activations

    :param module: module to add input activation QuantWrappers for
    :return: the updated module - necessary in case top level module is wrapped
        as in-place modification will not support it
    """
    scheme = getattr(module, "quantization_scheme", None)
    wrap_inputs = (
        scheme is not None
        and scheme.input_activations is not None
        and not isinstance(module, torch.nn.quantized.FloatFunctional)
    )
    if wrap_inputs:
        # wrap module with a QuantWrapper and carry the scheme over to it
        wrapped = torch_quantization.QuantWrapper(module)
        wrapped.quantization_scheme = scheme
        # assumes no nested children of a wrapped block need input activation
        # quantization, so no further recursion in this branch
        return wrapped
    # recurse into children, re-assigning in case a child was wrapped
    for child_name, child in module.named_children():
        setattr(module, child_name, add_input_activation_quant_wrappers(child))
    return module
def add_output_activation_observers(module: Module):
    """
    implementation of torch.quantization add_observers_ that only adds observers
    according to attached quantization_scheme properties. the existing implementation
    (1.9+) includes its own logic for propagating including overriding set qconfigs
    for certain activations without the ability to disable this behavior

    :param module: module to add output activation observers to
    """
    # adapted from torch/ao/quantization/quantize.py::_add_observer_
    # source: https://github.com/pytorch/pytorch/blob/v1.13.0/torch/ao/quantization/quantize.py#L135 # noqa: E501
    try:
        device = next(module.parameters()).device
    except StopIteration:
        # default to CPU if module has no parameters
        device = "cpu"

    def _needs_observer(target_module: Module):
        # combines logic from multiple places of original implementation which
        # mostly checked for existence of a qconfig and if the target was a leaf
        # module
        if not hasattr(target_module, "quantization_scheme") or isinstance(
            target_module, torch_quantization.QuantWrapper
        ):
            # submodule not targeted for quantization, already has attached
            # output observer, or is QuantWrapper (quant wrapper delegates to children)
            return False
        if hasattr(target_module, "activation_post_process"):
            # activation post process is set, only mark for potential overriding
            # if it is an identity (this comes up when the property is set for
            # later overriding such as FloatFunctional
            return isinstance(target_module.activation_post_process, Identity)
        for descendent_module in target_module.modules():
            if descendent_module is target_module:
                continue  # skip itself
            descendent_scheme = getattr(descendent_module, "quantization_scheme", None)
            if descendent_scheme is not None and (
                descendent_scheme.output_activations is not None
            ):
                # a descendent of this module targets output activations, return False
                return False
        # module has a quantization scheme and no descendents track output activations
        return True

    def _observer_forward_hook(self, inp, output):
        # reference for output activation observer hook to register
        return self.activation_post_process(output)

    def _add_activation_post_process(target_module: Module):
        # get output observer
        # NOTE(review): reads the loop variable `submodule` from the enclosing
        # scope rather than `target_module` — equivalent here because the only
        # call site passes `submodule`, but fragile if reused
        output_observer = submodule.qconfig.activation()
        output_observer.to(device)
        # add an activation post process module
        target_module.add_module("activation_post_process", output_observer)
        # add hook to call observer after output activation has been returned
        handle = target_module.register_forward_hook(_observer_forward_hook)
        # move the observer hook to the front of the hook ordering so it runs
        # before any other registered forward hooks
        target_module._forward_hooks.move_to_end(handle.id, last=False)

    for submodule in module.modules():
        if not _needs_observer(submodule):
            # submodule not targeted for quantization, already has attached
            # output observer, or has a descendent that tracks output activations
            continue
        # extract qconfig and observer from qconfig
        if not hasattr(submodule, "qconfig"):
            # set qconfig from scheme if not already set
            set_qconfigs_from_quantization_schemes(submodule)
            assert hasattr(submodule, "qconfig")
        # create observer, add as child module, and register hook to call
        _add_activation_post_process(submodule)
def _reattach_quantization_schemes(module: Module):
    # torch.prepare_qat swaps base modules for their QAT implementations, which
    # can drop quantization_scheme attributes; restore them from the reference
    # stored on each qconfig
    for child in module.modules():
        qconfig = getattr(child, "qconfig", None)
        if not qconfig or hasattr(child, "quantization_scheme"):
            # no qconfig to recover from, or scheme already present
            continue
        scheme = getattr(qconfig, "quantization_scheme", None)
        if not scheme:
            continue
        child.quantization_scheme = scheme
def _get_qat_module_mappings() -> Dict[Module, Module]:
    # map of float module types to their QAT counterparts; API name changed
    # across torch versions, so probe for the modern accessor first
    mappings = torch_quantization.quantization_mappings
    modern_getter = getattr(mappings, "get_default_qat_module_mappings", None)
    if modern_getter is None:
        # legacy torch versions
        return mappings.get_qat_module_mappings()
    # latest
    return modern_getter()
def configure_module_default_qconfigs(module: Module):
    """
    if any submodule of the given module has a configure_qconfig function,
    configure_qconfig will be called on that submodule to set the qconfig(s) of that
    module to its default

    :param module: module to set qconfigs for
    """
    for child in module.modules():
        configure_fn = getattr(child, "configure_qconfig", None)
        if callable(configure_fn):
            configure_fn()
def prepare_embeddings_qat(
    module: Module,
    qproperties: Optional[QConfigProperties] = None,
    qconfig: Optional["torch.quantization.QConfig"] = None,
):
    """
    adds a fake quantize call to the weights of any Embedding modules in the given
    module. The used qconfig will have a hierarchy of
    submodule.qconfig -> qconfig -> qproperties

    :param module: module to run QAT for the embeddings of
    :param qconfig: qconfig to generate the fake quantize ops from if qconfig
        not set in module. Default uses INT8 asymmetric range
    :param qproperties: properties used to define QConfig if qconfig not present
    """
    if qconfig is None and qproperties is not None:
        # embedding weights are quantized asymmetrically
        qproperties.symmetric_weights = False
        qconfig = get_qat_qconfig(qproperties)
    for child in module.modules():
        effective_qconfig = getattr(child, "qconfig", None) or qconfig
        if isinstance(child, Embedding) and effective_qconfig is not None:
            _prepare_qat_embedding(child, effective_qconfig)
The provided code snippet includes necessary dependencies for implementing the `convert_module_qat_from_schemes` function. Write a Python function `def convert_module_qat_from_schemes(module: Module)` to solve the following problem:
Converts submodules with set quantization_schemes into quantization aware modules with FakeQuantize modules in the model :param module: module to convert to QAT mode
Here is the function:
def convert_module_qat_from_schemes(module: Module):
    """
    Converts submodules with set quantization_schemes into quantization aware modules
    with FakeQuantize modules in the model

    :param module: module to convert to QAT mode
    """
    # wrap layers whose schemes target input activations
    module = add_input_activation_quant_wrappers(module)
    # translate attached schemes into torch qconfig properties
    set_qconfigs_from_quantization_schemes(module)
    # allow modules exposing a configure_qconfig hook to override their qconfigs
    configure_module_default_qconfigs(module)
    # on torch >= 1.8 pass an empty custom config dict so torch does not
    # override any of the qconfigs set above
    if version.parse(torch.__version__) >= version.parse("1.8.0"):
        convert_kwargs = dict(convert_custom_config_dict={})
    else:
        convert_kwargs = {}
    torch_quantization.convert(
        module,
        mapping=_get_qat_module_mappings(),
        inplace=True,
        remove_qconfig=False,
        **convert_kwargs,
    )
    # re-attach any quantization schemes lost during conversion
    _reattach_quantization_schemes(module)
    # add observers for output activations
    add_output_activation_observers(module)
    # manual pass to convert relevant Embedding layers
    prepare_embeddings_qat(module)
21,479 | from typing import Dict, List, Optional
import torch
from packaging import version
from torch.nn import Identity, Module
from sparseml.modifiers.quantization.utils.constants import (
FUSED_MODULE_NAMES,
NON_QUANTIZABLE_MODULE_NAMES,
)
from sparseml.modifiers.quantization.utils.fake_quant_wrapper import FakeQuantizeWrapper
from sparseml.modifiers.quantization.utils.helpers import (
QATWrapper,
configure_module_default_qconfigs,
prepare_embeddings_qat,
)
from sparseml.modifiers.quantization.utils.quantization_scheme import QuantizationScheme
from sparseml.pytorch.utils import get_layer
from sparseml.utils.fsdp.context import fix_fsdp_module_name
The provided code snippet includes necessary dependencies for implementing the `raise_if_torch_quantization_not_available` function. Write a Python function `def raise_if_torch_quantization_not_available()` to solve the following problem:
:raises: RuntimeError if the installed torch version does not include support for quantization aware training
Here is the function:
def raise_if_torch_quantization_not_available():
    """
    :raises: RuntimeError if the installed torch version does not include
        support for quantization aware training
    """
    # both modules are imported at file load inside a try/except and left as
    # None on failure; either being None means QAT is unsupported
    quantization_missing = torch_quantization is None or torch_intrinsic is None
    if quantization_missing:
        raise RuntimeError(
            "Unable to import package torch.quantization and/or "
            "torch.nn.intrinsic. "
            "Try upgrading your PyTorch version to use the QuantizationModifier."
        )
21,480 | from copy import deepcopy
from functools import partial
from typing import Any, Dict, Optional, Union
import torch
from packaging import version
from pydantic import BaseModel, Field, validator
from torch.nn import Identity
from sparseml.modifiers.quantization.utils.fake_quant_wrapper import FakeQuantizeWrapper
class QuantizationArgs(BaseModel):
    """
    Class representing user facing arguments to define quantization Observers of
    activations or weights in a network
    """

    # number of quantization bits; 8 matches int8/uint8 deployments
    num_bits: int = Field(
        default=8, description="number of bits to target for quantization"
    )
    # symmetric quantization centers the range on zero (no zero-point offset)
    symmetric: bool = Field(
        default=False,
        description="set True to use symmetric quantization. Default False",
    )
    # validated below to be either 'tensor' (per-tensor) or 'channel' (per-channel)
    strategy: str = Field(
        default="tensor",
        description=(
            "scope of the quantization to be applied. can be 'tensor' or 'channel'"
        ),
    )
    # forwarded verbatim to the torch Observer constructor
    kwargs: Dict[str, Any] = Field(
        default_factory=dict,
        description=(
            "optional dict of kwargs to be passed directly to torch quantization "
            "Observers constructor excluding quantization range or symmetry"
        ),
    )

    # NOTE(review): takes `cls` but has no @classmethod decorator — appears
    # stripped during extraction; confirm against the original source
    def default_activation_args(cls):
        """
        :return: default 8 bits asymmetric settings
        """
        return cls(num_bits=8, symmetric=False)

    # NOTE(review): likely also a stripped @classmethod — confirm
    def default_weight_args(cls):
        """
        :return: default 8 bits symmetric settings
        """
        return cls(num_bits=8, symmetric=True)

    def get_observer(self) -> "torch.quantization.FakeQuantize":
        """
        :return: torch quantization FakeQuantize built based on these QuantizationArgs
        """
        return get_observer(
            symmetric=self.symmetric,
            strategy=self.strategy,
            dtype=torch.qint8,
            bits=self.num_bits,
            reduce_range=self.kwargs.get("reduce_range", False),
            qconfig_kwargs=self.kwargs,
        )

    # NOTE(review): likely a stripped pydantic @validator("strategy") — confirm
    def validate_strategy(cls, value):
        valid_scopes = ["tensor", "channel"]
        if value not in valid_scopes:
            raise ValueError(f"`strategy` must be one of {valid_scopes}, got {value}")
        return value
def get_observer(
    symmetric: bool,
    strategy: str,
    dtype: torch.dtype,
    bits: int,
    reduce_range: bool,
    qconfig_kwargs: Dict[str, Any],
):
    """
    Construct a FakeQuantizeWrapper factory configured for the requested
    quantization settings.

    :param symmetric: True for symmetric qschemes, False for affine
    :param strategy: 'channel' for per-channel observers, anything else
        falls back to per-tensor
    :param dtype: torch quantized dtype to target (e.g. torch.qint8)
    :param bits: number of quantization bits, used to compute the qrange
    :param reduce_range: forwarded to the torch observer
    :param qconfig_kwargs: extra kwargs merged into the observer kwargs last,
        so they can override any of the values computed here
    :return: FakeQuantizeWrapper.with_args(...) partial ready to instantiate
    """
    quant_min, quant_max, is_custom_qrange = compute_range(dtype, bits)

    if strategy == "channel":
        qscheme = torch.per_channel_symmetric if symmetric else torch.per_channel_affine
        observer_cls = torch_quantization.MovingAveragePerChannelMinMaxObserver
        observer_kwargs = dict(
            ch_axis=0,
            dtype=dtype,
            qscheme=qscheme,
            reduce_range=reduce_range,
        )
    else:  # default to tensor strategy
        qscheme = torch.per_tensor_symmetric if symmetric else torch.per_tensor_affine
        observer_cls = torch_quantization.MovingAverageMinMaxObserver
        observer_kwargs = dict(
            dtype=dtype,
            qscheme=qscheme,
            reduce_range=reduce_range,
        )
    """
    in torch 1.9.1, quant_min and quant_max are not passed to observer:
    https://github.com/pytorch/pytorch/blob/v1.9.1/torch/quantization/fake_quantize.py#L109
    however in 1.12.0, this is fixed so both are passed to observer:
    https://github.com/pytorch/pytorch/blob/v1.12.1/torch/ao/quantization/fake_quantize.py#L132

    Passing quant_min/quant_max to observer means the observer will have
    `self.has_customized_qrange == True` in both 1.9.1 and 1.12.0.

    For whatever reason, both versions calculate zero point for
    quint8 differently **if there is a customized_qrange**
    1. customized qrange has zero point of 127
    2. non-customized has zero point of 128.
    source:
    https://github.com/pytorch/pytorch/blob/v1.12.1/torch/ao/quantization/observer.py#L293

    **we want to ensure that the zero point is 128**
    see https://github.com/neuralmagic/sparseml/pull/604
    """
    if is_custom_qrange:
        # for both versions we need to include the custom min/max values in kwargs
        observer_kwargs["quant_min"] = quant_min
        observer_kwargs["quant_max"] = quant_max
        if _TORCH_PRE_112:
            # pre 1.12, the observer doesn't get passed the quant_min/quant_max values,
            # so we patch them in to the constructor of the observer
            observer_cls = partial(
                observer_cls, quant_min=quant_min, quant_max=quant_max
            )
    else:
        # if using a non custom qrange, we can rely on default values used by
        # the observers
        if _TORCH_PRE_112:
            # pre 1.12, the observer doesn't get passed the quant_min/quant_max values,
            # so we are safe to pass these to FakeQuantize
            observer_kwargs["quant_min"] = quant_min
            observer_kwargs["quant_max"] = quant_max
        else:
            # post 1.12 we cannot pass them to the observer since that will set
            # has_customized_qrange. instead we rely on the default values
            # being equal to the `quant_min` and `quant_max` here.
            pass
    observer_kwargs["observer"] = observer_cls
    # user-provided kwargs are applied last so they win over computed values
    observer_kwargs.update(qconfig_kwargs or {})
    observer = FakeQuantizeWrapper.with_args(**observer_kwargs)

    return observer
def _get_qconfig(
    activation_args: Optional[QuantizationArgs], weight_args: Optional[QuantizationArgs]
) -> "torch.quantization.QConfig":
    # an unset field falls back to a pass-through Identity (no quantization)
    activation = activation_args.get_observer() if activation_args else Identity
    weight = weight_args.get_observer() if weight_args else Identity
    return torch_quantization.QConfig(activation=activation, weight=weight)
21,481 | from copy import deepcopy
from functools import partial
from typing import Any, Dict, Optional, Union
import torch
from packaging import version
from pydantic import BaseModel, Field, validator
from torch.nn import Identity
from sparseml.modifiers.quantization.utils.fake_quant_wrapper import FakeQuantizeWrapper
def _parse_quantization_arg(arg: Any):
    # yaml serialization writes Python None as the string "None"; undo that
    return None if arg == "None" else arg
21,482 | import logging
from typing import Any, Dict, Optional
import torch
from torch.nn import Module
from sparseml.core import Event, EventType, State
from sparseml.modifiers.quantization.base import QuantizationModifier
from sparseml.modifiers.quantization.utils.helpers import (
configure_module_bn_wrappers,
freeze_bn_stats,
fuse_module_conv_bn_relus,
)
from sparseml.modifiers.quantization.utils.quantization_scheme import (
QuantizationScheme,
QuantizationSchemeLoadable,
)
from sparseml.modifiers.quantization.utils.quantize import (
convert_module_qat_from_schemes,
raise_if_torch_quantization_not_available,
set_quantization_schemes,
)
from sparseml.modifiers.utils.pytorch_helpers import run_calibration_forward
from sparseml.utils.fsdp.context import summon_full_params_context
class _QuantizationSchemesDict(dict):
    # dict wrapper whose __str__ expands contained schemes to their dict
    # representations so the mapping serializes cleanly to yaml
    def __str__(self):
        expanded = {name: scheme.dict() for name, scheme in self.items()}
        return str(expanded)
# Union of the accepted input forms for a quantization scheme: an existing
# QuantizationScheme instance, a dict representation, a string alias
# ('default', 'deepsparse', 'tensorrt'), or None for the default scheme.
QuantizationSchemeLoadable = Union[
    "QuantizationScheme",
    DictQuantizationScheme,
    str,
    None,
]
class QuantizationScheme(BaseModel):
    """
    Class composed of QuantizationArgs to build QConfig and QuantWrapper objects for
    quantizing models. Provides a simple user interface for defining how inputs,
    weights, and outputs should be quantized
    """

    def __init__(self, *args, **kwargs):
        # support for loading from yaml str: "null" strings stand in for None
        args = [arg if arg != "null" else None for arg in args]
        for key, val in kwargs.items():
            if val == "null":
                kwargs[key] = None
        super().__init__(*args, **kwargs)

    input_activations: Optional[QuantizationArgs] = Field(
        default_factory=QuantizationArgs.default_activation_args,
        description=(
            "target quantization setting for input activations. Set to None to "
            "not quantize input activations. Default is 8 bits asymmetric"
        ),
    )
    weights: Optional[QuantizationArgs] = Field(
        default_factory=QuantizationArgs.default_weight_args,
        description=(
            "target quantization setting for model weights. Set to None to "
            "not quantize weights. Default is 8 bits symmetric"
        ),
    )
    output_activations: Optional[QuantizationArgs] = Field(
        default=None,
        description=(
            "target quantization setting for output activations. Set to None to "
            "not quantize output activations. Default is None"
        ),
    )
    target_hardware: Optional[str] = Field(
        default=None,
        description=(
            "target deployment runtime/hardware name to be set by default "
            "classmethods. Default is None"
        ),
    )

    # NOTE(review): load/deepsparse/tensorrt take `cls` but show no
    # @classmethod decorators — likely stripped during extraction; confirm
    def load(
        cls,
        scheme: QuantizationSchemeLoadable,
        default: Optional["QuantizationScheme"] = None,
    ) -> "QuantizationScheme":
        """
        :param scheme: QuantizationScheme, dict representation of scheme,
            or string alias of a scheme to load. Valid strings:
            ['default', 'deepsparse', 'tensorrt']
        :param default: default QuantizationScheme to override 'default' scheme
            with
        :return: constructed QuantizationScheme object from the given scheme;
            if given a dict, returns QuantizationScheme.parse_obj(scheme), string
            input will return the default QuantizationScheme if set to 'default'.
        """
        if isinstance(scheme, cls):
            return scheme
        elif scheme is None or scheme == "default":
            # if no default override, defaults to QuantizationScheme()
            return deepcopy(default) or cls()
        elif isinstance(scheme, str):
            if scheme == "deepsparse":
                return cls.deepsparse()
            elif scheme == "tensorrt":
                return cls.tensorrt()
            raise ValueError(
                f"Unrecognized QuantizationScheme string alias {scheme}. "
                "Valid strings: ['default', 'deepsparse', 'tensorrt']"
            )
        elif isinstance(scheme, dict):
            # default to dict; "None" strings are converted back to None first
            scheme = {key: _parse_quantization_arg(arg) for key, arg in scheme.items()}
            return cls.parse_obj(scheme)
        else:
            raise ValueError(
                f"Unrecognized type {type(scheme)} for QuantizationScheme.load, "
                "expected one of: [QuantizationScheme, Dict, str, None]"
            )

    def deepsparse(cls) -> "QuantizationScheme":
        """
        :return: QuantizationScheme for deepsparse targeted deployments -
            int8, symmetric weights, asymmetric inputs, no output quantization
        """
        return cls(
            input_activations=QuantizationArgs(num_bits=8, symmetric=False),
            weights=QuantizationArgs(num_bits=8, symmetric=True),
            output_activations=None,
            target_hardware="deepsparse",
        )

    def tensorrt(cls) -> "QuantizationScheme":
        """
        :return: QuantizationScheme for tensorrt targeted deployments -
            compatibility with explict quantization as supported by TensorRT 8.2:
            int8, symmetric for both weights and inputs, no output quantization
        """
        return cls(
            input_activations=QuantizationArgs(num_bits=8, symmetric=True),
            weights=QuantizationArgs(num_bits=8, symmetric=True),
            output_activations=None,
            target_hardware="tensorrt",
        )

    def get_qconfig(self) -> "torch.quantization.QConfig":
        """
        :return: QConfig for Modules (output activations used,
            use QuantWrapper for inputs)
        """
        qconfig = _get_qconfig(self.output_activations, self.weights)
        # add reference to this quantization scheme for reference
        qconfig.quantization_scheme = self
        return qconfig

    def get_wrapper_qconfig(self) -> "torch.quantization.QConfig":
        """
        :return: QConfig for QuantWrapper objects (input activations used)
        """
        qconfig = _get_qconfig(self.input_activations, None)
        # add reference to this quantization scheme for reference
        qconfig.quantization_scheme = self
        return qconfig

    def __str__(self) -> str:
        """
        :return: YAML friendly string serialization
        """
        # None values become "null" so the output round-trips through yaml
        dict_repr = self.dict()
        dict_repr = {
            key: val if val is not None else "null" for key, val in dict_repr.items()
        }
        return str(dict_repr)
def _load_quantization_schemes_dict(
    schemes_dict: Optional[Dict[str, QuantizationSchemeLoadable]],
    default_scheme: QuantizationScheme,
) -> Dict[str, QuantizationScheme]:
    # normalize a user supplied submodule -> scheme-like mapping into fully
    # constructed QuantizationScheme objects; empty dict when nothing is given
    if schemes_dict is None:
        return {}
    loaded = {
        submodule_name: QuantizationScheme.load(raw_scheme, default=default_scheme)
        for submodule_name, raw_scheme in schemes_dict.items()
    }
    return _QuantizationSchemesDict(loaded)
21,483 | from itertools import cycle
from typing import Callable, Dict, Optional
import torch
from torch.nn import Module
from torch.utils.data import DataLoader
from tqdm import tqdm
from sparseml.pytorch.utils import tensors_module_forward, tensors_to_device
def apply_pad_mask_to_batch(batch: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """
    Apply a mask to the input ids of a batch. This is used to zero out
    padding tokens so they do not contribute to the hessian calculation in the
    SparseGPT algorithm

    :param batch: batch to apply padding to if it exists
    :return: batch with padding zeroed out in the input_ids
    """
    # element-wise multiply zeroes ids wherever the attention mask is 0
    masked_ids = batch["input_ids"] * batch["attention_mask"]
    batch["input_ids"] = masked_ids
    return batch
The provided code snippet includes necessary dependencies for implementing the `run_calibration_forward` function. Write a Python function `def run_calibration_forward( model: Module, calibration_dataloader: DataLoader, num_calibration_steps: Optional[int] = None, calibration_function: Optional[Callable] = None, device: Optional[str] = None, mask_padding: bool = False, )` to solve the following problem:
Helper function used by one-shot modifiers, runs calibration data through a model to update modifier statistics and trigger hooks :param model: PyTorch model to run :param calibration_dataloader: data to use for calibration :param num_calibration_steps: number of items in calibration_dataloader to process, None or a negative number to process all available data :param calibration_function: option to pass a custom forward function for model :param device: option to move the model to a specific device before calibration :param mask_padding: whether to zero out padding tokens during calibration
Here is the function:
def run_calibration_forward(
    model: Module,
    calibration_dataloader: DataLoader,
    num_calibration_steps: Optional[int] = None,
    calibration_function: Optional[Callable] = None,
    device: Optional[str] = None,
    mask_padding: bool = False,
):
    """
    Helper function used by one-shot modifiers, runs calibration data through a model to
    update modifier statistics and trigger hooks

    :param model: PyTorch model to run
    :param calibration_dataloader: data to use for calibration
    :param num_calibration_steps: number of items in calibration_dataloader to process,
        None or a negative number to process all available data
    :param calibration_function: option to pass a custom forward function for model
    :param device: option to move the model to a specific device before calibration
    :param mask_padding: whether to zero out padding tokens during calibration
    """
    model.eval()

    forward_fn: Callable = (
        calibration_function if calibration_function else tensors_module_forward
    )

    # move model to optional specified device if it is not already there
    model_device = next(model.parameters()).device
    if device is not None and model_device != device:
        model.to(device)
        model_device = next(model.parameters()).device

    # BUGFIX: a negative num_calibration_steps is documented to mean "process
    # all available data", but previously it cycled the loader and broke on the
    # first batch (batch_idx >= negative is always True), running zero batches.
    # Treat None and negative uniformly as "run the loader once in full".
    run_all = num_calibration_steps is None or num_calibration_steps < 0
    _dataloader = (
        calibration_dataloader if run_all else cycle(calibration_dataloader)
    )

    # run through the calibration data
    for batch_idx, batch in enumerate(tqdm(_dataloader)):
        if not run_all and batch_idx >= num_calibration_steps:
            break
        if mask_padding:
            batch = apply_pad_mask_to_batch(batch)
        batch = tensors_to_device(batch, model_device)
        with torch.no_grad():
            forward_fn(batch, module=model)
21,484 | from pathlib import Path
import click
def export():
    # import inside the function so the CLI loads fast and yolact is only
    # required when the export command actually runs
    from yolact.export import main as run_export

    run_export()
21,485 | from pathlib import Path
import click
def train():
    # lazy import: defer pulling in yolact until the train command is invoked
    from yolact.train import main as run_train

    run_train()
21,486 | from pathlib import Path
import click
def val():
    # lazy import: defer pulling in yolact until the val command is invoked
    from yolact.eval import main as run_val

    run_val()
21,487 | from pathlib import Path
import click
The provided code snippet includes necessary dependencies for implementing the `download` function. Write a Python function `def download(test: bool = False)` to solve the following problem:
A command line callable to download training/test coco dataset for yolact
Here is the function:
def download(test: bool = False):
    """
    A command line callable to download training/test coco dataset for yolact

    :param test: True to download the test split, False for the training split
    :raises ValueError: if the download script fails for any reason
    """
    import os as _os
    import subprocess as _subprocess

    try:
        # the download shell scripts live next to this module
        yolact_dir = Path(_os.path.abspath(__file__)).parent.resolve()
        script_name = "COCO_test.sh" if test else "COCO.sh"
        _subprocess.check_call(["bash", _os.path.join(yolact_dir, script_name)])
    except Exception as download_err:
        raise ValueError(
            "Unable to download coco with the "
            f"following exception {download_err}"
        )
21,488 | import json
from abc import ABC, abstractmethod
from collections import OrderedDict
from copy import deepcopy
from enum import Enum
from typing import Any, Dict, List, Optional, Set, Union
import numpy
from pydantic import BaseModel, Field, root_validator
from sparseml.utils import clean_path, create_parent_dirs
class ModelResult(Result):
    """
    Class for storing the results of an analysis for an entire model
    """

    # identifies which analysis produced this result; used to select the
    # concrete Result subclass when deserializing
    analysis_type: str = Field(
        title="analysis_type",
        description="name of the type of analysis that was performed",
    )
    # per-layer results keyed by layer name
    layer_results: Dict[str, Result] = Field(
        title="layer_results",
        default_factory=dict,
        description=(
            "dict of layer results to initialize for this analysis. should map "
            "layer name to Result object"
        ),
    )
# maps serialized analysis_type strings to the concrete result class used to
# re-hydrate them when loading a ModelResult from a dict
_ANALYSIS_TYPE_TO_CLASS = {
    PruningSensitivityResultTypes.LOSS.value: PruningSensitivityResult,
    PruningSensitivityResultTypes.PERF.value: PruningSensitivityResult,
}
def _model_result_from_dict(model_result_dict: Dict[str, Any]) -> ModelResult:
    """Deserialize a serialized ModelResult dict into the matching result class."""
    # a serialized result must carry its analysis type so the right
    # subclass can be chosen for parsing
    if "analysis_type" not in model_result_dict:
        raise ValueError(
            "'analysis_type' must be a dict key of a ModelResult dict found keys: "
            f"{list(model_result_dict.keys())}"
        )
    analysis_type = model_result_dict["analysis_type"]
    result_class = _ANALYSIS_TYPE_TO_CLASS.get(analysis_type, ModelResult)
    return result_class.parse_obj(model_result_dict)
return result_class.parse_obj(model_result_dict) | null |
21,489 | import json
from abc import ABC, abstractmethod
from collections import OrderedDict
from copy import deepcopy
from enum import Enum
from typing import Any, Dict, List, Optional, Set, Union
import numpy
from pydantic import BaseModel, Field, root_validator
from sparseml.utils import clean_path, create_parent_dirs
class LayerInfo(BaseModel):
def check_params_if_prunable(_, values):
def linear_layer(
cls, name: str, in_channels: int, out_channels: int, bias: bool, **kwargs
) -> "LayerInfo":
def conv_layer(
cls,
name: str,
in_channels: int,
out_channels: int,
kernel_shape: List[int],
bias: bool,
groups: int = 1,
stride: Union[int, List[int]] = 1,
padding: List[int] = None,
**kwargs,
) -> "LayerInfo":
def _is_layer_info_dict(obj: Any) -> bool:
return isinstance(obj, Dict) and all(
isinstance(val, LayerInfo) for val in obj.values()
) | null |
21,490 | import logging
from typing import Any, Dict, List, Optional, Type
from sparseml import Framework, execute_in_sparseml_framework
from sparseml.base import detect_frameworks
from sparseml.sparsification.analyzer import Analyzer
from sparseml.sparsification.recipe_builder import PruningRecipeBuilder
from sparseml.sparsification.recipe_editor import run_avaialble_recipe_editors
_LOGGER = logging.getLogger(__name__)
def detect_frameworks(item: Any) -> List[Framework]:
    """
    Detects the supported ML frameworks for a given item.
    Supported input types are the following:
    - A Framework enum
    - A string of any case representing the name of the framework
      (deepsparse, onnx, keras, pytorch, tensorflow_v1)
    - A supported file type within the framework such as model files:
      (onnx, pth, h5, pb)
    - An object from a supported ML framework such as a model instance
    If the framework cannot be determined, an empty list will be returned

    :param item: The item to detect the ML framework for
    :type item: Any
    :return: The detected ML frameworks from the given item
    :rtype: List[Framework]
    """
    _LOGGER.debug("detecting frameworks for %s", item)
    frameworks = []

    # normalize framework-name strings into Framework enum members first
    if isinstance(item, str) and item.lower().strip() in Framework.__members__:
        _LOGGER.debug("framework detected from Framework string instance")
        item = Framework[item.lower().strip()]

    if isinstance(item, Framework):
        _LOGGER.debug("framework detected from Framework instance")
        if item != Framework.unknown:
            frameworks.append(item)
    else:
        _LOGGER.debug("detecting frameworks by calling into supported frameworks")
        # removed a redundant re-initialization of `frameworks` here; it is
        # already the empty list from above
        for test in Framework:
            if test == Framework.unknown:
                continue
            try:
                detected = _execute_sparseml_package_function(
                    test, "detect_framework", item
                )
                if detected != Framework.unknown:
                    frameworks.append(detected)
            except Exception as err:
                # errors are expected if the framework is not installed, log as debug
                _LOGGER.debug(
                    "error while calling detect_framework for %s: %s", test, err
                )

    _LOGGER.info("detected frameworks of %s from %s", frameworks, item)
    return frameworks
class Analyzer(ABC):
    """
    Base abstract class for model analyzers. Analyzers should be able to detect
    if given a ModelInfo object and other keyword inputs if they should run their
    analysis.

    :param model_info: ModelInfo object of the model to be analyzed. after
        running this analysis, the analysis_results of this ModelInfo object
        will be updated
    """

    def __init__(self, model_info: ModelInfo):
        self._model_info = model_info
        # concrete subclasses build the initial result skeleton here
        self.result = self._initialize_result()  # type: ModelResult

    # NOTE(review): takes `cls` and raises NotImplementedError -- presumably
    # decorated @classmethod/@abstractmethod in the original source; decorators
    # are not visible in this extract, confirm against upstream
    def available(cls, model_info: ModelInfo, **kwargs) -> bool:
        """
        Abstract method that subclasses must implement to determine if
        given the model info and keyword arguments that the Analyzer can
        run its analysis

        :param model_info: ModelInfo object of the model to be analyzed
        :param kwargs: additional keyword arguments that will be passed to the run
            function
        :return: True if given the inputs, this analyzer can run its analysis. False
            otherwise
        """
        raise NotImplementedError()

    def run(self, show_progress: bool = False, **kwargs) -> ModelResult:
        """
        Runs the given analysis by calling to the underlying run_iter method

        :param show_progress: set True to display a tqdm progress bar. default is False
        :param kwargs: key word arguments validated by available() to run this analysis
        :return: the final result from this analysis. this result will also be
            added to the ModelInfo object of this Analyzer
        """
        bar = None
        prev_progress = 0
        for progress, _ in self.run_iter(**kwargs):
            # create the bar lazily so total_steps is known from the first yield
            if show_progress and bar is None:
                bar = tqdm(
                    total=progress.total_steps,
                    desc=f"{self.result.analysis_type} Analyzer Progress",
                )
            if bar is not None:
                # update by the delta since tqdm.update is incremental
                bar.update(progress.step - prev_progress)
            prev_progress = progress.step
        if bar is not None:
            bar.close()
        return self.result

    def run_iter(
        self,
        **kwargs,
    ) -> Generator[Tuple[AnalyzerProgress, ModelResult], None, None]:
        """
        runs the analysis stepwise using the abstract _run_iter method yielding an
        AnalyzerProgress and the in progress ModelResult at each step
        After the last step, the final results will be added to the given ModelInfo

        :param kwargs: key word arguments validated by available() to run this analysis
        """
        for progress, result in self._run_iter(**kwargs):
            yield progress, result
        # iteration complete: publish the final result onto the ModelInfo
        self._model_info.add_analysis_result(self.result)

    def _initialize_result(self) -> ModelResult:
        # sets the initial ModelResult object for this analysis
        # such as analysis_type, layer selection, and result value initialization
        raise NotImplementedError()

    def _run_iter(
        self,
        **kwargs,
    ) -> Generator[Tuple[AnalyzerProgress, ModelResult], None, None]:
        # runs the analysis and updates self.result
        raise NotImplementedError()
class PruningRecipeBuilder(RecipeYAMLBuilder):
    """
    Builds a basic, editable pruning recipe based on a given model info
    standardized variables may be modified by constructor, or later on

    | Sample yaml:
    |   num_epochs: 100
    |   init_lr: 0.0001
    |   pruning_start_target: 0.0
    |   pruning_end_target: 0.6
    |   pruning_update_frequency: 0.5
    |   base_target_sparsity: 0.8
    |   mask_type: unstructured
    |
    |   training_modifiers:
    |       - !EpochRangeModifier
    |           start_epoch: 0.0
    |           end_epoch: eval(num_epochs)
    |
    |       - !SetLearningRateModifier
    |           start_epoch: 0.0
    |           learning_rate: eval(init_lr)
    |
    |   pruning_modifiers:
    |       - !GMPruningModifier
    |           params:
    |               - ...  # based on prunable param names found in ModelInfo
    |           init_sparsity: 0.0
    |           final_sparsity: eval(base_target_sparsity)
    |           start_epoch: eval(pruning_start_target * num_epochs)
    |           end_epoch: eval(pruning_end_target * num_epochs)
    |           update_frequency: eval(pruning_update_frequency)
    |           mask_type: eval(mask_type)

    :param model_info: model info object to extract layer information from
    :param num_epochs: total number of epochs the recipe should run for. Default is 100
    :param init_lr: initial learning rate value. Default is 0.0001
    :param pruning_start_target: epoch that pruning should begin. this value
        should be in range [0.0,1.0] representing the fraction of num_epochs
        that the start epoch should be. (start_epoch=pruning_start_target*num_epochs).
        Default is 0.0
    :param pruning_end_target: epoch that pruning should complete. this value
        should be in range [0.0,1.0] representing the fraction of num_epochs
        that the end epoch should be. (end_epoch=pruning_end_target*num_epochs).
        Default is 0.6
    :param base_target_sparsity: target sparsity for pruning layers to. Default is 0.8
    :param pruning_update_frequency: update frequency for pruning modifier.
        Default is 0.5
    :param mask_type: mask type to set the pruning modifier to. Default is unstructured
    """

    def __init__(
        self,
        model_info: ModelInfo,
        num_epochs: float = 100.0,
        init_lr: float = 0.0001,
        pruning_start_target: float = 0.0,
        pruning_end_target: float = 0.6,
        base_target_sparsity: float = 0.8,
        pruning_update_frequency: float = 0.5,
        mask_type: str = "unstructured",
    ):
        # store the standardized values on the instance; the __setattr__
        # override below keeps the internal variables dict in sync on later
        # updates to these attributes
        self.num_epochs = num_epochs
        self.init_lr = init_lr
        self.pruning_start_target = pruning_start_target
        self.pruning_end_target = pruning_end_target
        self.pruning_update_frequency = pruning_update_frequency
        self.base_target_sparsity = base_target_sparsity
        self.mask_type = mask_type
        super().__init__(
            variables=dict(
                num_epochs=self.num_epochs,
                init_lr=self.init_lr,
                pruning_start_target=self.pruning_start_target,
                pruning_end_target=self.pruning_end_target,
                pruning_update_frequency=self.pruning_update_frequency,
                base_target_sparsity=self.base_target_sparsity,
                mask_type=self.mask_type,
            ),
            modifier_groups=dict(
                training_modifiers=self._base_training_modifiers(),
                pruning_modifiers=self._base_pruning_modifiers(model_info),
            ),
        )

    def __setattr__(self, key: str, value: Any):
        # allow updates to base variables to propagate to the internal vars dict
        if key in dir(self) and self.has_variable(key):
            self.set_variable(key, value)
        super().__setattr__(key, value)

    # NOTE(review): no `self`/`cls` parameter -- presumably decorated
    # @staticmethod in the original source; decorators are not visible here
    def _base_training_modifiers() -> List[ModifierYAMLBuilder]:
        # epoch range plus a fixed learning-rate modifier, both driven by
        # the eval()-able recipe variables
        epoch_modifier = ModifierYAMLBuilder(
            EpochRangeModifier, start_epoch=0.0, end_epoch="eval(num_epochs)"
        )
        init_lr_modifier = ModifierYAMLBuilder(
            SetLearningRateModifier,
            learning_rate="eval(init_lr)",
        )
        return [epoch_modifier, init_lr_modifier]

    # NOTE(review): likewise presumably a @staticmethod in the original source
    def _base_pruning_modifiers(model_info: ModelInfo) -> List[ModifierYAMLBuilder]:
        # a single GMPruningModifier covering all prunable params in the model
        pruning_modifier = ModifierYAMLBuilder(
            GMPruningModifier,
            params=list(model_info.get_prunable_param_names()),
            init_sparsity=0.0,
            final_sparsity="eval(base_target_sparsity)",
            start_epoch="eval(pruning_start_target * num_epochs)",
            end_epoch="eval(pruning_end_target * num_epochs)",
            update_frequency="eval(pruning_update_frequency)",
            mask_type="eval(mask_type)",
        )
        return [pruning_modifier]

    def build_yaml_str(self) -> str:
        """
        :return: yaml string representation of this recipe in standard format
        """
        # sort param lists for a deterministic, diff-friendly recipe output
        for pruning_modifier in self.get_modifier_builders(GMPruningModifier):
            params = pruning_modifier.params
            if isinstance(params, list):
                pruning_modifier.params = list(sorted(params))
        return super().build_yaml_str()
def run_avaialble_recipe_editors(
    model_info: ModelInfo, recipe_builder: RecipeYAMLBuilder
):
    """
    runs all recipe editors that are available for the given model info and builder

    NOTE: the misspelling 'avaialble' in the function name is preserved for
    backwards compatibility with existing callers.

    :param model_info: ModelInfo object of the model the recipe is to be created
        for; should contain layer information and analysis
    :param recipe_builder: RecipeYAMLBuilder of the recipe to update
    """
    editor_names = [editor.__name__ for editor in _EDITORS]
    _LOGGER.debug(
        "checking eligibility and running recipe editors: %s", ", ".join(editor_names)
    )
    for editor_name, editor in zip(editor_names, _EDITORS):
        if not editor.available(model_info, recipe_builder):
            continue
        # use lazy %-style args (consistent with the rest of this module)
        # instead of an eager f-string so the message is only built if emitted
        _LOGGER.info("Running recipe editor %s", editor_name)
        editor.update_recipe(model_info, recipe_builder)
The provided code snippet includes necessary dependencies for implementing the `create_pruning_recipe` function. Write a Python function `def create_pruning_recipe( model: Any, save_path: Optional[str] = None, analyzer_kwargs: Optional[Dict[str, Any]] = None, skip_analyzer_types: Optional[List[Type[Analyzer]]] = None, ) -> Optional[str]` to solve the following problem:
:param model: loaded framework model or model file path of a model to create a recipe for :param save_path: optional path to save the created recipe to :param analyzer_kwargs: keyword arguments to be passed to the available() and run() functions of analyzer objects :param skip_analyzer_types: list of Analyzer class types not to run even if available :return: string of the created recipe if save_path is None, otherwise None after saving the recipe to save_path
Here is the function:
def create_pruning_recipe(
    model: Any,
    save_path: Optional[str] = None,
    analyzer_kwargs: Optional[Dict[str, Any]] = None,
    skip_analyzer_types: Optional[List[Type[Analyzer]]] = None,
) -> Optional[str]:
    """
    Create a pruning recipe for the given model by running all available
    framework analyzers and recipe editors.

    :param model: loaded framework model or model file path of a model to create
        a recipe for
    :param save_path: optional path to save the created recipe to
    :param analyzer_kwargs: keyword arguments to be passed to the available()
        and run() functions of analyzer objects
    :param skip_analyzer_types: list of Analyzer class types not to run even
        if available
    :raises ValueError: if no framework can be detected for the given model
    :return: string of the created recipe if save_path is None, otherwise None
        after writing the recipe to save_path
    """
    frameworks = detect_frameworks(model)
    if not frameworks:
        # guard before indexing below; an empty detection result previously
        # surfaced as an opaque IndexError instead of a clear error
        raise ValueError(f"Unable to detect framework for model {model}")
    framework = Framework.onnx if Framework.onnx in frameworks else frameworks[0]
    if framework is Framework.unknown:
        raise ValueError(f"Unable to detect framework for model {model}")
    _LOGGER.info(f"Creating pruning recipe for model of framework {framework}")

    # Build ModelInfo
    model_info = execute_in_sparseml_framework(model, "ModelInfo", model=model)
    model = model_info.validate_model(model)  # perform any loading/parsing

    # run available analyses
    analyzer_impls = execute_in_sparseml_framework(framework, "get_analyzer_impls")
    analyzer_kwargs = analyzer_kwargs or {}
    if "model" not in analyzer_kwargs:
        analyzer_kwargs["model"] = model
    if "show_progress" not in analyzer_kwargs:
        analyzer_kwargs["show_progress"] = True

    for analyzer_impl in analyzer_impls:
        if skip_analyzer_types and (
            analyzer_impl in skip_analyzer_types
            or issubclass(analyzer_impl, tuple(skip_analyzer_types))
        ):
            _LOGGER.debug(
                "skipping analyzer %s due to skip_analyzer_types",
                analyzer_impl.__name__,
            )
            continue
        if not analyzer_impl.available(model_info, **analyzer_kwargs):
            _LOGGER.debug("analyzer %s unavailable", analyzer_impl.__name__)
            continue
        _LOGGER.info(f"Running {analyzer_impl.__name__}")
        analyzer_impl(model_info).run(**analyzer_kwargs)

    # build pruning recipe and run editors
    pruning_recipe = PruningRecipeBuilder(model_info=model_info)
    run_avaialble_recipe_editors(model_info, pruning_recipe)

    if save_path is None:
        return pruning_recipe.build_yaml_str()
    else:
        _LOGGER.info(f"Saving oracle recipe to {save_path}")
        pruning_recipe.save_yaml(save_path)
21,491 | import textwrap
from copy import deepcopy
from typing import Any, Dict, List, Optional, Type, Union
import yaml
from sparseml.optim import BaseModifier, ModifierProp
from sparseml.sparsification.model_info import ModelInfo
from sparseml.sparsification.modifier_epoch import EpochRangeModifier
from sparseml.sparsification.modifier_lr import SetLearningRateModifier
from sparseml.sparsification.modifier_pruning import GMPruningModifier
from sparseml.utils import create_parent_dirs
The provided code snippet includes necessary dependencies for implementing the `to_yaml_str` function. Write a Python function `def to_yaml_str(val: Any) -> str` to solve the following problem:
:param val: value to get yaml str value of :return: direct str cast of val if it is an int, float, or bool, otherwise the stripped output of yaml.dump
Here is the function:
def to_yaml_str(val: Any) -> str:
    """
    :param val: value to get yaml str value of
    :return: direct str cast of val if it is an int, float, or bool, otherwise
        the stripped output of yaml.dump
    """
    if isinstance(val, (str, int, float, bool)):
        return str(val)
    # non-scalar values are serialized through yaml; containers get a leading
    # newline so they nest correctly when embedded in a larger yaml document.
    # use builtin dict/list for the runtime check; isinstance against
    # typing.Dict/typing.List is deprecated since Python 3.9
    yaml_str = yaml.dump(val).strip()
    if isinstance(val, (dict, list)):
        yaml_str = "\n" + yaml_str
    return yaml_str
21,492 | import argparse
import logging
import os
from enum import Enum
from typing import Any, List, Optional
from pydantic import BaseModel, Field
from sparseml.base import execute_in_sparseml_framework
from sparseml.utils import clean_path, create_parent_dirs
class SparsificationInfo(BaseModel):
    """
    Class for storing the information for sparsifying in a given framework.
    Extends pydantics BaseModel class for serialization to and from json
    in addition to proper type checking on construction.
    """

    modifiers: List[ModifierInfo] = Field(
        default=[],
        title="modifiers",
        description="A list of the information for the available modifiers",
    )

    def type_modifiers(self, type_: ModifierType) -> List[ModifierInfo]:
        """
        Get the contained Modifiers for a specific ModifierType.

        :param type_: The ModifierType to filter the returned list of Modifiers by.
        :type type_: ModifierType
        :return: The filtered list of Modifiers that match the given type_.
        :rtype: List[ModifierInfo]
        """
        # filter with a comprehension rather than an accumulate loop
        return [mod for mod in self.modifiers if mod.type_ == type_]
The provided code snippet includes necessary dependencies for implementing the `load_sparsification_info` function. Write a Python function `def load_sparsification_info(load: str) -> SparsificationInfo` to solve the following problem:
Load the sparsification info from a file or raw json. If load exists as a path, will read from the file and use that. Otherwise will try to parse the input as a raw json str. :param load: Either a file path to a json file or a raw json string. :type load: str :return: The loaded sparsification info. :rtype: SparsificationInfo
Here is the function:
def load_sparsification_info(load: str) -> SparsificationInfo:
    """
    Load the sparsification info from a file or raw json.
    If load exists as a path, will read from the file and use that.
    Otherwise will try to parse the input as a raw json str.

    :param load: Either a file path to a json file or a raw json string.
    :type load: str
    :return: The loaded sparsification info.
    :rtype: SparsificationInfo
    """
    # prefer file contents when the argument resolves to an existing path,
    # otherwise treat the argument itself as raw json
    raw_json = load
    load_path = clean_path(load)
    if os.path.exists(load_path):
        with open(load_path, "r") as file:
            raw_json = file.read()
    return SparsificationInfo.parse_raw(raw_json)
21,493 | import argparse
import logging
import os
from enum import Enum
from typing import Any, List, Optional
from pydantic import BaseModel, Field
from sparseml.base import execute_in_sparseml_framework
from sparseml.utils import clean_path, create_parent_dirs
def save_sparsification_info(framework: Any, path: Optional[str] = None):
    """
    Save the sparsification info for a given framework.
    If path is provided, will save to a json file at that path.
    If path is not provided, will print out the info.

    :param framework: The item to detect the ML framework for.
        See :func:`detect_framework` for more information.
    :type framework: Any
    :param path: The path, if any, to save the info to in json format.
        If not provided will print out the info.
    :type path: Optional[str]
    """
    _LOGGER.debug(
        "saving sparsification info for framework %s to %s",
        framework,
        path if path else "sys.out",
    )
    # allow callers to pass an already-built SparsificationInfo directly
    info = (
        sparsification_info(framework)
        if not isinstance(framework, SparsificationInfo)
        else framework
    )

    if path:
        path = clean_path(path)
        create_parent_dirs(path)
        with open(path, "w") as file:
            file.write(info.json())
        # removed a stray trailing comma here that previously turned this
        # logging statement into a one-element tuple expression
        _LOGGER.info(
            "saved sparsification info for framework %s in file at %s", framework, path
        )
    else:
        print(info.json(indent=4))
        _LOGGER.info("printed out sparsification info for framework %s", framework)
def _parse_args():
    """Build the CLI argument parser for this module and parse sys.argv."""
    arg_parser = argparse.ArgumentParser(
        description=(
            "Compile the available setup and information for the sparsification "
            "of a model in a given framework."
        )
    )
    arg_parser.add_argument(
        "framework",
        type=str,
        help=(
            "the ML framework or path to a framework file to load the "
            "sparsification info for"
        ),
    )
    arg_parser.add_argument(
        "--path",
        type=str,
        default=None,
        help=(
            "A full file path to save the sparsification info to. "
            "If not supplied, will print out the sparsification info to the console."
        ),
    )
    return arg_parser.parse_args()
def _main():
    """Script entrypoint: parse CLI args, then save or print the info."""
    parsed = _parse_args()
    save_sparsification_info(parsed.framework, parsed.path)
21,494 | from typing import Tuple, Union
from sparseml.tensorflow_v1.utils import tf_compat
def symmetric_pad2d(
    x_tens: tf_compat.Tensor, pad: Union[str, int, Tuple[int, int]], data_format: str
):
    """
    Create a symmetric pad op in the current graph and scope.
    To do this, pad must be an integer or tuple of integers.
    If pad is a string, will not do anything and pad should be passed into
    the pool or conv op.

    :param x_tens: the tensor to apply padding to
    :param pad: the padding to apply symmetrically. A single integer pads both
        sides of the height and width dimensions; a tuple pads both sides of
        the height dimension with its first element and both sides of the
        width dimension with its second element.
    :param data_format: either channels_last or channels_first
    :return: the padded tensor
    """
    if isinstance(pad, str):
        # string paddings are handled by the downstream tensorflow op itself
        return x_tens

    if isinstance(pad, int):
        height_pad = [pad, pad]
        width_pad = [pad, pad]
    else:
        height_pad = [pad[0], pad[0]]
        width_pad = [pad[1], pad[1]]

    if data_format == "channels_last":
        paddings = [[0, 0], height_pad, width_pad, [0, 0]]
    else:
        paddings = [[0, 0], [0, 0], height_pad, width_pad]

    return tf_compat.pad(x_tens, tf_compat.constant(paddings))
The provided code snippet includes necessary dependencies for implementing the `pool2d` function. Write a Python function `def pool2d( name: str, x_tens: tf_compat.Tensor, type_: str, pool_size: Union[int, Tuple[int, int]], strides: Union[int, Tuple[int, int]] = 1, padding: Union[str, int, Tuple[int, ...]] = "same", data_format: str = "channels_last", )` to solve the following problem:
Create a pool op with the given name in the current graph and scope. Supported are [max, avg, global_avg] :param name: the name to give to the pooling op in the graph :param x_tens: the input tensor to apply pooling to :param type_: the type of pooling to apply, one of [max, avg, global_avg] :param pool_size: the size of the pooling window to apply, if global_avg then is the desired output size :param strides: the stride to apply for the pooling op, if global_avg then is unused :param padding: any padding to apply to the tensor before pooling; if string then uses tensorflows built in padding, else uses symmetric_pad2d :param data_format: either channels_last or channels_first :return: the tensor after pooling
Here is the function:
def pool2d(
    name: str,
    x_tens: tf_compat.Tensor,
    type_: str,
    pool_size: Union[int, Tuple[int, int]],
    strides: Union[int, Tuple[int, int]] = 1,
    padding: Union[str, int, Tuple[int, ...]] = "same",
    data_format: str = "channels_last",
):
    """
    Create a pooling op with the given name in the current graph and scope.
    Supported types are [max, avg, global_avg].

    :param name: the name to give to the pooling op in the graph
    :param x_tens: the input tensor to apply pooling to
    :param type_: the type of pooling to apply, one of [max, avg, global_avg]
    :param pool_size: the size of the pooling window to apply,
        if global_avg then is the desired output size
    :param strides: the stride to apply for the pooling op,
        if global_avg then is unused
    :param padding: any padding to apply to the tensor before pooling;
        if string then uses tensorflows built in padding, else uses
        symmetric_pad2d
    :param data_format: either channels_last or channels_first
    :return: the tensor after pooling
    """
    with tf_compat.variable_scope(name, reuse=tf_compat.AUTO_REUSE):
        padded = symmetric_pad2d(x_tens, padding, data_format)
        # when symmetric padding was applied manually, the tf op must not pad
        # again, so fall back to "valid" for non-string paddings
        tf_padding = padding if isinstance(padding, str) else "valid"

        if type_ == "max":
            return tf_compat.layers.max_pooling2d(
                padded, pool_size, strides, tf_padding, data_format
            )

        if type_ == "avg":
            return tf_compat.layers.average_pooling2d(
                padded, pool_size, strides, tf_padding, data_format
            )

        if type_ == "global_avg":
            if pool_size != 1 and pool_size != (1, 1):
                raise ValueError(
                    "only output pool_size of 1 is supported for global average pooling"
                )
            reduce_axes = [1, 2] if data_format == "channels_last" else [2, 3]
            return tf_compat.reduce_mean(padded, reduce_axes, keepdims=True)

        raise ValueError("unrecognized type_ given of {}".format(type_))
21,495 | from typing import Tuple, Union
from sparseml.tensorflow_v1.utils import tf_compat
# default batch-normalization hyperparameters used by the layer helpers below
BN_MOMENTUM = 0.9
BN_EPSILON = 1e-5
def activation(x_tens: tf_compat.Tensor, act: Union[None, str], name: str = "act"):
    """
    Create an activation operation in the current graph and scope.

    :param x_tens: the tensor to apply the op to
    :param act: the activation type to apply, supported:
        [None, relu, relu6, sigmoid, softmax]
    :param name: the name to give to the activation op in the graph
    :return: the created operation
    """
    if not act:
        # no activation requested; pass the tensor through untouched
        return x_tens

    act_fns = {
        "relu": tf_compat.nn.relu,
        "relu6": tf_compat.nn.relu6,
        "sigmoid": tf_compat.nn.sigmoid,
        "softmax": tf_compat.nn.softmax,
    }
    if act not in act_fns:
        raise ValueError("unknown act given of {}".format(act))

    return act_fns[act](x_tens, name=name)
def symmetric_pad2d(
    x_tens: tf_compat.Tensor, pad: Union[str, int, Tuple[int, int]], data_format: str
):
    """
    Create a symmetric pad op in the current graph and scope.
    To do this, pad must be an integer or tuple of integers.
    If pad is a string, will not do anything and pad should be passed into
    the pool or conv op.

    :param x_tens: the tensor to apply padding to
    :param pad: the padding to apply symmetrically. If it is a single integer,
        will apply to both sides of height and width dimensions.
        If it is a tuple, will take the first element as the padding for
        both sides of the height dimension and the second for both sides of
        the width dimension.
    :param data_format: either channels_last or channels_first
    :return: the padded tensor
    """
    if isinstance(pad, str):
        # default tensorflow_v1 padding
        return x_tens

    # duplicate each pad amount so both sides of the dimension are padded
    y_pad = [pad, pad] if isinstance(pad, int) else [pad[0], pad[0]]
    x_pad = [pad, pad] if isinstance(pad, int) else [pad[1], pad[1]]
    # batch and channel dimensions are never padded; their position depends
    # on the data format
    pad_tensor = (
        [[0, 0], y_pad, x_pad, [0, 0]]
        if data_format == "channels_last"
        else [[0, 0], [0, 0], y_pad, x_pad]
    )
    pad_tensor = tf_compat.constant(pad_tensor)
    return tf_compat.pad(x_tens, pad_tensor)
def conv2d(
    name: str,
    x_tens: tf_compat.Tensor,
    in_chan: int,
    out_chan: int,
    kernel: int,
    stride: int,
    padding: str,
    act: Union[None, str] = None,
):
    """
    Create a convolutional layer with the proper ops and variables.

    :param name: the name scope to create the layer under
    :param x_tens: the tensor to apply the layer to
    :param in_chan: the number of input channels
    :param out_chan: the number of output channels
    :param kernel: the kernel size to create a convolution for
    :param stride: the stride to apply to the convolution
    :param padding: the padding to apply to the convolution
    :param act: an activation type to add into the layer, supported:
        [None, relu, sigmoid, softmax]
    :return: the created layer
    """
    with tf_compat.variable_scope(name, reuse=tf_compat.AUTO_REUSE):
        # create (or reuse) the kernel and bias variables under this scope
        kernel_var = tf_compat.get_variable(
            "weight",
            shape=[kernel, kernel, in_chan, out_chan],
            initializer=tf_compat.glorot_normal_initializer(),
            dtype=tf_compat.float32,
        )
        bias_var = tf_compat.get_variable(
            "bias",
            shape=[out_chan],
            initializer=tf_compat.zeros_initializer(),
            dtype=tf_compat.float32,
        )

        out = tf_compat.nn.conv2d(
            x_tens,
            kernel_var,
            strides=[1, stride, stride, 1],
            padding=padding,
            name="conv",
        )
        out = tf_compat.nn.bias_add(out, bias_var, name="bias_add")
        return activation(out, act)
The provided code snippet includes necessary dependencies for implementing the `conv2d_block` function. Write a Python function `def conv2d_block( name: str, x_tens: tf_compat.Tensor, training: Union[bool, tf_compat.Tensor], channels: int, kernel_size: int, padding: Union[str, int, Tuple[int, ...]] = "same", stride: int = 1, data_format: str = "channels_last", include_bn: bool = True, include_bias: bool = None, act: Union[None, str] = "relu", kernel_initializer=tf_compat.glorot_uniform_initializer(), bias_initializer=tf_compat.zeros_initializer(), beta_initializer=tf_compat.zeros_initializer(), gamma_initializer=tf_compat.ones_initializer(), )` to solve the following problem:
Create a convolution op and supporting ops (batch norm, activation, etc) in the current graph and scope. :param name: The name to group all ops under in the graph :param x_tens: The input tensor to apply a convolution and supporting ops to :param training: A bool or tensor to indicate if the net is being run in training mode or not. Used for batch norm :param channels: The number of output channels from the conv op :param kernel_size: The size of the kernel to use for the conv op :param padding: Any padding to apply to the tensor before the convolution; if string then uses tensorflows built in padding, else uses symmetric_pad2d :param stride: The stride to apply for the convolution :param data_format: Either channels_last or channels_first :param include_bn: True to include a batch norm operation after the conv, False otherwise :param include_bias: If left unset, will add a bias if not include_bn. Otherwise can be set to True to include a bias after the convolution, False otherwise. :param act: The activation to apply after the conv op and batch norm (if included). Default is "relu", set to None for no activation. :param kernel_initializer: The initializer to use for the convolution kernels :param bias_initializer: The initializer to use for the bias variable, if a bias is included :param beta_initializer: The initializer to use for the beta variable, if batch norm is included :param gamma_initializer: The initializer to use for the gamma variable, if gamma is included :return: the tensor after all ops have been applied
Here is the function:
def conv2d_block(
    name: str,
    x_tens: tf_compat.Tensor,
    training: Union[bool, tf_compat.Tensor],
    channels: int,
    kernel_size: int,
    padding: Union[str, int, Tuple[int, ...]] = "same",
    stride: int = 1,
    data_format: str = "channels_last",
    include_bn: bool = True,
    include_bias: bool = None,
    act: Union[None, str] = "relu",
    kernel_initializer=tf_compat.glorot_uniform_initializer(),
    bias_initializer=tf_compat.zeros_initializer(),
    beta_initializer=tf_compat.zeros_initializer(),
    gamma_initializer=tf_compat.ones_initializer(),
):
    """
    Create a conv2d op plus its supporting ops (symmetric padding, batch norm,
    activation) grouped under a single variable scope in the current graph.

    :param name: the variable scope to group all created ops under
    :param x_tens: input tensor the convolution is applied to
    :param training: bool or tensor flagging training mode; forwarded to batch norm
    :param channels: number of output channels produced by the convolution
    :param kernel_size: size of the square convolution kernel
    :param padding: padding for the convolution; a string uses tensorflow's
        built-in padding, an int/tuple is applied explicitly via symmetric_pad2d
        and the conv itself then runs with "valid" padding
    :param stride: stride of the convolution
    :param data_format: either channels_last or channels_first
    :param include_bn: True to append a batch norm op after the conv
    :param include_bias: if left unset, a bias is added only when batch norm is
        not included; otherwise True/False forces the bias on/off
    :param act: activation applied after conv/batch norm; defaults to "relu",
        None for no activation
    :param kernel_initializer: initializer for the convolution kernel
    :param bias_initializer: initializer for the bias variable, when included
    :param beta_initializer: initializer for batch norm beta, when included
    :param gamma_initializer: initializer for batch norm gamma, when included
    :return: the tensor after all ops have been applied
    """
    # bias defaults off when batch norm would absorb it anyway
    if include_bias is None:
        include_bias = not include_bn

    with tf_compat.variable_scope(name, reuse=tf_compat.AUTO_REUSE):
        # explicit symmetric padding for numeric pads; no-op for string pads
        tens = symmetric_pad2d(x_tens, padding, data_format)
        conv_padding = padding if isinstance(padding, str) else "valid"
        tens = tf_compat.layers.conv2d(
            tens,
            filters=channels,
            kernel_size=kernel_size,
            strides=stride,
            padding=conv_padding,
            data_format=data_format,
            use_bias=include_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer if include_bias else None,
            name="conv",
        )

        if include_bn:
            bn_axis = 1 if data_format == "channels_first" else 3
            tens = tf_compat.layers.batch_normalization(
                tens,
                axis=bn_axis,
                momentum=BN_MOMENTUM,
                epsilon=BN_EPSILON,
                beta_initializer=beta_initializer,
                gamma_initializer=gamma_initializer,
                training=training,
                name="bn",
            )

        tens = activation(tens, act)

    return tens
21,496 | from typing import Tuple, Union
from sparseml.tensorflow_v1.utils import tf_compat
BN_MOMENTUM = 0.9
BN_EPSILON = 1e-5
def activation(x_tens: tf_compat.Tensor, act: Union[None, str], name: str = "act"):
    """
    Apply an activation op to a tensor in the current graph and scope.

    :param x_tens: the tensor to apply the activation to
    :param act: the activation type; one of
        [None, relu, relu6, sigmoid, softmax]. None/empty returns the
        input tensor unchanged
    :param name: the name to give the activation op in the graph
    :return: the resulting tensor
    """
    if not act:
        return x_tens

    # dispatch table instead of an if/elif chain
    act_ops = {
        "relu": tf_compat.nn.relu,
        "relu6": tf_compat.nn.relu6,
        "sigmoid": tf_compat.nn.sigmoid,
        "softmax": tf_compat.nn.softmax,
    }
    if act not in act_ops:
        raise ValueError("unknown act given of {}".format(act))

    return act_ops[act](x_tens, name=name)
def symmetric_pad2d(
    x_tens: tf_compat.Tensor, pad: Union[str, int, Tuple[int, int]], data_format: str
):
    """
    Symmetrically zero-pad the spatial (height and width) dimensions of a
    tensor in the current graph and scope.

    String pads are a no-op here: they are expected to be handled by the
    downstream conv or pool op using tensorflow's built-in padding.

    :param x_tens: the tensor to pad
    :param pad: a single int applied to both sides of height and width, or a
        tuple whose first element pads both sides of height and whose second
        element pads both sides of width
    :param data_format: either channels_last or channels_first
    :return: the padded tensor (or the input unchanged when pad is a string)
    """
    if isinstance(pad, str):
        # defer to the default tensorflow_v1 padding in the following op
        return x_tens

    if isinstance(pad, int):
        height_pad = [pad, pad]
        width_pad = [pad, pad]
    else:
        height_pad = [pad[0], pad[0]]
        width_pad = [pad[1], pad[1]]

    if data_format == "channels_last":
        paddings = [[0, 0], height_pad, width_pad, [0, 0]]
    else:
        paddings = [[0, 0], [0, 0], height_pad, width_pad]

    return tf_compat.pad(x_tens, tf_compat.constant(paddings))
The provided code snippet includes necessary dependencies for implementing the `depthwise_conv2d_block` function. Write a Python function `def depthwise_conv2d_block( name: str, x_tens: tf_compat.Tensor, training: Union[bool, tf_compat.Tensor], channels: int, kernel_size: int, padding: Union[str, int, Tuple[int, ...]] = "same", stride: int = 1, data_format: str = "channels_last", include_bn: bool = True, include_bias: bool = None, act: Union[None, str] = "relu", kernel_initializer=tf_compat.glorot_uniform_initializer(), bias_initializer=tf_compat.zeros_initializer(), beta_initializer=tf_compat.zeros_initializer(), gamma_initializer=tf_compat.ones_initializer(), )` to solve the following problem:
Create a depthwise convolution op and supporting ops (batch norm, activation, etc) in the current graph and scope. :param name: The name to group all ops under in the graph :param x_tens: The input tensor to apply a convolution and supporting ops to :param training: A bool or tensor to indicate if the net is being run in training mode or not. Used for batch norm :param channels: The number of output channels from the conv op :param kernel_size: The size of the kernel to use for the conv op :param padding: Any padding to apply to the tensor before the convolution; if string then uses tensorflows built in padding, else uses symmetric_pad2d :param stride: The stride to apply for the convolution :param data_format: Either channels_last or channels_first :param include_bn: True to include a batch norm operation after the conv, False otherwise :param include_bias: If left unset, will add a bias if not include_bn. Otherwise can be set to True to include a bias after the convolution, False otherwise. :param act: The activation to apply after the conv op and batch norm (if included). Default is "relu", set to None for no activation. :param kernel_initializer: The initializer to use for the convolution kernels :param bias_initializer: The initializer to use for the bias variable, if a bias is included :param beta_initializer: The initializer to use for the beta variable, if batch norm is included :param gamma_initializer: The initializer to use for the gamma variable, if gamma is included :return: the tensor after all ops have been applied
Here is the function:
def depthwise_conv2d_block(
    name: str,
    x_tens: tf_compat.Tensor,
    training: Union[bool, tf_compat.Tensor],
    channels: int,
    kernel_size: int,
    padding: Union[str, int, Tuple[int, ...]] = "same",
    stride: int = 1,
    data_format: str = "channels_last",
    include_bn: bool = True,
    include_bias: bool = None,
    act: Union[None, str] = "relu",
    kernel_initializer=tf_compat.glorot_uniform_initializer(),
    bias_initializer=tf_compat.zeros_initializer(),
    beta_initializer=tf_compat.zeros_initializer(),
    gamma_initializer=tf_compat.ones_initializer(),
):
    """
    Create a depthwise convolution op and supporting ops (batch norm,
    activation, etc) in the current graph and scope.

    :param name: The name to group all ops under in the graph
    :param x_tens: The input tensor to apply a convolution and supporting ops to
    :param training: A bool or tensor to indicate if the net is being run
        in training mode or not. Used for batch norm
    :param channels: The number of output channels from the conv op.
        NOTE(review): only used for the bias shape here; a depthwise conv with
        multiplier 1 always produces in_channels outputs, so channels is
        presumably expected to equal the input channel count — confirm
    :param kernel_size: The size of the kernel to use for the conv op
    :param padding: Any padding to apply to the tensor before the convolution;
        if string then uses tensorflows built in padding, else uses
        symmetric_pad2d
    :param stride: The stride to apply for the convolution
    :param data_format: Either channels_last or channels_first
    :param include_bn: True to include a batch norm operation after the conv,
        False otherwise
    :param include_bias: If left unset, will add a bias if not include_bn.
        Otherwise can be set to True to include a bias after the convolution,
        False otherwise.
    :param act: The activation to apply after the conv op and batch norm
        (if included). Default is "relu", set to None for no activation.
    :param kernel_initializer: The initializer to use for the convolution kernels
    :param bias_initializer: The initializer to use for the bias variable,
        if a bias is included
    :param beta_initializer: The initializer to use for the beta variable,
        if batch norm is included
    :param gamma_initializer: The initializer to use for the gamma variable,
        if gamma is included
    :return: the tensor after all ops have been applied
    """
    if include_bias is None:
        include_bias = not include_bn

    channel_axis = 3 if data_format == "channels_last" else 1
    # BUGFIX: strides must follow the tensor layout:
    # NHWC -> [1, stride, stride, 1], NCHW -> [1, 1, stride, stride].
    # Previously both branches produced the NHWC form, which broke strided
    # depthwise convolutions in channels_first mode.
    strides = (
        [1, stride, stride, 1]
        if data_format == "channels_last"
        else [1, 1, stride, stride]
    )
    tf_data_format = "NHWC" if data_format == "channels_last" else "NCHW"
    kernel_shape = (kernel_size, kernel_size, int(x_tens.shape[channel_axis]), 1)

    with tf_compat.variable_scope(name, reuse=tf_compat.AUTO_REUSE):
        with tf_compat.variable_scope("conv"):
            kernel = tf_compat.get_variable(
                "kernel",
                shape=kernel_shape,
                initializer=kernel_initializer,
                trainable=True,
            )
            bias = (
                tf_compat.get_variable(
                    "bias",
                    shape=(channels,),
                    initializer=bias_initializer,
                    trainable=True,
                )
                if include_bias
                else None
            )
            out = symmetric_pad2d(x_tens, padding, data_format)
            out = tf_compat.nn.depthwise_conv2d(
                out,
                kernel,
                strides,
                padding=padding.upper() if isinstance(padding, str) else "VALID",
                data_format=tf_data_format,
            )

            if bias is not None:
                # BUGFIX: tf.nn.bias_add expects "NHWC"/"NCHW", not the
                # keras-style "channels_last"/"channels_first" strings
                out = tf_compat.nn.bias_add(out, bias, data_format=tf_data_format)

        if include_bn:
            out = tf_compat.layers.batch_normalization(
                out,
                axis=3 if data_format == "channels_last" else 1,
                momentum=BN_MOMENTUM,
                epsilon=BN_EPSILON,
                beta_initializer=beta_initializer,
                gamma_initializer=gamma_initializer,
                training=training,
                name="bn",
            )

        out = activation(out, act)

    return out
21,497 | from typing import Tuple, Union
from sparseml.tensorflow_v1.utils import tf_compat
BN_MOMENTUM = 0.9
BN_EPSILON = 1e-5
def activation(x_tens: tf_compat.Tensor, act: Union[None, str], name: str = "act"):
    """
    Apply an activation op to a tensor in the current graph and scope.

    :param x_tens: the tensor to apply the activation to
    :param act: the activation type; one of
        [None, relu, relu6, sigmoid, softmax]. None/empty returns the
        input tensor unchanged
    :param name: the name to give the activation op in the graph
    :return: the resulting tensor
    """
    if not act:
        return x_tens

    # dispatch table instead of an if/elif chain
    act_ops = {
        "relu": tf_compat.nn.relu,
        "relu6": tf_compat.nn.relu6,
        "sigmoid": tf_compat.nn.sigmoid,
        "softmax": tf_compat.nn.softmax,
    }
    if act not in act_ops:
        raise ValueError("unknown act given of {}".format(act))

    return act_ops[act](x_tens, name=name)
The provided code snippet includes necessary dependencies for implementing the `dense_block` function. Write a Python function `def dense_block( name: str, x_tens: tf_compat.Tensor, training: Union[bool, tf_compat.Tensor], channels: int, include_bn: bool = False, include_bias: bool = None, dropout_rate: float = None, act: Union[None, str] = "relu", kernel_initializer=tf_compat.glorot_uniform_initializer(), bias_initializer=tf_compat.zeros_initializer(), beta_initializer=tf_compat.zeros_initializer(), gamma_initializer=tf_compat.ones_initializer(), )` to solve the following problem:
Create a dense or fully connected op and supporting ops (batch norm, activation, etc) in the current graph and scope. :param name: The name to group all ops under in the graph :param x_tens: The input tensor to apply a fully connected and supporting ops to :param training: A bool or tensor to indicate if the net is being run in training mode or not. Used for batch norm and dropout :param channels: The number of output channels from the dense op :param include_bn: True to include a batch norm operation after the conv, False otherwise :param include_bias: If left unset, will add a bias if not include_bn. Otherwise can be set to True to include a bias after the convolution, False otherwise. :param dropout_rate: The dropout rate to apply after the fully connected and batch norm if included. If none, will not include dropout :param act: The activation to apply after the conv op and batch norm (if included). Default is "relu", set to None for no activation. :param kernel_initializer: The initializer to use for the fully connected kernels :param bias_initializer: The initializer to use for the bias variable, if a bias is included :param beta_initializer: The initializer to use for the beta variable, if batch norm is included :param gamma_initializer: The initializer to use for the gamma variable, if gamma is included :return: the tensor after all ops have been applied
Here is the function:
def dense_block(
    name: str,
    x_tens: tf_compat.Tensor,
    training: Union[bool, tf_compat.Tensor],
    channels: int,
    include_bn: bool = False,
    include_bias: bool = None,
    dropout_rate: float = None,
    act: Union[None, str] = "relu",
    kernel_initializer=tf_compat.glorot_uniform_initializer(),
    bias_initializer=tf_compat.zeros_initializer(),
    beta_initializer=tf_compat.zeros_initializer(),
    gamma_initializer=tf_compat.ones_initializer(),
):
    """
    Create a dense (fully connected) op plus its supporting ops (batch norm,
    dropout, activation) grouped under a single variable scope in the
    current graph.

    :param name: the variable scope to group all created ops under
    :param x_tens: input tensor the fully connected layer is applied to
    :param training: bool or tensor flagging training mode; forwarded to
        batch norm and dropout
    :param channels: number of output channels from the dense op
    :param include_bn: True to append a batch norm op after the dense op
    :param include_bias: if left unset, a bias is added only when batch norm
        is not included; otherwise True/False forces the bias on/off
    :param dropout_rate: dropout rate applied after the dense op and batch
        norm (if included); None or 0 disables dropout
    :param act: activation applied last; defaults to "relu", None for none
    :param kernel_initializer: initializer for the dense kernel
    :param bias_initializer: initializer for the bias variable, when included
    :param beta_initializer: initializer for batch norm beta, when included
    :param gamma_initializer: initializer for batch norm gamma, when included
    :return: the tensor after all ops have been applied
    """
    # bias defaults off when batch norm would absorb it anyway
    if include_bias is None:
        include_bias = not include_bn

    with tf_compat.variable_scope(name, reuse=tf_compat.AUTO_REUSE):
        tens = tf_compat.layers.dense(
            x_tens,
            units=channels,
            use_bias=include_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer if include_bias else None,
            name="fc",
        )

        if include_bn:
            tens = tf_compat.layers.batch_normalization(
                tens,
                axis=1,
                momentum=BN_MOMENTUM,
                epsilon=BN_EPSILON,
                beta_initializer=beta_initializer,
                gamma_initializer=gamma_initializer,
                training=training,
                name="bn",
            )

        if dropout_rate is not None and dropout_rate > 0.0:
            tens = tf_compat.layers.dropout(
                tens, dropout_rate, training=training, name="dropout"
            )

        tens = activation(tens, act)

    return tens
21,498 | from typing import Tuple, Union
from sparseml.tensorflow_v1.utils import tf_compat
def activation(x_tens: tf_compat.Tensor, act: Union[None, str], name: str = "act"):
    """
    Apply an activation op to a tensor in the current graph and scope.

    :param x_tens: the tensor to apply the activation to
    :param act: the activation type; one of
        [None, relu, relu6, sigmoid, softmax]. None/empty returns the
        input tensor unchanged
    :param name: the name to give the activation op in the graph
    :return: the resulting tensor
    """
    if not act:
        return x_tens

    # dispatch table instead of an if/elif chain
    act_ops = {
        "relu": tf_compat.nn.relu,
        "relu6": tf_compat.nn.relu6,
        "sigmoid": tf_compat.nn.sigmoid,
        "softmax": tf_compat.nn.softmax,
    }
    if act not in act_ops:
        raise ValueError("unknown act given of {}".format(act))

    return act_ops[act](x_tens, name=name)
The provided code snippet includes necessary dependencies for implementing the `fc` function. Write a Python function `def fc( name: str, x_tens: tf_compat.Tensor, in_chan: int, out_chan: int, act: Union[None, str] = None, )` to solve the following problem:
Create a fully connected layer with the proper ops and variables. :param name: the name scope to create the layer under :param x_tens: the tensor to apply the layer to :param in_chan: the number of input channels :param out_chan: the number of output channels :param act: an activation type to add into the layer, supported: [None, relu, sigmoid, softmax] :return: the created layer
Here is the function:
def fc(
    name: str,
    x_tens: tf_compat.Tensor,
    in_chan: int,
    out_chan: int,
    act: Union[None, str] = None,
):
    """
    Create a fully connected layer (matmul + bias add + optional activation)
    with its variables, grouped under a single variable scope.

    :param name: the variable scope to create the layer under
    :param x_tens: the tensor to apply the layer to
    :param in_chan: the number of input channels
    :param out_chan: the number of output channels
    :param act: an activation type to add into the layer, supported:
        [None, relu, sigmoid, softmax]
    :return: the created layer
    """
    with tf_compat.variable_scope(name, reuse=tf_compat.AUTO_REUSE):
        weight = tf_compat.get_variable(
            "weight",
            shape=[in_chan, out_chan],
            initializer=tf_compat.glorot_normal_initializer(),
            dtype=tf_compat.float32,
        )
        bias = tf_compat.get_variable(
            "bias",
            shape=[out_chan],
            initializer=tf_compat.zeros_initializer(),
            dtype=tf_compat.float32,
        )

        out = tf_compat.matmul(x_tens, weight, name="matmul")
        out = tf_compat.nn.bias_add(out, bias, name="bias_add")
        out = activation(out, act)

    return out
21,499 | import functools
import os
from typing import Optional
from sparseml.base import check_version
_TENSORFLOW_MIN_VERSION = "1.8.0"
_TENSORFLOW_MAX_VERSION = "1.16.0"
def check_tensorflow_install(
    min_version: Optional[str] = _TENSORFLOW_MIN_VERSION,
    max_version: Optional[str] = _TENSORFLOW_MAX_VERSION,
    raise_on_error: bool = True,
    allow_env_ignore_flag: bool = True,
) -> bool:
    """
    Check that the tensorflow package is installed within the required
    version bounds.

    If raise_on_error, raises an ImportError when tensorflow is missing or
    outside the version range; otherwise returns the check result.

    :param min_version: the minimum tensorflow version (inclusive); None
        for no minimum
    :type min_version: str
    :param max_version: the maximum tensorflow version (inclusive); None
        for no maximum
    :type max_version: str
    :param raise_on_error: True to raise install/version issues as
        ImportError, False to return the result instead
    :type raise_on_error: bool
    :param allow_env_ignore_flag: True to let the SPARSEML_IGNORE_TFV1
        env variable bypass the checks entirely, False to ignore the flag
    :type allow_env_ignore_flag: bool
    :return: True when tensorflow is installed within the accepted bounds
        (or the env flag bypasses the check), False otherwise when
        raise_on_error is disabled
    :rtype: bool
    """
    # env flag short-circuits all checks when permitted
    if allow_env_ignore_flag and os.getenv("SPARSEML_IGNORE_TFV1", False):
        return True

    # tensorflow_err is set at import time if tensorflow failed to import
    if tensorflow_err is not None:
        if not raise_on_error:
            return False
        raise tensorflow_err

    return check_version(
        "tensorflow",
        min_version,
        max_version,
        raise_on_error,
        alternate_package_names=["tensorflow-gpu"],
    )
The provided code snippet includes necessary dependencies for implementing the `require_tensorflow` function. Write a Python function `def require_tensorflow( min_version: Optional[str] = _TENSORFLOW_MIN_VERSION, max_version: Optional[str] = _TENSORFLOW_MAX_VERSION, allow_env_ignore_flag: bool = True, )` to solve the following problem:
Decorator function to require use of tensorflow. Will check that tensorflow package is installed and within the bounding ranges of min_version and max_version if they are set before calling the wrapped function. See :func:`check_tensorflow_install` for more info. :param min_version: The minimum version for tensorflow that it must be greater than or equal to, if unset will require no minimum version :type min_version: str :param max_version: The maximum version for tensorflow that it must be less than or equal to, if unset will require no maximum version. :type max_version: str :param allow_env_ignore_flag: True to allow the env variable SPARSEML_IGNORE_TFV1 to ignore the tensorflow install and version checks. False to ignore the ignore flag. :type allow_env_ignore_flag: bool
Here is the function:
def require_tensorflow(
    min_version: Optional[str] = _TENSORFLOW_MIN_VERSION,
    max_version: Optional[str] = _TENSORFLOW_MAX_VERSION,
    allow_env_ignore_flag: bool = True,
):
    """
    Decorator function to require use of tensorflow.

    Will check that the tensorflow package is installed and within the
    bounding ranges of min_version and max_version, if they are set, before
    calling the wrapped function.
    See :func:`check_tensorflow_install` for more info.

    :param min_version: The minimum version for tensorflow that it must be
        greater than or equal to, if unset will require no minimum version
    :type min_version: str
    :param max_version: The maximum version for tensorflow that it must be
        less than or equal to, if unset will require no maximum version.
    :type max_version: str
    :param allow_env_ignore_flag: True to allow the env variable
        SPARSEML_IGNORE_TFV1 to ignore the tensorflow install and version
        checks. False to ignore the ignore flag.
    :type allow_env_ignore_flag: bool
    """

    def _decorator(func):
        @functools.wraps(func)
        def _wrapper(*args, **kwargs):
            # BUGFIX: pass allow_env_ignore_flag by keyword. The third
            # positional parameter of check_tensorflow_install is
            # raise_on_error, so passing the flag positionally silently
            # disabled error raising and never forwarded the flag itself.
            check_tensorflow_install(
                min_version,
                max_version,
                allow_env_ignore_flag=allow_env_ignore_flag,
            )

            return func(*args, **kwargs)

        return _wrapper

    return _decorator
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.