id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
23,643 | import torch
import numpy as np
from .coefficient import get_sigmoid_positive_ploy_coeffcients, get_exp_poly_coeffcients, get_gelu_tanh_poly_coeffcients, get_tanh_positive_poly_coeffcients
from pytorch_nndct.utils.hw_dtype import is_subnormal, is_normal
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def isqrt_approx_bfloat16(x, mantissa_bit=7, exponent_bit=8):
  """Approximate 1/sqrt(x) with the fast-inverse-square-root bit trick,
  rounding every intermediate to bfloat16 to model bfloat16 hardware.

  Args:
    x: input tensor; values assumed positive (0/negative give garbage).
    mantissa_bit: mantissa width of the target format (7 for bfloat16).
    exponent_bit: exponent width of the target format (8 for bfloat16).

  Returns:
    Tensor on x's device with x's dtype holding the bfloat16-rounded
    estimate after one Newton-Raphson refinement step.
  """
  # From fast inverse square root.
  # gamma = 0.0450466 first order Taylor error.
  # magic_number = 3/2 * 2^mantissa_bit * (2^(exponent_bit-1) - 1 - gamma)
  # gamma = 0.0450466
  # magic_n = np.array(round(3.0/2.0 * 2.0**mantissa_bit * (2.0**(exponent_bit-1) - 1.0 - gamma)), dtype=np.int16)
  # From https://www.mdpi.com/1099-4300/23/1/86/pdf authored by
  # Walczy, C.J.; Moroz, L.V..; Cie´sli´nski, J.L.
  # This work is licensed under the Creative Commons Attribution 4.0
  # International License. To view a copy of this license, visit
  # http://creativecommons.org/licenses/by/4.0/ or send a letter to
  # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
  magic_n = np.round(2**mantissa_bit *
                     (3 * (2.0**(exponent_bit - 1) - 1.0) - 1) / 2 +
                     np.round(2**mantissa_bit *
                              (3.7315712401613957182292407381942955 - 2) / 4 -
                              0.5))
  magic_n = np.array(magic_n, dtype=np.int16)
  x2 = (x * 0.5).to(torch.bfloat16)
  number = x.to(torch.float32).cpu().detach().numpy()
  threehalfs = 1.5
  y = np.float32(number)
  i = y.view(np.int32)
  # Drop the low 17 mantissa bits (bfloat16 keeps the top 16 bits of fp32),
  # apply the 16-bit magic-constant seed, then shift back into fp32 position:
  # the bfloat16 analogue of the classic "i = magic - (i >> 1)".
  i = (magic_n - np.int32(i >> 17)) << 16
  y = i.view(np.float32)
  y = torch.from_numpy(y).to(x.device).to(x.dtype)
  # One Newton-Raphson step y*(1.5 - 0.5*x*y*y), rounding each intermediate
  # product/difference to bfloat16 to mimic the hardware datapath.
  out = (x2 * y).to(torch.bfloat16)
  out = (out * y).to(torch.bfloat16)
  out = (threehalfs - out).to(torch.bfloat16)
  out = (y * out).to(torch.bfloat16)
  # y = y * (threehalfs - (x2 * y * y))
  # y = y * (threehalfs - (x2 * y * y))
  return out
def isqrt_approx_walcyzk(x, mantissa_bit=23, exponent_bit=8):
  """Approximate 1/sqrt(x) for fp32 tensors via Walczyk et al.'s method.

  Seeds the estimate with the refined integer magic constant 0x5f376908 and
  polishes it with two modified Newton-Raphson iterations whose constants
  (1.50087896, 1.50000057) minimize the relative error; see
  https://www.mdpi.com/1099-4300/23/1/86/pdf (CC-BY 4.0) by
  Walczyk, C.J.; Moroz, L.V.; Cieslinski, J.L.

  Args:
    x: positive float32 tensor (asserted).
    mantissa_bit, exponent_bit: kept for signature parity with the bfloat16
      variant; the constants below already assume the fp32 layout (23/8).

  Returns:
    Tensor on x's device with x's dtype approximating x**-0.5.
  """
  assert x.dtype == torch.float32
  MAGIC = 0x5f376908  # refined variant of the classic 0x5f3759df seed
  NEWTON_C1 = 1.50087896
  NEWTON_C2 = 1.50000057
  values = x.cpu().numpy()
  half_values = values * 0.5
  est = np.float32(values)
  bits = est.view(np.int32)
  # Integer bit-hack seed: reinterpret, halve the exponent, subtract from
  # the magic constant, reinterpret back as float.
  bits = MAGIC - np.int32(bits >> 1)
  est = bits.view(np.float32)
  # Two Newton-Raphson refinements with error-optimized "three-halves".
  for newton_c in (NEWTON_C1, NEWTON_C2):
    est = est * (newton_c - (half_values * est * est))
  return torch.from_numpy(est).to(x.device).to(x.dtype)
def reciprocal_approx_isqrt(x,
                            input_dtype=torch.bfloat16,
                            output_dtype=torch.bfloat16,
                            isqrt_dtype=torch.bfloat16):
  """Approximate 1/x as sign(x) * (1/sqrt(|x|))**2 using the isqrt kernels.

  Args:
    x: input tensor; must already have dtype `input_dtype` (asserted).
    input_dtype: expected dtype of x.
    output_dtype: dtype of the returned tensor (asserted on exit).
    isqrt_dtype: selects the isqrt approximation (torch.float32 ->
      isqrt_approx_walcyzk, torch.bfloat16 -> isqrt_approx_bfloat16).

  Raises:
    NotImplementedError: for any other isqrt_dtype.
  """
  assert x.dtype == input_dtype
  # Run isqrt on |x| and restore the sign at the end.
  # NOTE(review): x == 0 gives sign 0 times a non-finite/garbage isqrt
  # value, so the result at 0 is not a clean 0 — confirm callers avoid it.
  sign = torch.sign(x).to(output_dtype)
  out = torch.abs(x)
  if isqrt_dtype == torch.float32:
    out = isqrt_approx_walcyzk(out.to(isqrt_dtype))
  elif isqrt_dtype == torch.bfloat16:
    out = isqrt_approx_bfloat16(out.to(isqrt_dtype))
  else:
    raise NotImplementedError("only support isqrt fp32 and bfloat16.")
  out = out.to(output_dtype)
  # (1/sqrt(|x|))^2 == 1/|x|
  out = out * out
  out = out * sign
  assert out.dtype == output_dtype
  return out | null |
23,644 | import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
from torch.nn import init
from torch.nn.modules.utils import _pair
from torch.nn.modules.utils import _triple
from torch.nn.parameter import Parameter
# Fused conv+batchnorm quantized module classes that expose BN controls.
# NOTE(review): these classes are defined elsewhere in the original module.
_FUSED_CLS = [
    QuantizedConvBatchNorm2d, QuantizedConvBatchNorm3d,
    QuantizedConvTransposeBatchNorm2d, QuantizedConvTransposeBatchNorm3d
]
def update_bn_stats(mod):
  # nn.Module.apply() helper: re-enable BatchNorm running-statistics updates
  # on fused conv+bn modules; no-op for any other module type.
  if type(mod) in _FUSED_CLS:
    mod.update_bn_stats() | null |
23,645 | import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
from torch.nn import init
from torch.nn.modules.utils import _pair
from torch.nn.modules.utils import _triple
from torch.nn.parameter import Parameter
# Fused conv+batchnorm quantized module classes that expose BN controls.
# NOTE(review): these classes are defined elsewhere in the original module.
_FUSED_CLS = [
    QuantizedConvBatchNorm2d, QuantizedConvBatchNorm3d,
    QuantizedConvTransposeBatchNorm2d, QuantizedConvTransposeBatchNorm3d
]
def freeze_bn_stats(mod):
  # nn.Module.apply() helper: freeze BatchNorm running statistics on fused
  # conv+bn modules; no-op for any other module type.
  if type(mod) in _FUSED_CLS:
    mod.freeze_bn_stats() | null |
23,646 | import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
from torch.nn import init
from torch.nn.modules.utils import _pair
from torch.nn.modules.utils import _triple
from torch.nn.parameter import Parameter
# Fused conv+batchnorm quantized module classes that expose BN controls.
# NOTE(review): these classes are defined elsewhere in the original module.
_FUSED_CLS = [
    QuantizedConvBatchNorm2d, QuantizedConvBatchNorm3d,
    QuantizedConvTransposeBatchNorm2d, QuantizedConvTransposeBatchNorm3d
]
def fuse_conv_bn(mod):
  # nn.Module.apply() helper: fold BatchNorm parameters into the conv
  # weights of fused conv+bn modules; no-op for any other module type.
  if type(mod) in _FUSED_CLS:
    mod.merge_bn_to_conv() | null |
23,647 | import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
from torch.nn import init
from torch.nn.modules.utils import _pair
from torch.nn.modules.utils import _triple
from torch.nn.parameter import Parameter
# Fused conv+batchnorm quantized module classes that expose BN controls.
# NOTE(review): these classes are defined elsewhere in the original module.
_FUSED_CLS = [
    QuantizedConvBatchNorm2d, QuantizedConvBatchNorm3d,
    QuantizedConvTransposeBatchNorm2d, QuantizedConvTransposeBatchNorm3d
]
def clear_non_native_bias(mod):
  # nn.Module.apply() helper: drop the non-native (BN-derived) bias on
  # fused conv+bn modules; no-op for any other module type.
  if type(mod) in _FUSED_CLS:
    mod.clear_non_native_bias() | null |
23,648 | import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch_nndct.nn.modules import fix_ops
from pytorch_nndct.nn.quantization.ops import tqt_ops
class FakeQuantizer(nn.Module):
def __init__(self, bitwidth):
def forward(self, x):
def _save_to_state_dict(self, destination, prefix, keep_vars):
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
def enable_quant(mod):
  # nn.Module.apply() helper: enable fake quantization on FakeQuantizer
  # instances; other modules pass through untouched.
  if isinstance(mod, FakeQuantizer):
    mod.enable_quant() | null |
23,649 | import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch_nndct.nn.modules import fix_ops
from pytorch_nndct.nn.quantization.ops import tqt_ops
class FakeQuantizer(nn.Module):
  """Simulate the quantize and dequantize operations in training time.

  In general, the output of this module is given by
  x_out = (clamp(round(x / scale + zero_point), quant_min, quant_max) - zero_point) * scale
  See https://arxiv.org/pdf/1903.08066.pdf

  In nndct, we use symmetric quantization and power-of-2 scaling. That is,
  zero_point = 0,
  quant_min = -2^(bitwidth - 1),
  quant_max = 2^(bitwidth - 1) - 1
  """
  # State-dict layout version; v2 renamed 'num_bits' -> 'bitwidth'
  # (see _load_from_state_dict).
  _version = 2

  def __init__(self, bitwidth):
    super(FakeQuantizer, self).__init__()
    # quant_enabled is registered as buffer to support their replication in DDP.
    # Data type is uint8 because NCCL does not support bool tensors.
    self.register_buffer('quant_enabled', torch.tensor([1], dtype=torch.uint8))
    self.register_buffer('bitwidth', torch.tensor([bitwidth],
                                                  dtype=torch.uint8))
    # domain = 2^(bitwidth - 1): magnitude of the symmetric integer range.
    self.register_buffer('domain', torch.tensor([2**(bitwidth - 1)]).float())

  def forward(self, x):
    raise NotImplementedError(
        'Do not use FakeQuantizer directly, please use its derivatives.')

  # PyTorch has been using _save_to_state_dict since 1.2.0.
  # See https://github.com/pytorch/pytorch/blob/v1.2.0/torch/nn/modules/module.py.
  def _save_to_state_dict(self, destination, prefix, keep_vars):
    # Persist only 'bitwidth'; 'quant_enabled' and 'domain' are runtime
    # state derived at construction time.
    super(FakeQuantizer, self)._save_to_state_dict(destination, prefix,
                                                   keep_vars)
    destination.pop(prefix + 'quant_enabled')
    destination.pop(prefix + 'domain')

  def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                            missing_keys, unexpected_keys, error_msgs):
    # We save 'bitwidth' to state_dict but not load it.
    # In low-bit training, bitwidth incrementally decreases from 8 -> 6 -> 4.
    # So the bitwidth should be taken from the quantizer's initialization
    # argument instead of the state dict.
    # For checkpoint BC with version 1: rename 'num_bits' keys to 'bitwidth'.
    replace_map = {'num_bits': 'bitwidth'}
    version = local_metadata.get('version', None)
    if version is None or version < 2:
      keys = list(state_dict.keys())
      for key in keys:
        key_parts = key.split('.')
        weight_name = key_parts[-1]
        if weight_name in replace_map:
          key_parts[-1] = replace_map[weight_name]
          new_key = '.'.join(key_parts)
          assert new_key not in state_dict
          state_dict[new_key] = state_dict[key]
          state_dict.pop(key)
    # Check if bitwidth is in the state dict, but do not load it.
    missing_bitwidth = False
    bitwidth_key = prefix + 'bitwidth'
    if bitwidth_key not in state_dict:
      missing_bitwidth = True
    else:
      # The value of bitwidth should be set at initialization.
      state_dict.pop(bitwidth_key)
    super(FakeQuantizer,
          self)._load_from_state_dict(state_dict, prefix, local_metadata,
                                      strict, missing_keys, unexpected_keys,
                                      error_msgs)
    # Suppress spurious 'missing key' reports for the deliberately
    # unsaved/unloaded buffers (except a genuinely absent bitwidth).
    ignored_params = ['bitwidth', 'quant_enabled', 'domain']
    ignored_keys = [prefix + name for name in ignored_params]
    for key in ignored_keys:
      if key in missing_keys:
        if key == bitwidth_key and missing_bitwidth:
          continue
        missing_keys.remove(key)
      else:
        print('[WARNING] Unexpected key in state dict:', key)
def disable_quant(mod):
  # nn.Module.apply() helper: disable fake quantization on FakeQuantizer
  # instances; other modules pass through untouched.
  if isinstance(mod, FakeQuantizer):
    mod.disable_quant() | null |
23,650 | import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch_nndct.nn.modules import fix_ops
from pytorch_nndct.nn.quantization.ops import tqt_ops
class TQTQuantizer(FakeQuantizer):
  """Fake quantizer with a trainable power-of-2 threshold (TQT).

  Learns log2(threshold) by gradient descent, following "Trained
  Quantization Thresholds" (https://arxiv.org/pdf/1903.08066.pdf). The
  first forward call runs a data-driven warmup that seeds the threshold;
  afterwards quantization is delegated to tqt_ops.TQTQuantize.
  """

  def __init__(self, bitwidth, tensor_type, method = None):
    # bitwidth: quantization bit width.
    # tensor_type: 'weight' or 'act'; selects the threshold init scheme.
    # method: rounding-method id; defaults to 3 (weights) / 2 (activations).
    super(TQTQuantizer, self).__init__(bitwidth)
    valid_tensor_types = ['weight', 'act']
    if tensor_type not in valid_tensor_types:
      raise ValueError(
          "'tensor_type' must be one of {}".format(valid_tensor_types))
    self.tensor_type = tensor_type
    # See TorchQuantizer::quantize() in quantization/torchquantizer.py
    if method is not None:
      self.method = method
    else:
      self.method = 3 if tensor_type == 'weight' else 2
    self.quantize_fn_cls = tqt_ops.TQTQuantize
    self.log_threshold = nn.Parameter(torch.tensor([0.0]))
    # uint8 buffer (NCCL does not support bool); 1 until the threshold has
    # been initialized from data.
    self.register_buffer('warmup_enabled', torch.tensor([1], dtype=torch.uint8))
    self._forward_fn = self._quantize_with_warmup

  def _init_threshold(self, x):
    """See Table 2 in https://arxiv.org/pdf/1903.08066.pdf"""

    def _max(x):
      # Unused alternative scheme: plain absolute maximum.
      return np.max(np.abs(x))

    def _3sd(x):
      # |mean| + 3*std; float16 is promoted for numerically stable stats.
      # NOTE(review): 1e-6 is added to every element before the mean —
      # possibly intended as abs(mean) + eps; confirm.
      y = x.astype(np.float32) if x.dtype == np.float16 else x
      return np.abs(np.mean(y + 1e-6)) + 3 * np.std(y)

    def _kl_j(x):
      """
      Ref paper (Algorithm 1):
      "Quantizing Convolutional Neural Networks for Low-Power
      High-Throughput Inference Engines" - Sean Settle et al.
      https://arxiv.org/pdf/1805.07941.pdf
      """

      def calculate_kl_j(x, y):
        return np.sum((x - y) * np.log2(x / y))

      mn = 0
      mx = np.max(np.abs(x))
      y = x.astype(np.float32) if x.dtype == np.float16 else x
      # Histogram of magnitudes with sqrt binning, normalized to a pdf/cdf.
      hist, bin_edges = np.histogram((np.abs(y)),
                                     'sqrt',
                                     range=(mn, mx),
                                     density=True)
      hist = hist.astype(x.dtype)
      bin_edges = bin_edges.astype(x.dtype)
      pdf = hist / np.sum(hist)
      cdf = np.cumsum(pdf)
      n = pow(2, self.bitwidth.item() - 1)
      threshold = []
      d = []
      if n + 1 > len(bin_edges) - 1:
        # Too few histogram bins to search; fall back to the max magnitude.
        return bin_edges[(-1)]
      else:
        # Scan candidate clipping points; keep the one minimizing divergence
        # between the clipped cdf and its n-level requantized approximation.
        for i in range(n + 1, len(bin_edges), 1):
          threshold_tmp = (i + 0.5) * (bin_edges[1] - bin_edges[0])
          threshold = np.concatenate((threshold, [threshold_tmp]))
          p = np.copy(cdf)
          p[i - 1:] = 1
          x = np.linspace(0.0, 1.0, n)
          xp = np.linspace(0.0, 1.0, i)
          fp = p[:i]
          p_interp = np.interp(x, xp, fp)
          x = np.linspace(0.0, 1.0, i)
          xp = np.linspace(0.0, 1.0, n)
          fp = p_interp
          q_interp = np.interp(x, xp, fp)
          q = np.copy(p)
          q[:i] = q_interp
          d_tmp = calculate_kl_j(cdf[np.nonzero(cdf)], q[np.nonzero(cdf)])
          d = np.concatenate((d, [d_tmp]))
        return threshold[np.argmin(d)]

    init_scheme = {'weight': _3sd, 'act': _kl_j}
    #init_scheme = {'weight': _max, 'act': _kl_j}
    data = x.detach().cpu().numpy()
    th = init_scheme[self.tensor_type](data)
    # TODO(yuwang): Check if th < 0.
    return torch.tensor([th], dtype=x.dtype, device=x.device)

  def _forward_pass_input(self, x, log_threshold, domain, method):
    # Dispatch target while quantization is disabled: identity.
    return x

  def _quantize(self, x, log_threshold, domain, method):
    return self.quantize_fn_cls.apply(x, log_threshold, domain,
                                      method)

  def _quantize_with_warmup(self, x, log_threshold, domain, method):
    # First call only: seed log_threshold from the data, then quantize.
    self.disable_warmup()
    log_threshold.data[0] = torch.log2(self._init_threshold(x))[0]
    return self._quantize(x, log_threshold, domain, method)

  def forward(self, x):
    #if self.quant_enabled[0] == 0:
    #  return x
    #if self.warmup_enabled[0] == 1:
    #  self.warmup_enabled[0] = 0
    #  threshold = self._init_threshold(x)
    #  self.log_threshold.data = torch.log2(threshold)
    #return self.quantize_fn_cls.apply(x, self.log_threshold, self.domain,
    #                                  self.method)
    return self._forward_fn(x, self.log_threshold, self.domain, self.method)

  def enable_quant(self, enabled=True):
    # Toggle quantization; rebinds the forward dispatch accordingly.
    self.quant_enabled[0] = 1 if enabled else 0
    if enabled:
      self._forward_fn = self._quantize_with_warmup if self.warmup_enabled[
          0] == 1 else self._quantize
    else:
      self._forward_fn = self._forward_pass_input
    return self

  def disable_quant(self):
    return self.enable_quant(False)

  def enable_warmup(self, enabled=True):
    self.warmup_enabled[0] = 1 if enabled else 0
    self._forward_fn = self._quantize_with_warmup if enabled else self._quantize
    return self

  def disable_warmup(self):
    return self.enable_warmup(False)

  def is_warmup_enabled(self):
    return self.warmup_enabled[0] == 1

  def freeze_quant(self, frozen=True):
    # Stop/resume gradient training of the learned threshold.
    self.log_threshold.requires_grad = (not frozen)

  def unfreeze_quant(self):
    self.freeze_quant(False)

  def extra_repr(self):
    return 'quant_enabled={}, bitwidth={}, method={}'.format(
        self.quant_enabled, self.bitwidth, self.method)

  def _save_to_state_dict(self, destination, prefix, keep_vars):
    super(TQTQuantizer, self)._save_to_state_dict(destination, prefix,
                                                  keep_vars)

  def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                            missing_keys, unexpected_keys, error_msgs):
    super(TQTQuantizer,
          self)._load_from_state_dict(state_dict, prefix, local_metadata,
                                      strict, missing_keys, unexpected_keys,
                                      error_msgs)
    # _forward_fn is not persisted; rebuild it from the restored flags.
    self._forward_fn = self._quantize_with_warmup if self.warmup_enabled[
        0] == 1 else self._quantize
    if self.quant_enabled[0] == 0:
      self._forward_fn = self._forward_pass_input

  def export_quant_info(self):
    """Export trained threshold to TorchQuantizer's quant info [bitwidth, fp].

    (1) TQT: qx = clip(round(fx / scale)) * scale, scale = 2^ceil(log2t) / 2^(b-1)
    (2) NndctFixNeron: qx = clip(round(fx * scale)) * (1 / scale), scale = 2^fp
    Let (1) equals (2), we can get
    (3): 2^(b-1) / 2^ceil(log2t) = 2^fp
      => fp = b - 1 - ceil(log2t)
    For more details, see nndct/include/cuda/nndct_fix_kernels.cuh::_fix_neuron_v2_device
    """
    bitwidth = self.bitwidth.item()
    ceil_log2t = torch.ceil(self.log_threshold).item()
    return [[bitwidth, int(bitwidth - 1 - ceil_log2t)]]

  def import_quant_info(self, qinfo):
    # Inverse of export_quant_info; a loaded threshold makes warmup redundant.
    bitwidth, fp = qinfo
    self.bitwidth[0] = bitwidth
    self.log_threshold.data = torch.tensor([bitwidth - 1 - fp],
                                           dtype=self.log_threshold.dtype)
    self.disable_warmup()
def enable_warmup(mod):
  # nn.Module.apply() helper: re-enable threshold warmup on TQT quantizers;
  # other modules pass through untouched.
  if isinstance(mod, TQTQuantizer):
    mod.enable_warmup() | null |
23,651 | import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch_nndct.nn.modules import fix_ops
from pytorch_nndct.nn.quantization.ops import tqt_ops
class TQTQuantizer(FakeQuantizer):
  """Fake quantizer with a trainable power-of-2 threshold (TQT).

  Learns log2(threshold) by gradient descent, following "Trained
  Quantization Thresholds" (https://arxiv.org/pdf/1903.08066.pdf). The
  first forward call runs a data-driven warmup that seeds the threshold;
  afterwards quantization is delegated to tqt_ops.TQTQuantize.
  """

  def __init__(self, bitwidth, tensor_type, method = None):
    # bitwidth: quantization bit width.
    # tensor_type: 'weight' or 'act'; selects the threshold init scheme.
    # method: rounding-method id; defaults to 3 (weights) / 2 (activations).
    super(TQTQuantizer, self).__init__(bitwidth)
    valid_tensor_types = ['weight', 'act']
    if tensor_type not in valid_tensor_types:
      raise ValueError(
          "'tensor_type' must be one of {}".format(valid_tensor_types))
    self.tensor_type = tensor_type
    # See TorchQuantizer::quantize() in quantization/torchquantizer.py
    if method is not None:
      self.method = method
    else:
      self.method = 3 if tensor_type == 'weight' else 2
    self.quantize_fn_cls = tqt_ops.TQTQuantize
    self.log_threshold = nn.Parameter(torch.tensor([0.0]))
    # uint8 buffer (NCCL does not support bool); 1 until the threshold has
    # been initialized from data.
    self.register_buffer('warmup_enabled', torch.tensor([1], dtype=torch.uint8))
    self._forward_fn = self._quantize_with_warmup

  def _init_threshold(self, x):
    """See Table 2 in https://arxiv.org/pdf/1903.08066.pdf"""

    def _max(x):
      # Unused alternative scheme: plain absolute maximum.
      return np.max(np.abs(x))

    def _3sd(x):
      # |mean| + 3*std; float16 is promoted for numerically stable stats.
      # NOTE(review): 1e-6 is added to every element before the mean —
      # possibly intended as abs(mean) + eps; confirm.
      y = x.astype(np.float32) if x.dtype == np.float16 else x
      return np.abs(np.mean(y + 1e-6)) + 3 * np.std(y)

    def _kl_j(x):
      """
      Ref paper (Algorithm 1):
      "Quantizing Convolutional Neural Networks for Low-Power
      High-Throughput Inference Engines" - Sean Settle et al.
      https://arxiv.org/pdf/1805.07941.pdf
      """

      def calculate_kl_j(x, y):
        return np.sum((x - y) * np.log2(x / y))

      mn = 0
      mx = np.max(np.abs(x))
      y = x.astype(np.float32) if x.dtype == np.float16 else x
      # Histogram of magnitudes with sqrt binning, normalized to a pdf/cdf.
      hist, bin_edges = np.histogram((np.abs(y)),
                                     'sqrt',
                                     range=(mn, mx),
                                     density=True)
      hist = hist.astype(x.dtype)
      bin_edges = bin_edges.astype(x.dtype)
      pdf = hist / np.sum(hist)
      cdf = np.cumsum(pdf)
      n = pow(2, self.bitwidth.item() - 1)
      threshold = []
      d = []
      if n + 1 > len(bin_edges) - 1:
        # Too few histogram bins to search; fall back to the max magnitude.
        return bin_edges[(-1)]
      else:
        # Scan candidate clipping points; keep the one minimizing divergence
        # between the clipped cdf and its n-level requantized approximation.
        for i in range(n + 1, len(bin_edges), 1):
          threshold_tmp = (i + 0.5) * (bin_edges[1] - bin_edges[0])
          threshold = np.concatenate((threshold, [threshold_tmp]))
          p = np.copy(cdf)
          p[i - 1:] = 1
          x = np.linspace(0.0, 1.0, n)
          xp = np.linspace(0.0, 1.0, i)
          fp = p[:i]
          p_interp = np.interp(x, xp, fp)
          x = np.linspace(0.0, 1.0, i)
          xp = np.linspace(0.0, 1.0, n)
          fp = p_interp
          q_interp = np.interp(x, xp, fp)
          q = np.copy(p)
          q[:i] = q_interp
          d_tmp = calculate_kl_j(cdf[np.nonzero(cdf)], q[np.nonzero(cdf)])
          d = np.concatenate((d, [d_tmp]))
        return threshold[np.argmin(d)]

    init_scheme = {'weight': _3sd, 'act': _kl_j}
    #init_scheme = {'weight': _max, 'act': _kl_j}
    data = x.detach().cpu().numpy()
    th = init_scheme[self.tensor_type](data)
    # TODO(yuwang): Check if th < 0.
    return torch.tensor([th], dtype=x.dtype, device=x.device)

  def _forward_pass_input(self, x, log_threshold, domain, method):
    # Dispatch target while quantization is disabled: identity.
    return x

  def _quantize(self, x, log_threshold, domain, method):
    return self.quantize_fn_cls.apply(x, log_threshold, domain,
                                      method)

  def _quantize_with_warmup(self, x, log_threshold, domain, method):
    # First call only: seed log_threshold from the data, then quantize.
    self.disable_warmup()
    log_threshold.data[0] = torch.log2(self._init_threshold(x))[0]
    return self._quantize(x, log_threshold, domain, method)

  def forward(self, x):
    #if self.quant_enabled[0] == 0:
    #  return x
    #if self.warmup_enabled[0] == 1:
    #  self.warmup_enabled[0] = 0
    #  threshold = self._init_threshold(x)
    #  self.log_threshold.data = torch.log2(threshold)
    #return self.quantize_fn_cls.apply(x, self.log_threshold, self.domain,
    #                                  self.method)
    return self._forward_fn(x, self.log_threshold, self.domain, self.method)

  def enable_quant(self, enabled=True):
    # Toggle quantization; rebinds the forward dispatch accordingly.
    self.quant_enabled[0] = 1 if enabled else 0
    if enabled:
      self._forward_fn = self._quantize_with_warmup if self.warmup_enabled[
          0] == 1 else self._quantize
    else:
      self._forward_fn = self._forward_pass_input
    return self

  def disable_quant(self):
    return self.enable_quant(False)

  def enable_warmup(self, enabled=True):
    self.warmup_enabled[0] = 1 if enabled else 0
    self._forward_fn = self._quantize_with_warmup if enabled else self._quantize
    return self

  def disable_warmup(self):
    return self.enable_warmup(False)

  def is_warmup_enabled(self):
    return self.warmup_enabled[0] == 1

  def freeze_quant(self, frozen=True):
    # Stop/resume gradient training of the learned threshold.
    self.log_threshold.requires_grad = (not frozen)

  def unfreeze_quant(self):
    self.freeze_quant(False)

  def extra_repr(self):
    return 'quant_enabled={}, bitwidth={}, method={}'.format(
        self.quant_enabled, self.bitwidth, self.method)

  def _save_to_state_dict(self, destination, prefix, keep_vars):
    super(TQTQuantizer, self)._save_to_state_dict(destination, prefix,
                                                  keep_vars)

  def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                            missing_keys, unexpected_keys, error_msgs):
    super(TQTQuantizer,
          self)._load_from_state_dict(state_dict, prefix, local_metadata,
                                      strict, missing_keys, unexpected_keys,
                                      error_msgs)
    # _forward_fn is not persisted; rebuild it from the restored flags.
    self._forward_fn = self._quantize_with_warmup if self.warmup_enabled[
        0] == 1 else self._quantize
    if self.quant_enabled[0] == 0:
      self._forward_fn = self._forward_pass_input

  def export_quant_info(self):
    """Export trained threshold to TorchQuantizer's quant info [bitwidth, fp].

    (1) TQT: qx = clip(round(fx / scale)) * scale, scale = 2^ceil(log2t) / 2^(b-1)
    (2) NndctFixNeron: qx = clip(round(fx * scale)) * (1 / scale), scale = 2^fp
    Let (1) equals (2), we can get
    (3): 2^(b-1) / 2^ceil(log2t) = 2^fp
      => fp = b - 1 - ceil(log2t)
    For more details, see nndct/include/cuda/nndct_fix_kernels.cuh::_fix_neuron_v2_device
    """
    bitwidth = self.bitwidth.item()
    ceil_log2t = torch.ceil(self.log_threshold).item()
    return [[bitwidth, int(bitwidth - 1 - ceil_log2t)]]

  def import_quant_info(self, qinfo):
    # Inverse of export_quant_info; a loaded threshold makes warmup redundant.
    bitwidth, fp = qinfo
    self.bitwidth[0] = bitwidth
    self.log_threshold.data = torch.tensor([bitwidth - 1 - fp],
                                           dtype=self.log_threshold.dtype)
    self.disable_warmup()
def disable_warmup(mod):
  # nn.Module.apply() helper: disable threshold warmup on TQT quantizers;
  # other modules pass through untouched.
  if isinstance(mod, TQTQuantizer):
    mod.disable_warmup() | null |
23,652 | import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch_nndct.nn.modules import fix_ops
from pytorch_nndct.nn.quantization.ops import tqt_ops
class TQTQuantizer(FakeQuantizer):
  """Fake quantizer with a trainable power-of-2 threshold (TQT).

  Learns log2(threshold) by gradient descent, following "Trained
  Quantization Thresholds" (https://arxiv.org/pdf/1903.08066.pdf). The
  first forward call runs a data-driven warmup that seeds the threshold;
  afterwards quantization is delegated to tqt_ops.TQTQuantize.
  """

  def __init__(self, bitwidth, tensor_type, method = None):
    # bitwidth: quantization bit width.
    # tensor_type: 'weight' or 'act'; selects the threshold init scheme.
    # method: rounding-method id; defaults to 3 (weights) / 2 (activations).
    super(TQTQuantizer, self).__init__(bitwidth)
    valid_tensor_types = ['weight', 'act']
    if tensor_type not in valid_tensor_types:
      raise ValueError(
          "'tensor_type' must be one of {}".format(valid_tensor_types))
    self.tensor_type = tensor_type
    # See TorchQuantizer::quantize() in quantization/torchquantizer.py
    if method is not None:
      self.method = method
    else:
      self.method = 3 if tensor_type == 'weight' else 2
    self.quantize_fn_cls = tqt_ops.TQTQuantize
    self.log_threshold = nn.Parameter(torch.tensor([0.0]))
    # uint8 buffer (NCCL does not support bool); 1 until the threshold has
    # been initialized from data.
    self.register_buffer('warmup_enabled', torch.tensor([1], dtype=torch.uint8))
    self._forward_fn = self._quantize_with_warmup

  def _init_threshold(self, x):
    """See Table 2 in https://arxiv.org/pdf/1903.08066.pdf"""

    def _max(x):
      # Unused alternative scheme: plain absolute maximum.
      return np.max(np.abs(x))

    def _3sd(x):
      # |mean| + 3*std; float16 is promoted for numerically stable stats.
      # NOTE(review): 1e-6 is added to every element before the mean —
      # possibly intended as abs(mean) + eps; confirm.
      y = x.astype(np.float32) if x.dtype == np.float16 else x
      return np.abs(np.mean(y + 1e-6)) + 3 * np.std(y)

    def _kl_j(x):
      """
      Ref paper (Algorithm 1):
      "Quantizing Convolutional Neural Networks for Low-Power
      High-Throughput Inference Engines" - Sean Settle et al.
      https://arxiv.org/pdf/1805.07941.pdf
      """

      def calculate_kl_j(x, y):
        return np.sum((x - y) * np.log2(x / y))

      mn = 0
      mx = np.max(np.abs(x))
      y = x.astype(np.float32) if x.dtype == np.float16 else x
      # Histogram of magnitudes with sqrt binning, normalized to a pdf/cdf.
      hist, bin_edges = np.histogram((np.abs(y)),
                                     'sqrt',
                                     range=(mn, mx),
                                     density=True)
      hist = hist.astype(x.dtype)
      bin_edges = bin_edges.astype(x.dtype)
      pdf = hist / np.sum(hist)
      cdf = np.cumsum(pdf)
      n = pow(2, self.bitwidth.item() - 1)
      threshold = []
      d = []
      if n + 1 > len(bin_edges) - 1:
        # Too few histogram bins to search; fall back to the max magnitude.
        return bin_edges[(-1)]
      else:
        # Scan candidate clipping points; keep the one minimizing divergence
        # between the clipped cdf and its n-level requantized approximation.
        for i in range(n + 1, len(bin_edges), 1):
          threshold_tmp = (i + 0.5) * (bin_edges[1] - bin_edges[0])
          threshold = np.concatenate((threshold, [threshold_tmp]))
          p = np.copy(cdf)
          p[i - 1:] = 1
          x = np.linspace(0.0, 1.0, n)
          xp = np.linspace(0.0, 1.0, i)
          fp = p[:i]
          p_interp = np.interp(x, xp, fp)
          x = np.linspace(0.0, 1.0, i)
          xp = np.linspace(0.0, 1.0, n)
          fp = p_interp
          q_interp = np.interp(x, xp, fp)
          q = np.copy(p)
          q[:i] = q_interp
          d_tmp = calculate_kl_j(cdf[np.nonzero(cdf)], q[np.nonzero(cdf)])
          d = np.concatenate((d, [d_tmp]))
        return threshold[np.argmin(d)]

    init_scheme = {'weight': _3sd, 'act': _kl_j}
    #init_scheme = {'weight': _max, 'act': _kl_j}
    data = x.detach().cpu().numpy()
    th = init_scheme[self.tensor_type](data)
    # TODO(yuwang): Check if th < 0.
    return torch.tensor([th], dtype=x.dtype, device=x.device)

  def _forward_pass_input(self, x, log_threshold, domain, method):
    # Dispatch target while quantization is disabled: identity.
    return x

  def _quantize(self, x, log_threshold, domain, method):
    return self.quantize_fn_cls.apply(x, log_threshold, domain,
                                      method)

  def _quantize_with_warmup(self, x, log_threshold, domain, method):
    # First call only: seed log_threshold from the data, then quantize.
    self.disable_warmup()
    log_threshold.data[0] = torch.log2(self._init_threshold(x))[0]
    return self._quantize(x, log_threshold, domain, method)

  def forward(self, x):
    #if self.quant_enabled[0] == 0:
    #  return x
    #if self.warmup_enabled[0] == 1:
    #  self.warmup_enabled[0] = 0
    #  threshold = self._init_threshold(x)
    #  self.log_threshold.data = torch.log2(threshold)
    #return self.quantize_fn_cls.apply(x, self.log_threshold, self.domain,
    #                                  self.method)
    return self._forward_fn(x, self.log_threshold, self.domain, self.method)

  def enable_quant(self, enabled=True):
    # Toggle quantization; rebinds the forward dispatch accordingly.
    self.quant_enabled[0] = 1 if enabled else 0
    if enabled:
      self._forward_fn = self._quantize_with_warmup if self.warmup_enabled[
          0] == 1 else self._quantize
    else:
      self._forward_fn = self._forward_pass_input
    return self

  def disable_quant(self):
    return self.enable_quant(False)

  def enable_warmup(self, enabled=True):
    self.warmup_enabled[0] = 1 if enabled else 0
    self._forward_fn = self._quantize_with_warmup if enabled else self._quantize
    return self

  def disable_warmup(self):
    return self.enable_warmup(False)

  def is_warmup_enabled(self):
    return self.warmup_enabled[0] == 1

  def freeze_quant(self, frozen=True):
    # Stop/resume gradient training of the learned threshold.
    self.log_threshold.requires_grad = (not frozen)

  def unfreeze_quant(self):
    self.freeze_quant(False)

  def extra_repr(self):
    return 'quant_enabled={}, bitwidth={}, method={}'.format(
        self.quant_enabled, self.bitwidth, self.method)

  def _save_to_state_dict(self, destination, prefix, keep_vars):
    super(TQTQuantizer, self)._save_to_state_dict(destination, prefix,
                                                  keep_vars)

  def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                            missing_keys, unexpected_keys, error_msgs):
    super(TQTQuantizer,
          self)._load_from_state_dict(state_dict, prefix, local_metadata,
                                      strict, missing_keys, unexpected_keys,
                                      error_msgs)
    # _forward_fn is not persisted; rebuild it from the restored flags.
    self._forward_fn = self._quantize_with_warmup if self.warmup_enabled[
        0] == 1 else self._quantize
    if self.quant_enabled[0] == 0:
      self._forward_fn = self._forward_pass_input

  def export_quant_info(self):
    """Export trained threshold to TorchQuantizer's quant info [bitwidth, fp].

    (1) TQT: qx = clip(round(fx / scale)) * scale, scale = 2^ceil(log2t) / 2^(b-1)
    (2) NndctFixNeron: qx = clip(round(fx * scale)) * (1 / scale), scale = 2^fp
    Let (1) equals (2), we can get
    (3): 2^(b-1) / 2^ceil(log2t) = 2^fp
      => fp = b - 1 - ceil(log2t)
    For more details, see nndct/include/cuda/nndct_fix_kernels.cuh::_fix_neuron_v2_device
    """
    bitwidth = self.bitwidth.item()
    ceil_log2t = torch.ceil(self.log_threshold).item()
    return [[bitwidth, int(bitwidth - 1 - ceil_log2t)]]

  def import_quant_info(self, qinfo):
    # Inverse of export_quant_info; a loaded threshold makes warmup redundant.
    bitwidth, fp = qinfo
    self.bitwidth[0] = bitwidth
    self.log_threshold.data = torch.tensor([bitwidth - 1 - fp],
                                           dtype=self.log_threshold.dtype)
    self.disable_warmup()
def freeze_quant(mod):
  # nn.Module.apply() helper: freeze the learned threshold of TQT
  # quantizers (stops its gradient updates); no-op for other modules.
  if isinstance(mod, TQTQuantizer):
    mod.freeze_quant() | null |
23,653 | import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch_nndct.nn.modules import fix_ops
from pytorch_nndct.nn.quantization.ops import tqt_ops
class TQTQuantizer(FakeQuantizer):
  """Fake quantizer with a trainable power-of-2 threshold (TQT).

  Learns log2(threshold) by gradient descent, following "Trained
  Quantization Thresholds" (https://arxiv.org/pdf/1903.08066.pdf). The
  first forward call runs a data-driven warmup that seeds the threshold;
  afterwards quantization is delegated to tqt_ops.TQTQuantize.
  """

  def __init__(self, bitwidth, tensor_type, method = None):
    # bitwidth: quantization bit width.
    # tensor_type: 'weight' or 'act'; selects the threshold init scheme.
    # method: rounding-method id; defaults to 3 (weights) / 2 (activations).
    super(TQTQuantizer, self).__init__(bitwidth)
    valid_tensor_types = ['weight', 'act']
    if tensor_type not in valid_tensor_types:
      raise ValueError(
          "'tensor_type' must be one of {}".format(valid_tensor_types))
    self.tensor_type = tensor_type
    # See TorchQuantizer::quantize() in quantization/torchquantizer.py
    if method is not None:
      self.method = method
    else:
      self.method = 3 if tensor_type == 'weight' else 2
    self.quantize_fn_cls = tqt_ops.TQTQuantize
    self.log_threshold = nn.Parameter(torch.tensor([0.0]))
    # uint8 buffer (NCCL does not support bool); 1 until the threshold has
    # been initialized from data.
    self.register_buffer('warmup_enabled', torch.tensor([1], dtype=torch.uint8))
    self._forward_fn = self._quantize_with_warmup

  def _init_threshold(self, x):
    """See Table 2 in https://arxiv.org/pdf/1903.08066.pdf"""

    def _max(x):
      # Unused alternative scheme: plain absolute maximum.
      return np.max(np.abs(x))

    def _3sd(x):
      # |mean| + 3*std; float16 is promoted for numerically stable stats.
      # NOTE(review): 1e-6 is added to every element before the mean —
      # possibly intended as abs(mean) + eps; confirm.
      y = x.astype(np.float32) if x.dtype == np.float16 else x
      return np.abs(np.mean(y + 1e-6)) + 3 * np.std(y)

    def _kl_j(x):
      """
      Ref paper (Algorithm 1):
      "Quantizing Convolutional Neural Networks for Low-Power
      High-Throughput Inference Engines" - Sean Settle et al.
      https://arxiv.org/pdf/1805.07941.pdf
      """

      def calculate_kl_j(x, y):
        return np.sum((x - y) * np.log2(x / y))

      mn = 0
      mx = np.max(np.abs(x))
      y = x.astype(np.float32) if x.dtype == np.float16 else x
      # Histogram of magnitudes with sqrt binning, normalized to a pdf/cdf.
      hist, bin_edges = np.histogram((np.abs(y)),
                                     'sqrt',
                                     range=(mn, mx),
                                     density=True)
      hist = hist.astype(x.dtype)
      bin_edges = bin_edges.astype(x.dtype)
      pdf = hist / np.sum(hist)
      cdf = np.cumsum(pdf)
      n = pow(2, self.bitwidth.item() - 1)
      threshold = []
      d = []
      if n + 1 > len(bin_edges) - 1:
        # Too few histogram bins to search; fall back to the max magnitude.
        return bin_edges[(-1)]
      else:
        # Scan candidate clipping points; keep the one minimizing divergence
        # between the clipped cdf and its n-level requantized approximation.
        for i in range(n + 1, len(bin_edges), 1):
          threshold_tmp = (i + 0.5) * (bin_edges[1] - bin_edges[0])
          threshold = np.concatenate((threshold, [threshold_tmp]))
          p = np.copy(cdf)
          p[i - 1:] = 1
          x = np.linspace(0.0, 1.0, n)
          xp = np.linspace(0.0, 1.0, i)
          fp = p[:i]
          p_interp = np.interp(x, xp, fp)
          x = np.linspace(0.0, 1.0, i)
          xp = np.linspace(0.0, 1.0, n)
          fp = p_interp
          q_interp = np.interp(x, xp, fp)
          q = np.copy(p)
          q[:i] = q_interp
          d_tmp = calculate_kl_j(cdf[np.nonzero(cdf)], q[np.nonzero(cdf)])
          d = np.concatenate((d, [d_tmp]))
        return threshold[np.argmin(d)]

    init_scheme = {'weight': _3sd, 'act': _kl_j}
    #init_scheme = {'weight': _max, 'act': _kl_j}
    data = x.detach().cpu().numpy()
    th = init_scheme[self.tensor_type](data)
    # TODO(yuwang): Check if th < 0.
    return torch.tensor([th], dtype=x.dtype, device=x.device)

  def _forward_pass_input(self, x, log_threshold, domain, method):
    # Dispatch target while quantization is disabled: identity.
    return x

  def _quantize(self, x, log_threshold, domain, method):
    return self.quantize_fn_cls.apply(x, log_threshold, domain,
                                      method)

  def _quantize_with_warmup(self, x, log_threshold, domain, method):
    # First call only: seed log_threshold from the data, then quantize.
    self.disable_warmup()
    log_threshold.data[0] = torch.log2(self._init_threshold(x))[0]
    return self._quantize(x, log_threshold, domain, method)

  def forward(self, x):
    #if self.quant_enabled[0] == 0:
    #  return x
    #if self.warmup_enabled[0] == 1:
    #  self.warmup_enabled[0] = 0
    #  threshold = self._init_threshold(x)
    #  self.log_threshold.data = torch.log2(threshold)
    #return self.quantize_fn_cls.apply(x, self.log_threshold, self.domain,
    #                                  self.method)
    return self._forward_fn(x, self.log_threshold, self.domain, self.method)

  def enable_quant(self, enabled=True):
    # Toggle quantization; rebinds the forward dispatch accordingly.
    self.quant_enabled[0] = 1 if enabled else 0
    if enabled:
      self._forward_fn = self._quantize_with_warmup if self.warmup_enabled[
          0] == 1 else self._quantize
    else:
      self._forward_fn = self._forward_pass_input
    return self

  def disable_quant(self):
    return self.enable_quant(False)

  def enable_warmup(self, enabled=True):
    self.warmup_enabled[0] = 1 if enabled else 0
    self._forward_fn = self._quantize_with_warmup if enabled else self._quantize
    return self

  def disable_warmup(self):
    return self.enable_warmup(False)

  def is_warmup_enabled(self):
    return self.warmup_enabled[0] == 1

  def freeze_quant(self, frozen=True):
    # Stop/resume gradient training of the learned threshold.
    self.log_threshold.requires_grad = (not frozen)

  def unfreeze_quant(self):
    self.freeze_quant(False)

  def extra_repr(self):
    return 'quant_enabled={}, bitwidth={}, method={}'.format(
        self.quant_enabled, self.bitwidth, self.method)

  def _save_to_state_dict(self, destination, prefix, keep_vars):
    super(TQTQuantizer, self)._save_to_state_dict(destination, prefix,
                                                  keep_vars)

  def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                            missing_keys, unexpected_keys, error_msgs):
    super(TQTQuantizer,
          self)._load_from_state_dict(state_dict, prefix, local_metadata,
                                      strict, missing_keys, unexpected_keys,
                                      error_msgs)
    # _forward_fn is not persisted; rebuild it from the restored flags.
    self._forward_fn = self._quantize_with_warmup if self.warmup_enabled[
        0] == 1 else self._quantize
    if self.quant_enabled[0] == 0:
      self._forward_fn = self._forward_pass_input

  def export_quant_info(self):
    """Export trained threshold to TorchQuantizer's quant info [bitwidth, fp].

    (1) TQT: qx = clip(round(fx / scale)) * scale, scale = 2^ceil(log2t) / 2^(b-1)
    (2) NndctFixNeron: qx = clip(round(fx * scale)) * (1 / scale), scale = 2^fp
    Let (1) equals (2), we can get
    (3): 2^(b-1) / 2^ceil(log2t) = 2^fp
      => fp = b - 1 - ceil(log2t)
    For more details, see nndct/include/cuda/nndct_fix_kernels.cuh::_fix_neuron_v2_device
    """
    bitwidth = self.bitwidth.item()
    ceil_log2t = torch.ceil(self.log_threshold).item()
    return [[bitwidth, int(bitwidth - 1 - ceil_log2t)]]

  def import_quant_info(self, qinfo):
    # Inverse of export_quant_info; a loaded threshold makes warmup redundant.
    bitwidth, fp = qinfo
    self.bitwidth[0] = bitwidth
    self.log_threshold.data = torch.tensor([bitwidth - 1 - fp],
                                           dtype=self.log_threshold.dtype)
    self.disable_warmup()
def unfreeze_quant(mod):
if isinstance(mod, TQTQuantizer):
mod.unfreeze_quant() | null |
23,654 | import torch
from pytorch_nndct.utils.hw_dtype import fp32
from pytorch_nndct.nn.quantization.ops import quantize_ops
def get_exponent(tensor):
with torch.no_grad():
t = torch.nan_to_num(tensor, nan=0, posinf=0, neginf=0)
'''
smallest positive subnormal, largest subnormal, smallest positive normal, largest normal
>>> t = torch.tensor([0, 1.4012984643*10**-45, 1.1754942107*10**-38, 1.1754943508 *10**-38, 3.4028234664*10**38])
>>> torch.frexp(t)
torch.return_types.frexp(
mantissa=tensor([0.0000, 0.5000, 1.0000, 0.5000, 1.0000]),
exponent=tensor([0, -148, -126, -125, 128], dtype=torch.int32))
'''
_, exp = torch.frexp(t)
# The exponent of subnormal is less than fp32.emin
exp[(t==0) | (exp < fp32.emin)] = fp32.emin
return exp - 1.0
fp32 = DType('fp32', 'rn', -126, 127, 24)
def cast_to_fp(tensor, exp_bias, m_bits, round_mode, min_val, max_val):
exp = get_exponent(tensor)
# 2**(1 - exp_bias - m_bits) is the smallest representable value
scale = torch.pow(2.0, torch.clamp(exp, 1 - exp_bias, fp32.emax + 1) - m_bits)
return quantize_ops.quantize(tensor, scale, round_mode, min_val, max_val) | null |
23,655 | import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch_nndct.nn.quantization.ops import quantize_ops
from pytorch_nndct.utils import onnx_utils
def _get_exponent_v1(tensor, epsilon=2**-23):
t = tensor.abs()
# we use fp32's 1.mantissa_bits
max_t, _ = t.max(t.dim() - 1, keepdim=True)
max_exp = (max_t + epsilon).log2().floor()
t_exp = (t + epsilon).log2().floor()
return max_exp, t_exp | null |
23,656 | import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch_nndct.nn.quantization.ops import quantize_ops
from pytorch_nndct.utils import onnx_utils
def _get_exponent_v2(tensor):
_, exp = torch.frexp(tensor)
# we use fp32's 1.mantissa_bits
max_exp, _ = exp.max(exp.dim() - 1, keepdim=True)
return max_exp - 1, 1 | null |
23,657 | import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch_nndct.nn.quantization.ops import quantize_ops
from pytorch_nndct.utils import onnx_utils
def _get_exponent_v3(tensor):
tensor_shape = list(tensor.shape)
exponent = torch.ops.vai.calculate_shared_exponent(tensor, tensor_shape[-1])
tensor_shape[-1] = 1
return exponent.reshape(tensor_shape), 1 | null |
23,658 | import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch_nndct.nn.quantization.ops import quantize_ops
from pytorch_nndct.utils import onnx_utils
def _min_max_at_exp(exp, bit_width):
# sign bits: 1, exponent bits: 8, no implicit leading 1
mantissa_bits = bit_width - 9
# The min/max representable value with exp
# x = {sign, exp, mant} = {0 01001101 0000001}
# e = int(01001101) - 127
# M = int(0.000001) = 2^(-6)
# x = (-1)^sign*2^e*M
# = 2^e * 2^(-6) = 2^(e - 6)
min_v = torch.pow(2.0, exp - (mantissa_bits - 1))
max_v = torch.pow(2.0, exp + 1) - min_v
return min_v, max_v | null |
23,659 | import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch_nndct.nn.quantization.ops import quantize_ops
from pytorch_nndct.utils import onnx_utils
def transform_to_block_wise(input, block_size=8, axis=1):
def transform_block_to_shape(input, shape, axis=1):
class BFPQuantizeV1(torch.autograd.Function):
def forward(ctx, t, bit_width, rounding_mode='round_even'):
def quantize_to_bfp_v1(tensor,
bit_width,
block_size,
axis,
rounding_mode):
shape = tensor.shape
tensor = transform_to_block_wise(tensor, block_size, axis)
tensor = BFPQuantizeV1.apply(tensor, bit_width, rounding_mode)
return transform_block_to_shape(tensor, shape, axis) | null |
23,660 | import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch_nndct.nn.quantization.ops import quantize_ops
from pytorch_nndct.utils import onnx_utils
def transpose_to_block_wise(input, block_size=8, axis=1):
def transpose_block_to_shape(input, shape, axis=1):
class BFPQuantizeV2(BFPQuantize):
def forward(ctx, t, bit_width, block_size, rounding_mode='round_even'):
def quantize_to_bfp_v2(tensor, bit_width, block_size, axis, rounding_mode):
shape = tensor.shape
tensor = transpose_to_block_wise(tensor, block_size, axis)
tensor = BFPQuantizeV2.apply(tensor, bit_width, block_size, rounding_mode)
return transpose_block_to_shape(tensor, shape, axis) | null |
23,661 | import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch_nndct.nn.quantization.ops import quantize_ops
from pytorch_nndct.utils import onnx_utils
def pad_to_block_last(tensor, block_size=8, axis=1):
def depad_and_transpose(tensor, shape, axis=1):
class BFPQuantizeV3(BFPQuantize):
def forward(ctx, t, bit_width, block_size, rounding_mode='round_even'):
def quantize_to_bfp_v3(tensor, bit_width, block_size, axis, rounding_mode):
shape = tensor.shape
tensor = pad_to_block_last(tensor, block_size, axis)
tensor = BFPQuantizeV3.apply(tensor, bit_width, block_size, rounding_mode)
return depad_and_transpose(tensor, shape, axis) | null |
23,662 | import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch_nndct.nn.quantization.ops import quantize_ops
from pytorch_nndct.utils import onnx_utils
def transform_to_block_wise(input, block_size=8, axis=1):
def transform_block_to_shape(input, shape, axis=1):
def quantize_to_bfp_prime(tensor,
bit_width,
block_size,
axis,
rounding_mode,
epsilon=torch.pow(torch.tensor(2.0), -23)):
shape = tensor.shape
tensor = transform_to_block_wise(tensor, block_size, axis)
tensor = _to_bfp_prime(tensor, bit_width, rounding_mode, epsilon)
return transform_block_to_shape(tensor, shape, axis) | null |
23,663 | import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch_nndct.nn.quantization.ops import quantize_ops
from pytorch_nndct.utils import onnx_utils
def pad_to_block_last(tensor, block_size=8, axis=1):
def depad_and_transpose(tensor, shape, axis=1):
class BFPPrimeSharedQuantize(BFPQuantize):
def forward(ctx,
t,
bit_width,
block_size,
sub_block_size,
sub_block_shift_bits,
rounding_mode='round_to_nearest'):
def quantize_to_bfp_prime_shared(tensor, bit_width, block_size, sub_block_size,
sub_block_shift_bits, axis, rounding_mode):
if block_size % sub_block_size != 0:
raise ValueError(
f('The block_size must be divisible by sub_block_size.'
'(block_size={block_size}, sub_block_size={sub_block_size})'))
shape = tensor.shape
tensor = pad_to_block_last(tensor, block_size, axis)
tensor = BFPPrimeSharedQuantize.apply(tensor, bit_width, block_size,
sub_block_size, sub_block_shift_bits,
rounding_mode)
return depad_and_transpose(tensor, shape, axis) | null |
23,664 | import math
import numpy as np
import torch
from pytorch_nndct.nn.modules import fix_ops
The provided code snippet includes necessary dependencies for implementing the `_cdf_measure` function. Write a Python function `def _cdf_measure(x, y, measure_name='Kullback-Leibler-J')` to solve the following problem:
Ref paper: "Non-parametric Information-Theoretic Measures of One-Dimensional Distribution Functions from Continuous Time Series" - Paolo D Alberto et al. https://epubs.siam.org/doi/abs/10.1137/1.9781611972795.59 https://epubs.siam.org/doi/pdf/10.1137/1.9781611972795.59 measure_names_symm = ['Camberra', 'Chi-Squared', 'Cramer-von Mises', 'Euclidean', 'Hellinger', 'Jin-L', 'Jensen-Shannon', 'Kolmogorov-Smirnov', 'Kullback-Leibler-J', 'Variational'] measure_names_asym = ['Jin-K', 'Kullback-Leibler-I'] measure_names_excl = ['Bhattacharyya', 'Phi', 'Xi']
Here is the function:
def _cdf_measure(x, y, measure_name='Kullback-Leibler-J'):
"""
Ref paper:
"Non-parametric Information-Theoretic Measures of One-Dimensional
Distribution Functions from Continuous Time Series" - Paolo D Alberto et al.
https://epubs.siam.org/doi/abs/10.1137/1.9781611972795.59
https://epubs.siam.org/doi/pdf/10.1137/1.9781611972795.59
measure_names_symm = ['Camberra', 'Chi-Squared', 'Cramer-von Mises', 'Euclidean',
'Hellinger', 'Jin-L', 'Jensen-Shannon', 'Kolmogorov-Smirnov',
'Kullback-Leibler-J', 'Variational']
measure_names_asym = ['Jin-K', 'Kullback-Leibler-I']
measure_names_excl = ['Bhattacharyya', 'Phi', 'Xi']
"""
if measure_name == 'Bhattacharyya':
return np.sum(np.sqrt(x * y))
else:
if measure_name == 'Camberra':
return np.sum(np.abs(x - y) / (x + y))
else:
if measure_name == 'Chi-Squared':
return np.sum(np.power(x - y, 2.0) / x)
else:
if measure_name == 'Cramer-von Mises':
return np.sum(np.power(x - y, 2.0))
else:
if measure_name == 'Euclidean':
return np.power(np.sum(np.power(x - y, 2.0)), 0.5)
else:
if measure_name == 'Hellinger':
return np.power(np.sum(np.sqrt(x) - np.sqrt(y)), 2.0) / 2.0
else:
if measure_name == 'Jin-K':
return _cdf_measure(x, (x + y) / 2.0, 'Kullback-Leibler-I')
else:
if measure_name == 'Jin-L':
return _cdf_measure(
x, (x + y) / 2.0, 'Kullback-Leibler-I') + _cdf_measure(
y, (x + y) / 2.0, 'Kullback-Leibler-I')
if measure_name == 'Jensen-Shannon':
return (
_cdf_measure(x, (x + y) / 2.0, 'Kullback-Leibler-I') +
_cdf_measure(y,
(x + y) / 2.0, 'Kullback-Leibler-I')) / 2.0
if measure_name == 'Kolmogorov-Smirnov':
return np.max(np.abs(x - y))
if measure_name == 'Kullback-Leibler-I':
return np.sum(x * np.log2(x / y))
if measure_name == 'Kullback-Leibler-J':
return np.sum((x - y) * np.log2(x / y))
if measure_name == 'Phi':
return np.max(
np.abs(x - y) /
np.sqrt(np.minimum((x + y) / 2.0, 1 - (x + y) / 2.0)))
if measure_name == 'Variational':
return np.sum(np.abs(x - y))
if measure_name == 'Xi':
return np.max(
np.abs(x - y) / np.sqrt((x + y) / 2.0 * (1 - (x + y) / 2.0)))
return _cdf_measure(x, y, 'Kullback-Leibler-J') | Ref paper: "Non-parametric Information-Theoretic Measures of One-Dimensional Distribution Functions from Continuous Time Series" - Paolo D Alberto et al. https://epubs.siam.org/doi/abs/10.1137/1.9781611972795.59 https://epubs.siam.org/doi/pdf/10.1137/1.9781611972795.59 measure_names_symm = ['Camberra', 'Chi-Squared', 'Cramer-von Mises', 'Euclidean', 'Hellinger', 'Jin-L', 'Jensen-Shannon', 'Kolmogorov-Smirnov', 'Kullback-Leibler-J', 'Variational'] measure_names_asym = ['Jin-K', 'Kullback-Leibler-I'] measure_names_excl = ['Bhattacharyya', 'Phi', 'Xi'] |
23,665 | from functools import wraps
import torch
def pre_and_post_process_f16_tensor(func):
@wraps(func)
def wrapper(*args, **kwargs):
tensor_type_list = []
tensor_type_dict = {}
out_need_convert = False
for arg in args:
if isinstance(arg, torch.Tensor):
tensor_type_list.append(arg.dtype)
if arg.dtype == torch.float16:
arg.data = arg.data.to(torch.float32)
out_need_convert = True
for key, value in kwargs.items():
if isinstance(value, torch.Tensor):
tensor_type_dict[key] = value.dtype
if value.dtype == torch.float16:
value.data = value.data.to(torch.float32)
out_need_convert = True
out = func(*args, **kwargs)
if out_need_convert:
if isinstance(out, torch.Tensor):
if out.dtype == torch.float32:
out.data = out.data.to(torch.float16)
#for i in range(len(args)):
# if isinstance(args[i], torch.Tensor):
# if args[i].dtype != tensor_type_list[i]:
# args[i].data = args[i].data.to(tensor_type_list[i])
index = 0
for arg in args:
if isinstance(arg, torch.Tensor):
if arg.dtype != tensor_type_list[index]:
arg.data = arg.data.to(tensor_type_list[index])
index = index + 1
for key, value in kwargs.items():
if isinstance(value, torch.Tensor):
if value.dtype != tensor_type_dict[key]:
value.data = value.data.to(tensor_type_dict[key])
return out
return wrapper | null |
23,666 | import torch
import numpy as np
from torch.nn.utils.rnn import PackedSequence
import torch.nn as nn
def deephi_pack_padded_sequence(input, lengths, batch_first=False):
if isinstance(lengths, list):
lengths = torch.LongTensor(lengths)
data = input
if not batch_first:
batch_sizes = np.ones(input.size(0)) * input.size(1)
else:
batch_sizes = np.ones(input.size(1)) * input.size(0)
return PackedSequence(data, batch_sizes)
def pack_padded_sequence(*args, **kwargs):
quant_mode, _ = maybe_get_quantizer()
if quant_mode == None:
return nn.utils.rnn.pack_padded_sequence(*args, **kwargs)
return deephi_pack_padded_sequence(*args, **kwargs) | null |
23,667 | import torch
import numpy as np
from torch.nn.utils.rnn import PackedSequence
import torch.nn as nn
def deephi_pad_packed_sequence(sequence,
batch_first=False,
padding_value=0.0,
total_length=None):
output = sequence
if not batch_first:
lengths = np.ones(sequence.size(0)) * sequence.size(1)
else:
lengths = np.ones(sequecne.size(1)) * sequence.size(0)
return output, torch.LongTensor(lengths)
def pad_packed_sequence(*args, **kwargs):
quant_mode, _ = maybe_get_quantizer()
if quant_mode == None:
return nn.utils.rnn.pad_packed_sequence(*args, **kwargs)
return deephi_pad_packed_sequence(*args, **kwargs) | null |
23,668 | import copy
import importlib
import os
import random
import string
import sys
import tempfile
import torch
from torch.nn import DataParallel
from torch.nn.parallel import DistributedDataParallel
from nndct_shared.nndct_graph.base_tensor import Tensor
from nndct_shared.pruning import errors
from pytorch_nndct import parse
from pytorch_nndct.export import get_script_writer
from pytorch_nndct.parse import parse_utils
from pytorch_nndct.qproc import utils as api_utils
from pytorch_nndct.utils import TorchSymbol
from pytorch_nndct.utils import module_util as mod_util
_torch_layouts = {2: 'OI', 4: 'OIHW'}
_nndct_layouts = {2: 'OI', 4: 'OHWI'}
def transpose_tensor(tensor, src_layouts, dst_layouts):
if not isinstance(tensor, Tensor):
raise errors.OptimizerDataFormatError(
"'tensor' must be Tensor, but given {}".format(type(tensor)))
if tensor.ndim != 4 and tensor.ndim != 2:
return tensor
src_layout = src_layouts[tensor.ndim]
dst_layout = dst_layouts[tensor.ndim]
axis = [src_layout.index(d) for d in dst_layout]
tensor.transpose(axis)
return tensor
def torch_to_nndct(tensor):
return transpose_tensor(tensor, _torch_layouts, _nndct_layouts) | null |
23,669 | import copy
import importlib
import os
import random
import string
import sys
import tempfile
import torch
from torch.nn import DataParallel
from torch.nn.parallel import DistributedDataParallel
from nndct_shared.nndct_graph.base_tensor import Tensor
from nndct_shared.pruning import errors
from pytorch_nndct import parse
from pytorch_nndct.export import get_script_writer
from pytorch_nndct.parse import parse_utils
from pytorch_nndct.qproc import utils as api_utils
from pytorch_nndct.utils import TorchSymbol
from pytorch_nndct.utils import module_util as mod_util
_torch_layouts = {2: 'OI', 4: 'OIHW'}
_nndct_layouts = {2: 'OI', 4: 'OHWI'}
def transpose_tensor(tensor, src_layouts, dst_layouts):
if not isinstance(tensor, Tensor):
raise errors.OptimizerDataFormatError(
"'tensor' must be Tensor, but given {}".format(type(tensor)))
if tensor.ndim != 4 and tensor.ndim != 2:
return tensor
src_layout = src_layouts[tensor.ndim]
dst_layout = dst_layouts[tensor.ndim]
axis = [src_layout.index(d) for d in dst_layout]
tensor.transpose(axis)
return tensor
def nndct_to_torch(tensor):
return transpose_tensor(tensor, _nndct_layouts, _torch_layouts) | null |
23,670 | import copy
import importlib
import os
import random
import string
import sys
import tempfile
import torch
from torch.nn import DataParallel
from torch.nn.parallel import DistributedDataParallel
from nndct_shared.nndct_graph.base_tensor import Tensor
from nndct_shared.pruning import errors
from pytorch_nndct import parse
from pytorch_nndct.export import get_script_writer
from pytorch_nndct.parse import parse_utils
from pytorch_nndct.qproc import utils as api_utils
from pytorch_nndct.utils import TorchSymbol
from pytorch_nndct.utils import module_util as mod_util
_torch_layouts = {2: 'OI', 4: 'OIHW'}
_nndct_layouts = {2: 'OI', 4: 'OHWI'}
def transpose_tensor(tensor, src_layouts, dst_layouts):
if not isinstance(tensor, Tensor):
raise errors.OptimizerDataFormatError(
"'tensor' must be Tensor, but given {}".format(type(tensor)))
if tensor.ndim != 4 and tensor.ndim != 2:
return tensor
src_layout = src_layouts[tensor.ndim]
dst_layout = dst_layouts[tensor.ndim]
axis = [src_layout.index(d) for d in dst_layout]
tensor.transpose(axis)
return tensor
def torch_tensor_from_nndct(tensor):
replicated_tensor = copy.deepcopy(tensor)
return torch.from_numpy(
transpose_tensor(replicated_tensor, _nndct_layouts, _torch_layouts).data) | null |
23,671 | import copy
import importlib
import os
import random
import string
import sys
import tempfile
import torch
from torch.nn import DataParallel
from torch.nn.parallel import DistributedDataParallel
from nndct_shared.nndct_graph.base_tensor import Tensor
from nndct_shared.pruning import errors
from pytorch_nndct import parse
from pytorch_nndct.export import get_script_writer
from pytorch_nndct.parse import parse_utils
from pytorch_nndct.qproc import utils as api_utils
from pytorch_nndct.utils import TorchSymbol
from pytorch_nndct.utils import module_util as mod_util
def dummy_inputs(input_specs):
inputs = []
for spec in input_specs:
inputs.append(torch.rand(*spec.shape).type(spec.dtype).cuda())
return tuple(inputs) | null |
23,672 | import copy
import importlib
import os
import random
import string
import sys
import tempfile
import torch
from torch.nn import DataParallel
from torch.nn.parallel import DistributedDataParallel
from nndct_shared.nndct_graph.base_tensor import Tensor
from nndct_shared.pruning import errors
from pytorch_nndct import parse
from pytorch_nndct.export import get_script_writer
from pytorch_nndct.parse import parse_utils
from pytorch_nndct.qproc import utils as api_utils
from pytorch_nndct.utils import TorchSymbol
from pytorch_nndct.utils import module_util as mod_util
def unwrap_parallel_module(module):
if isinstance(module, (DataParallel, DistributedDataParallel)):
model = module.module
else:
model = module
return model | null |
23,673 | import copy
import importlib
import os
import random
import string
import sys
import tempfile
import torch
from torch.nn import DataParallel
from torch.nn.parallel import DistributedDataParallel
from nndct_shared.nndct_graph.base_tensor import Tensor
from nndct_shared.pruning import errors
from pytorch_nndct import parse
from pytorch_nndct.export import get_script_writer
from pytorch_nndct.parse import parse_utils
from pytorch_nndct.qproc import utils as api_utils
from pytorch_nndct.utils import TorchSymbol
from pytorch_nndct.utils import module_util as mod_util
import os
if not os.path.exists(BASE_DIR):
os.makedirs(BASE_DIR)
def is_debug_mode():
return os.environ.get('VAI_OPTIMIZER_DEBUG', None) == '1' | null |
23,674 | import copy
import importlib
import os
import random
import string
import sys
import tempfile
import torch
from torch.nn import DataParallel
from torch.nn.parallel import DistributedDataParallel
from nndct_shared.nndct_graph.base_tensor import Tensor
from nndct_shared.pruning import errors
from pytorch_nndct import parse
from pytorch_nndct.export import get_script_writer
from pytorch_nndct.parse import parse_utils
from pytorch_nndct.qproc import utils as api_utils
from pytorch_nndct.utils import TorchSymbol
from pytorch_nndct.utils import module_util as mod_util
def write_graph_script(graph, filename=None):
if not filename:
_, filename = tempfile.mkstemp(suffix='.py', text=True)
writer = get_script_writer(enable_quant=False)
writer.write(graph, filename)
return filename
def parse_to_graph(module, inputs, debug=False):
if debug:
from nndct_shared.utils import NndctOption, NndctDebugLogger
NndctDebugLogger("vai_opt_debug.log")
NndctOption.nndct_parse_debug.value = 5
parser = parse.TorchParser()
graph = parser(module._get_name(), module, inputs)
if debug:
from nndct_shared.utils import saving
saving.save_graph(graph, hdf5_path='{}.hdf5'.format(graph.name))
write_graph_script(graph, '{}_baseline.py'.format(graph.name))
return graph | null |
23,675 | import copy
import importlib
import os
import random
import string
import sys
import tempfile
import torch
from torch.nn import DataParallel
from torch.nn.parallel import DistributedDataParallel
from nndct_shared.nndct_graph.base_tensor import Tensor
from nndct_shared.pruning import errors
from pytorch_nndct import parse
from pytorch_nndct.export import get_script_writer
from pytorch_nndct.parse import parse_utils
from pytorch_nndct.qproc import utils as api_utils
from pytorch_nndct.utils import TorchSymbol
from pytorch_nndct.utils import module_util as mod_util
def random_str(str_length=4):
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(str_length))
def write_graph_script(graph, filename=None):
if not filename:
_, filename = tempfile.mkstemp(suffix='.py', text=True)
writer = get_script_writer(enable_quant=False)
writer.write(graph, filename)
return filename
def rebuild_model(graph, filename=None):
filename = write_graph_script(graph, filename)
#module_name = graph.name
py_module_name = "_".join(["nndct", random_str()])
spec = importlib.util.spec_from_file_location(py_module_name, filename)
py_module = importlib.util.module_from_spec(spec)
sys.modules[py_module_name] = py_module
spec.loader.exec_module(py_module)
rebuilt_model = py_module.__dict__[graph.name]()
api_utils.connect_module_with_graph(rebuilt_model, graph)
return rebuilt_model, filename | null |
23,676 | import copy
import importlib
import os
import random
import string
import sys
import tempfile
import torch
from torch.nn import DataParallel
from torch.nn.parallel import DistributedDataParallel
from nndct_shared.nndct_graph.base_tensor import Tensor
from nndct_shared.pruning import errors
from pytorch_nndct import parse
from pytorch_nndct.export import get_script_writer
from pytorch_nndct.parse import parse_utils
from pytorch_nndct.qproc import utils as api_utils
from pytorch_nndct.utils import TorchSymbol
from pytorch_nndct.utils import module_util as mod_util
def map_rebuilt_module_to_node(model, graph):
module_to_node = {}
for name, module in model.named_children():
# module_name -> node_id
idx = int(name.split(TorchSymbol.MODULE_NAME_SEPERATOR)[-1])
node = graph.get_node_by_idx(idx)
module_to_node[name] = node
return module_to_node | null |
23,677 | import copy
import importlib
import os
import random
import string
import sys
import tempfile
import torch
from torch.nn import DataParallel
from torch.nn.parallel import DistributedDataParallel
from nndct_shared.nndct_graph.base_tensor import Tensor
from nndct_shared.pruning import errors
from pytorch_nndct import parse
from pytorch_nndct.export import get_script_writer
from pytorch_nndct.parse import parse_utils
from pytorch_nndct.qproc import utils as api_utils
from pytorch_nndct.utils import TorchSymbol
from pytorch_nndct.utils import module_util as mod_util
def map_original_module_to_node(model, graph):
module_to_node = {}
for node in graph.nodes:
module = mod_util.get_module_by_node(model, node)
if module:
module_to_node[id(module)] = node.name
return module_to_node
def excluded_node_names(model, graph, excludes):
excluded_nodes = []
module_to_node = map_original_module_to_node(model, graph)
for exclude in excludes:
if isinstance(exclude, str):
excluded_nodes.append(exclude)
elif isinstance(exclude, torch.nn.Module):
for module in exclude.modules():
module_id = id(module)
if module_id in module_to_node:
excluded_nodes.append(module_to_node[module_id])
else:
raise errors.OptimizerInvalidArgumentError(
'Excludes must be either string or torch.nn.Module')
return excluded_nodes | null |
23,678 | import copy
import importlib
import os
import random
import string
import sys
import tempfile
import torch
from torch.nn import DataParallel
from torch.nn.parallel import DistributedDataParallel
from nndct_shared.nndct_graph.base_tensor import Tensor
from nndct_shared.pruning import errors
from pytorch_nndct import parse
from pytorch_nndct.export import get_script_writer
from pytorch_nndct.parse import parse_utils
from pytorch_nndct.qproc import utils as api_utils
from pytorch_nndct.utils import TorchSymbol
from pytorch_nndct.utils import module_util as mod_util
import os
if not os.path.exists(BASE_DIR):
os.makedirs(BASE_DIR)
def get_actual_device(gpu: int) -> int:
gpu = int(gpu)
if 'CUDA_VISIBLE_DEVICES' in os.environ:
available_devices = os.environ['CUDA_VISIBLE_DEVICES'].split(",")
if gpu >= len(available_devices):
raise ValueError(
f"CUDA_VISIBLE_DEVICES={os.environ['CUDA_VISIBLE_DEVICES']}, arg gpu must be less than {len(available_devices)}"
)
return int(available_devices[gpu])
else:
return gpu | null |
23,679 | import collections
import copy
import json
import numpy as np
import os
import random
import torch
import torch.multiprocessing as mp
import types
from typing import List
from nndct_shared.base import NNDCT_OP as OpTypes
from nndct_shared.pruning import errors
from nndct_shared.pruning import logging
from nndct_shared.pruning import pruner as pruner_lib
from nndct_shared.pruning import pruning_lib
from nndct_shared.pruning import search
from nndct_shared.pruning import sensitivity as sens
from nndct_shared.pruning import utils as spu
from nndct_shared.pruning.pruning_lib import is_grouped_conv, is_depthwise_conv
from nndct_shared.pruning.utils import generate_indices_group
from nndct_shared.utils import common
from pytorch_nndct import parse
from pytorch_nndct.pruning import utils
from pytorch_nndct.utils import module_util as mod_util
from pytorch_nndct.utils import profiler
from pytorch_nndct.utils import torch_const
from pytorch_nndct.utils.calibration import calibrate_sens, calibrate_spec
class IterativePruningRunner(PruningRunner):
def __init__(self, model, input_signature):
super(IterativePruningRunner, self).__init__(model, input_signature)
self._sens_path = os.path.join(_VAI_DIR, self._graph.name + '.sens')
def _load_analysis_result(self):
return sens.load_sens(self._sens_path) if os.path.exists(
self._sens_path) else None
def ana(self,
eval_fn,
args=(),
gpus=None,
excludes=None,
forced=False,
with_group_conv: bool = False):
"""Performs model analysis.
Arguments:
eval_fn: Callable object that takes a `torch.nn.Module` object as its
first argument and returns the evaluation score.
args: A tuple of arguments that will be passed to eval_fn.
gpus: A tuple or list of gpu indices used for model analysis. If not set,
the default gpu will be used.
excludes: A list of node name or torch module to be excluded from pruning.
"""
net_sens = None if forced else self._load_analysis_result()
excluded_nodes = self._get_exclude_nodes(excludes) if excludes else []
analyser = sens.ModelAnalyser(self._graph, excluded_nodes, with_group_conv)
if net_sens:
analyser.recover_state(calibrate_sens(net_sens, self._graph))
uncompleted_steps = analyser.uncompleted_steps()
if len(uncompleted_steps) == 0:
logging.info(
'Skip model analysis and use cached result, if you do not want to use it, set forced=True'
)
return
gpu = gpus[0] if gpus else 0
self._ana_pre_check(gpu, eval_fn, args, excluded_nodes, with_group_conv)
num_parallel = len(gpus) if gpus else 1
if num_parallel > 1:
self._parallel_ana(gpus, eval_fn, args, analyser, excluded_nodes,
with_group_conv)
else:
self._ana(gpu, eval_fn, args, analyser)
def _ana_pre_check(self,
gpu,
eval_fn,
args,
excludes,
with_group_conv: bool = False):
"""Prune model but not test it to check if all pruning steps can pass."""
logging.info('Pre-checking for analysis...')
groups = pruning_lib.group_nodes(self._graph, excludes, with_group_conv)
spec = pruning_lib.PruningSpec.from_node_groups(groups, 0.9)
model, pruned_graph, pruning_info = self._prune(spec, mode='sparse')
current_env = copy.deepcopy(os.environ)
os.environ["CUDA_VISIBLE_DEVICES"] = str(utils.get_actual_device(gpu))
model.eval()
model.cuda()
eval_fn(model, *args)
os.environ = current_env
def _ana(self, gpu, eval_fn, args, analyser):
uncompleted_steps = copy.copy(analyser.uncompleted_steps())
total_steps = analyser.steps()
try:
for step in uncompleted_steps:
spec = analyser.spec(step)
model, _, _ = self._prune(spec, mode='sparse')
current_env = copy.deepcopy(os.environ)
os.environ["CUDA_VISIBLE_DEVICES"] = str(utils.get_actual_device(gpu))
model.eval()
model.cuda()
eval_res = eval_fn(model, *args)
os.environ = current_env
analyser.record(
step,
eval_res.item() if isinstance(eval_res, torch.Tensor) else eval_res)
analyser.save(self._sens_path)
logging.info('Analysis complete %d/%d' % (step + 1, total_steps))
finally:
analyser.save(self._sens_path)
def _parallel_ana(self,
gpus,
eval_fn,
args,
analyser,
excludes,
with_group_conv: bool = False):
graph = utils.parse_to_graph(self._model, self._input_signature)
total_steps = analyser.steps()
uncompleted_steps = copy.copy(analyser.uncompleted_steps())
ctx = mp.get_context('spawn')
error_queues = []
input_queue = ctx.Queue()
output_queue = ctx.Queue()
for step in uncompleted_steps:
input_queue.put(step)
try:
processes = []
# To avoid CUDA-out-of-memory error. Main process should execute one copy of task
# using the same GPU on which we do torch tracing
input_signature_device = self._input_signature.device
for rank in range(len(gpus)):
# For the GPU on which we do torch tracing, the task should be executed by main process instead of sub-processes
if input_signature_device.type == "cuda" and input_signature_device.index == int(
gpus[rank]):
continue
current_env = copy.deepcopy(os.environ)
p = ctx.Process(
target=AnaTask(
self._model,
self._input_signature,
excludes,
gpus[rank],
eval_fn,
eval_fn_args=args,
with_group_conv=with_group_conv),
args=(input_queue, output_queue))
p.start()
os.environ = current_env
processes.append(p)
# Main process executes one copy of task
if input_signature_device.type == "cuda":
current_env = copy.deepcopy(os.environ)
ana_task = AnaTask(
self._model,
self._input_signature,
excludes,
input_signature_device.index,
eval_fn,
eval_fn_args=args)
ana_task(input_queue, output_queue)
os.environ = current_env
for p in processes:
p.join()
finally:
while not output_queue.empty():
cur_step, score = output_queue.get()
analyser.record(cur_step, score)
analyser.save(self._sens_path)
def prune(self,
removal_ratio=None,
threshold=None,
spec_path=None,
excludes=None,
mode='sparse',
pruning_info_path=None,
channel_divisible=2):
"""Prune the network by given removal_ratio or threshold.
Arguments:
removal_ratio: The expected percentage of macs reduction. This is just a hint
value, the actual macs drop not necessarily strictly to this value.
threshold: Relative proportion of model performance loss
that can be tolerated.
spec_path: Pre-defined pruning specification.
excludes: Modules that need to excludes from pruning.
mode: One of ['sparse', 'slim'].
channel_divisible: The number of remaining channels in the pruned layer
can be divided by channel_divisble.
Return:
A `torch.nn.Module` object with addtional pruning info.
"""
if not isinstance(removal_ratio, float):
raise errors.OptimizerInvalidArgumentError(
'Expected float "ratio", but got {}({})'.format(
removal_ratio, type(removal_ratio)))
net_sens = self._load_analysis_result()
if net_sens is None:
raise errors.OptimizerNoAnaResultsError(
"Must call ana() before model pruning.")
excluded_nodes = self._get_exclude_nodes(excludes) if excludes else []
training_flag_info = {}
for name, module in self._model.named_modules():
training_flag_info[name] = module.training
if removal_ratio:
logging.info('Pruning ratio = {}'.format(removal_ratio))
spec = self._spec_from_ratio(net_sens, removal_ratio, excluded_nodes)
spec.channel_divisible = channel_divisible
elif threshold:
logging.info('Pruning threshold = {}'.format(threshold))
spec = self._spec_from_threshold(net_sens, threshold, excluded_nodes)
spec.channel_divisible = channel_divisible
elif spec_path:
logging.info('Pruning specification = {}'.format(spec_path))
spec = pruning_lib.PruningSpec.deserialize(
json.load(open(spec_path, 'r')))
else:
raise errors.OptimizerInvalidArgumentError(
'One of [ratio, threshold, spec_path] must be given.')
spec_path = os.path.join(
_VAI_DIR, '{}_ratio_{}.spec'.format(self._graph.name, removal_ratio))
with open(spec_path, 'w') as f:
json.dump(spec.serialize(), f, indent=2)
logging.info('Pruning spec saves in {}'.format(spec_path))
model, pruned_graph, pruning_info = self._prune(spec, mode)
pruning_info_path = pruning_info_path if pruning_info_path else spec_path.split(
'.spec')[0] + '_pruning_info.json'
self.generate_pruning_info(pruning_info_path, pruning_info)
model._graph = pruned_graph
model._pruning_info = pruning_info
if mode == 'sparse':
model._register_state_dict_hook(_remove_mask)
model.slim_state_dict = types.MethodType(slim_state_dict, model)
else:
model.sparse_state_dict = types.MethodType(sparse_state_dict, model)
for name, module in model.named_modules():
module.training = training_flag_info[name]
logging.info('Pruning summary:')
slim_model = self._prune(spec, 'slim')[0]
self._summary(slim_model)
return model
def transform(self, spec_path, script_path='pruned_model.py'):
with open(spec_path, 'r') as f:
spec = json.load(f)
_, pruned_graph, _ = self._prune(spec, 'slim')
pruned_model, _ = utils.rebuild_model(pruned_graph)
return pruned_model
def _spec_from_threshold(self, net_sens, threshold, excludes):
groups = net_sens.prunable_groups_by_threshold(threshold, excludes)
return pruning_lib.PruningSpec(groups)
def _spec_from_ratio(self, net_sens, ratio, excludes):
logging.info('Searching for appropriate ratio for each layer...')
macs, _ = profiler.model_complexity(self._model, self._input_signature)
expected_macs = (1 - ratio) * macs
macs_tolerance = 1e-2
min_th = 1e-5
max_th = 1 - min_th
num_attempts = 0
max_attempts = 100
prev_spec = None
cur_spec = None
while num_attempts < max_attempts:
prev_spec = cur_spec
num_attempts += 1
threshold = (min_th + max_th) / 2
cur_spec = self._spec_from_threshold(net_sens, threshold, excludes)
if prev_spec and prev_spec == cur_spec:
continue
pruned_model = self._prune(cur_spec, 'slim')[0]
current_macs, _ = profiler.model_complexity(pruned_model,
self._input_signature)
error = abs(expected_macs - current_macs) / expected_macs
if error < macs_tolerance:
break
if current_macs < expected_macs:
max_th = threshold
else:
min_th = threshold
return cur_spec
class OneStepPruningRunner(PruningRunner):
  """One-shot ("EagleEye"-style) pruning runner.

  Randomly samples per-group pruning ratios and scores the resulting
  subnets (`search`), then materializes the best candidate (`prune`).
  """

  def __init__(self, model, input_signature):
    super(OneStepPruningRunner, self).__init__(model, input_signature)
    # Default cache path for search results; search()/prune() overwrite it
    # with a per-ratio path.
    self._searcher_saved_path = os.path.join(_VAI_DIR,
                                             self._graph.name + '.search')
  def search(self,
             gpus=['0'],
             calibration_fn=None,
             calib_args=(),
             num_subnet=200,
             removal_ratio=0.5,
             excludes=[],
             eval_fn=None,
             eval_args=(),
             forced=False,
             with_group_conv: bool = False):
    """
    Perform pruned candidates search.

    Arguments:
      gpus: A tuple or list of gpu indices used for model analysis. If not set,
        the default gpu will be used.
      calibration_fn: Callable object that takes a `torch.nn.Module` object as
        its first argument. It's for calibrating BN layer's statistics.
      calib_args: A tuple of arguments that will be passed to calibration_fn.
      num_subnet: The number of subnets needed to search matching the macs
        requirement.
      removal_ratio: The expected percentage of macs reduction.
      excludes: Modules that need to exclude from pruning.
      eval_fn: Callable object that takes a `torch.nn.Module` object as its
        first argument and returns the evaluation score.
      eval_args: A tuple of arguments that will be passed to eval_fn.
      forced: If True, rerun the search even when a cached result with
        enough subnets already exists.
      with_group_conv: Passed through to node grouping.
    """
    # NOTE(review): the mutable defaults (gpus, excludes) are only read in
    # this method, never mutated.
    # Search results are cached per (graph name, ratio) pair.
    self._searcher_saved_path = os.path.join(
        _VAI_DIR, '{}_ratio_{}.search'.format(self._graph.name, removal_ratio))
    excluded_nodes = self._get_exclude_nodes(excludes) if excludes else []
    orig_macs, orig_params = profiler.model_complexity(
        self._model, self._input_signature, readable=False)
    searcher = None
    if not forced and os.path.exists(self._searcher_saved_path):
      searcher = search.load_searcher(self._searcher_saved_path)
      # Enough subnets already collected: reuse the cache wholesale.
      if len(searcher._subnets) >= num_subnet:
        logging.info(
            'Skip subnet search and use cached result, if you do not want to use it, set forced=True'
        )
        return
    if not searcher:
      groups = pruning_lib.group_nodes(self._graph, excluded_nodes,
                                       with_group_conv)
      searcher = search.SubnetSearcher(groups)
      # Record the unpruned supernet's score/MACs as the baseline.
      base_score = eval_fn(self._model, *eval_args)
      if isinstance(base_score, torch.Tensor):
        base_score = base_score.item()
      searcher.set_supernet(base_score, orig_macs)
    # Run one prune/calibrate/evaluate pass up front so setup errors surface
    # before the long search starts.
    self._search_random_precheck(calibration_fn, eval_fn, removal_ratio,
                                 excluded_nodes, calib_args, eval_args,
                                 orig_macs, with_group_conv)
    num_parallel = len(gpus) if gpus else 1
    if num_parallel > 1:
      self._parallel_search_random(gpus, calibration_fn, eval_fn, num_subnet,
                                   removal_ratio, excluded_nodes, calib_args,
                                   eval_args, searcher, orig_macs,
                                   with_group_conv)
    else:
      self._search_random(calibration_fn, eval_fn, num_subnet, removal_ratio,
                          excluded_nodes, calib_args, eval_args, searcher,
                          orig_macs, with_group_conv)
  def _parallel_search_random(self,
                              gpus,
                              calibration_fn,
                              eval_fn,
                              num_subnet,
                              removal_ratio,
                              excluded_nodes,
                              calib_args,
                              eval_args,
                              searcher,
                              orig_macs,
                              with_group_conv: bool = False):
    """Fan the random subnet search out over multiple GPUs.

    One worker process is spawned per GPU (except the tracing GPU, whose
    share is run in the main process); subnet indices flow in through
    `input_queue` and (ratios, score, macs) results are collected from
    `output_queue` into `searcher`.
    """
    ctx = mp.get_context('spawn')
    output_queue = ctx.Queue()
    input_queue = ctx.Queue()
    # Only enqueue the indices still missing from a (possibly cached) searcher.
    for index in range(len(searcher._subnets), num_subnet):
      input_queue.put(index)
    try:
      processes = []
      # To avoid CUDA-out-of-memory error. Main process should execute one copy of task
      # using the same GPU on which we do torch tracing
      input_signature_device = self._input_signature.device
      for rank in range(len(gpus)):
        # For the GPU on which we do torch tracing, the task should be executed by main process instead of sub-processes
        if input_signature_device.type == "cuda" and input_signature_device.index == int(
            gpus[rank]):
          continue
        # Snapshot os.environ and restore it after the spawn — presumably
        # task construction/start mutates CUDA-related env vars; confirm.
        current_env = copy.deepcopy(os.environ)
        p = ctx.Process(
            target=RandomSearchTask(
                self._model,
                self._input_signature,
                removal_ratio,
                num_subnet,
                orig_macs,
                excluded_nodes,
                gpus[rank],
                eval_fn,
                calibration_fn,
                eval_args=eval_args,
                calib_args=calib_args,
                with_group_conv=with_group_conv),
            args=(input_queue, output_queue))
        p.start()
        os.environ = current_env
        processes.append(p)
      # Main process executes one copy of task
      if input_signature_device.type == "cuda":
        current_env = copy.deepcopy(os.environ)
        search_task = RandomSearchTask(
            self._model,
            self._input_signature,
            removal_ratio,
            num_subnet,
            orig_macs,
            excluded_nodes,
            input_signature_device.index,
            eval_fn,
            calibration_fn,
            eval_args=eval_args,
            calib_args=calib_args,
            with_group_conv=with_group_conv)
        search_task(input_queue, output_queue)
        os.environ = current_env
      for p in processes:
        p.join()
    finally:
      # Drain whatever results reached the queue (even on error) and persist
      # them so an interrupted search can be resumed from the cache.
      while not output_queue.empty():
        searcher.add_subnet(*output_queue.get())
      search.save_searcher(searcher, self._searcher_saved_path)
  def _search_random_precheck(self,
                              calibration_fn,
                              eval_fn,
                              removal_ratio,
                              excluded_nodes,
                              calib_args,
                              eval_args,
                              orig_macs,
                              with_group_conv: bool = False):
    """Dry-run the prune/calibrate/evaluate pipeline once before searching.

    Uses an aggressive uniform 0.9 ratio so configuration problems (bad
    calibration_fn/eval_fn, grouping issues) fail fast, and logs the
    achieved MACs removal ratio for reference.
    """
    groups = pruning_lib.group_nodes(self._graph, excluded_nodes,
                                     with_group_conv)
    spec = pruning_lib.PruningSpec.from_node_groups(groups, 0.9)
    pruned_model, pruned_graph, pruning_info = self._prune(spec, mode='slim')
    macs, params = profiler.model_complexity(
        pruned_model, self._input_signature, readable=False)
    cur_removal_ratio = 1 - macs / orig_macs
    calibration_fn(pruned_model, *(calib_args))
    score = eval_fn(pruned_model, *(eval_args))
    if isinstance(score, torch.Tensor):
      score = score.item()
    logging.info('ratios={}, score={}, cur_removal_ratio={}'.format(
        removal_ratio, score, cur_removal_ratio))
  def _search_random(self,
                     calibration_fn,
                     eval_fn,
                     num_subnet,
                     removal_ratio,
                     excluded_nodes,
                     calib_args,
                     eval_args,
                     searcher,
                     orig_macs,
                     with_group_conv: bool = False):
    """Single-process random subnet search.

    Repeatedly samples a per-group pruning ratio from a Gaussian whose mean
    depends on `removal_ratio`, keeps only candidates whose achieved MACs
    reduction falls within `epsilon` of the target, and records their
    calibrated evaluation scores in `searcher`.
    """
    groups = pruning_lib.group_nodes(self._graph, excluded_nodes,
                                     with_group_conv)
    # Hard bounds on any single group's pruning ratio.
    ratio_min = 0.05
    ratio_max = 0.9
    # Acceptance window around the requested overall removal ratio.
    epsilon = 0.01
    index = len(searcher._subnets) + 1
    try:
      while True:
        ratios = []
        spec = pruning_lib.PruningSpec()
        for group in groups:
          # Bias the sampled mean toward [ratio/2, ratio] for aggressive
          # targets, or [0, ratio/2] for mild ones.
          if removal_ratio >= 0.5:
            mu = random.uniform(removal_ratio / 2, removal_ratio)
          else:
            mu = random.uniform(0, removal_ratio / 2)
          sigma = random.uniform(0, 0.3)
          pruning_ratio = random.gauss(mu, sigma)
          pruning_ratio = np.clip(pruning_ratio, ratio_min, ratio_max)
          ratios.append(pruning_ratio)
          spec.add_group(
              pruning_lib.PrunableGroup(group.nodes, pruning_ratio,
                                        group.num_groups))
        pruned_model, pruned_graph, pruning_info = self._prune(
            spec, mode='slim')
        macs, params = profiler.model_complexity(
            pruned_model, self._input_signature, readable=False)
        cur_removal_ratio = 1 - macs / orig_macs
        # Only candidates close enough to the target ratio are evaluated.
        if abs(removal_ratio - cur_removal_ratio) < epsilon:
          calibration_fn(pruned_model, *(calib_args))
          score = eval_fn(pruned_model, *(eval_args))
          if isinstance(score, torch.Tensor):
            score = score.item()
          # The third argument is the retained-MACs fraction.
          searcher.add_subnet(ratios, score, 1 - cur_removal_ratio)
          # Persist incrementally so an interrupted search can resume.
          search.save_searcher(searcher, self._searcher_saved_path)
          logging.info('Search complete %d/%d' % (index, num_subnet))
          index += 1
          if index > num_subnet:
            break
    finally:
      search.save_searcher(searcher, self._searcher_saved_path)
      logging.info('Search %d subnets!' % (len(searcher._subnets)))
  def prune(self,
            mode='slim',
            removal_ratio=None,
            index=None,
            pruning_info_path=None,
            channel_divisible=2):
    """Get pruned candidate subnet of the specific index.

    Arguments:
      mode: One of ['sparse', 'slim'].
      removal_ratio: Pruning ratio for model; selects which cached search
        result to load.
      index: Subnet index. By default, the optimal subnet is selected
        automatically.
      pruning_info_path: Optional output path for the pruning-info JSON.
      channel_divisible: Keep retained channel counts divisible by this.

    Return:
      A `torch.nn.Module` object with additional pruning info.
    """
    assert removal_ratio, 'Pruning ratio for model is needed!'
    # Load the searcher cached by search() for this (graph, ratio) pair.
    self._searcher_saved_path = os.path.join(
        _VAI_DIR, '{}_ratio_{}.search'.format(self._graph.name, removal_ratio))
    searcher = search.load_searcher(self._searcher_saved_path)
    groups = searcher.groups
    if index:
      subnet = searcher.subnet(index)
    else:
      subnet = searcher.best_subnet()
    # subnet.macs stores the retained-MACs fraction (see _search_random's
    # add_subnet call), so 1 - macs is the sparsity.
    logging.info('Sparsity={}, score={}'.format(1 - subnet.macs, subnet.score))
    assert len(groups) == len(subnet.ratios)
    spec = searcher.spec(subnet.ratios)
    spec_path = self._searcher_saved_path.split(
        '.search')[0] + '_{}.spec'.format(index if index else 'best')
    with open(spec_path, 'w') as f:
      json.dump(spec.serialize(), f, indent=2)
    logging.info('Pruning spec saves in {}'.format(spec_path))
    spec.channel_divisible = channel_divisible
    model, pruned_graph, pruning_info = self._prune(spec, mode=mode)
    pruning_info_path = pruning_info_path if pruning_info_path else self._searcher_saved_path.split(
        '.search')[0] + '_pruning_info.json'
    self.generate_pruning_info(pruning_info_path, pruning_info)
    model._graph = pruned_graph
    model._pruning_info = pruning_info
    # NOTE(review): the mask-removal hook is registered for BOTH modes here,
    # while the iterative runner registers it only in 'sparse' mode —
    # confirm this is intended.
    model._register_state_dict_hook(_remove_mask)
    if mode == 'sparse':
      model.slim_state_dict = types.MethodType(slim_state_dict, model)
    else:
      model.sparse_state_dict = types.MethodType(sparse_state_dict, model)
    return model
def get_pruning_runner(model, input_signature, method):
  """Create a pruning runner for the given strategy.

  Arguments:
    model: The `torch.nn.Module` to prune.
    input_signature: Example inputs used for tracing/profiling.
    method: Either 'iterative' or 'one_step'.

  Returns:
    An `IterativePruningRunner` or `OneStepPruningRunner` instance.

  Raises:
    ValueError: If `method` is not a supported strategy. (A ValueError is
      raised instead of the previous `assert`, which is silently stripped
      under `python -O`.)
  """
  if method == 'iterative':
    runner_cls = IterativePruningRunner
  elif method == 'one_step':
    runner_cls = OneStepPruningRunner
  else:
    raise ValueError(
        "method must be one of ['iterative', 'one_step'], got {!r}".format(
            method))
  return runner_cls(model, input_signature)
23,680 | import collections
import copy
import json
import numpy as np
import os
import random
import torch
import torch.multiprocessing as mp
import types
from typing import List
from nndct_shared.base import NNDCT_OP as OpTypes
from nndct_shared.pruning import errors
from nndct_shared.pruning import logging
from nndct_shared.pruning import pruner as pruner_lib
from nndct_shared.pruning import pruning_lib
from nndct_shared.pruning import search
from nndct_shared.pruning import sensitivity as sens
from nndct_shared.pruning import utils as spu
from nndct_shared.pruning.pruning_lib import is_grouped_conv, is_depthwise_conv
from nndct_shared.pruning.utils import generate_indices_group
from nndct_shared.utils import common
from pytorch_nndct import parse
from pytorch_nndct.pruning import utils
from pytorch_nndct.utils import module_util as mod_util
from pytorch_nndct.utils import profiler
from pytorch_nndct.utils import torch_const
from pytorch_nndct.utils.calibration import calibrate_sens, calibrate_spec
def sparse_state_dict(self, destination=None, prefix='', keep_vars=False):
  """Return a state dict whose pruned tensors are padded back to their
  original dense shapes (zeros in removed channels).

  Bound onto pruned models via `types.MethodType`; counterpart of
  `slim_state_dict`.
  """
  state_dict = self.state_dict(destination, prefix, keep_vars)
  # Tensor names are scoped with the graph name, e.g.
  # ResNet::layer1.1.conv1.weight -> layer1.1.conv1.weight
  scope = self._graph.name + torch_const.TorchGraphSymbol.GRAPH_SCOPE_SYM
  for node in self._graph.nodes:
    pruning_info = self._pruning_info.get(node.name, None)
    if not pruning_info:
      continue
    for tensor in node.op.params.values():
      # BUGFIX: str.lstrip() strips a *character set*, not a prefix, so the
      # previous lstrip(scope) could also eat leading characters of the
      # parameter name itself. Strip the scope prefix explicitly.
      if tensor.name.startswith(scope):
        key = tensor.name[len(scope):]
      else:
        key = tensor.name
      value = utils.pad_to_sparse_tensor(state_dict[key], pruning_info)
      state_dict[key] = value
  return state_dict
23,681 | import collections
import copy
import json
import numpy as np
import os
import random
import torch
import torch.multiprocessing as mp
import types
from typing import List
from nndct_shared.base import NNDCT_OP as OpTypes
from nndct_shared.pruning import errors
from nndct_shared.pruning import logging
from nndct_shared.pruning import pruner as pruner_lib
from nndct_shared.pruning import pruning_lib
from nndct_shared.pruning import search
from nndct_shared.pruning import sensitivity as sens
from nndct_shared.pruning import utils as spu
from nndct_shared.pruning.pruning_lib import is_grouped_conv, is_depthwise_conv
from nndct_shared.pruning.utils import generate_indices_group
from nndct_shared.utils import common
from pytorch_nndct import parse
from pytorch_nndct.pruning import utils
from pytorch_nndct.utils import module_util as mod_util
from pytorch_nndct.utils import profiler
from pytorch_nndct.utils import torch_const
from pytorch_nndct.utils.calibration import calibrate_sens, calibrate_spec
_CONVTRANSPOSE_OPS = [OpTypes.CONVTRANSPOSE2D, OpTypes.CONVTRANSPOSE3D]
def _prune_tensor(tensor, out_dims, in_dims):
"""Remove pruned channels of given tensor and returns a slim tensor."""
dim_size = len(tensor.shape)
ndarray = tensor.detach().cpu().numpy()
out_axis, in_axis = 0, 1
if out_dims:
ndarray = np.delete(ndarray, out_dims, axis=out_axis)
if in_dims and dim_size > in_axis:
ndarray = np.delete(ndarray, in_dims, axis=in_axis)
return torch.from_numpy(ndarray)
The provided code snippet includes necessary dependencies for implementing the `slim_state_dict` function. Write a Python function `def slim_state_dict(self, destination=None, prefix='', keep_vars=False)` to solve the following problem:
Returns a slim state dict in which the weight names are same with the original model and the tensors are pruned to slim ones.
Here is the function:
def slim_state_dict(self, destination=None, prefix='', keep_vars=False):
  """Returns a slim state dict in which the weight names are same with the
  original model and the tensors are pruned to slim ones.
  """
  assert prefix == ''
  if destination is None:
    destination = collections.OrderedDict()
    destination._metadata = collections.OrderedDict()
  state_dict = self.state_dict(None, prefix, keep_vars)
  for node in self._graph.nodes:
    node_pruning = self._pruning_info.get(node.name, None)
    for param, tensor in node.op.params.items():
      # ResNet::layer1.1.conv1.weight -> layer1.1.conv1.weight
      key = utils.state_dict_key_from_tensor(tensor)
      value = state_dict[key]
      if not node_pruning:
        destination[key] = value
      else:
        out_dims = node_pruning.removed_outputs
        in_dims = node_pruning.removed_inputs
        # ConvTranspose weights are laid out (in_channels, out_channels, ...)
        # rather than (out, in, ...), so the pruned axes must be swapped.
        if (node.op.type in _CONVTRANSPOSE_OPS and
            param == node.op.ParamName.WEIGHTS):
          out_dims, in_dims = in_dims, out_dims
        destination[key] = _prune_tensor(value, out_dims, in_dims)
  # The model during train/inference procedure may be different.
  # 'self._graph' is generated according to inference model,
  # so some keys in state_dict may not be in destination.
  for key in state_dict:
    if key not in destination:
      destination[key] = state_dict[key]
  return destination
23,682 | import collections
import copy
import json
import numpy as np
import os
import random
import torch
import torch.multiprocessing as mp
import types
from typing import List
from nndct_shared.base import NNDCT_OP as OpTypes
from nndct_shared.pruning import errors
from nndct_shared.pruning import logging
from nndct_shared.pruning import pruner as pruner_lib
from nndct_shared.pruning import pruning_lib
from nndct_shared.pruning import search
from nndct_shared.pruning import sensitivity as sens
from nndct_shared.pruning import utils as spu
from nndct_shared.pruning.pruning_lib import is_grouped_conv, is_depthwise_conv
from nndct_shared.pruning.utils import generate_indices_group
from nndct_shared.utils import common
from pytorch_nndct import parse
from pytorch_nndct.pruning import utils
from pytorch_nndct.utils import module_util as mod_util
from pytorch_nndct.utils import profiler
from pytorch_nndct.utils import torch_const
from pytorch_nndct.utils.calibration import calibrate_sens, calibrate_spec
def generate_indices_group(indices: List[int], dim_size: int,
                           groups: int) -> List[List[int]]:
  """Split global channel indices into per-group, group-relative lists.

  The [0, dim_size) range is cut into consecutive buckets of size
  dim_size // groups; each index in `indices` is placed in its bucket,
  rebased to the bucket's start.
  """
  wanted = set(indices)
  bucket_size: int = dim_size // groups
  result: List[List[int]] = []
  lo = 0
  while lo < dim_size:
    hi = lo + bucket_size
    result.append([i - lo for i in range(lo, hi) if i in wanted])
    lo = hi
  return result
The provided code snippet includes necessary dependencies for implementing the `_sparsify_tensor` function. Write a Python function `def _sparsify_tensor(tensor: torch.Tensor, out_dims: List[int], in_dims: List[int], groups: int = 1)` to solve the following problem:
Fill 0 in removed channels.
Here is the function:
def _sparsify_tensor(tensor: torch.Tensor,
out_dims: List[int],
in_dims: List[int],
groups: int = 1):
"""Fill 0 in removed channels."""
device = tensor.device
tensor = tensor.detach().cpu().clone(memory_format=torch.contiguous_format)
dim_size = len(tensor.shape)
if groups == 1:
if out_dims:
tensor[out_dims, ...] = 0
# data format in pytorch: OIHW
if in_dims and dim_size > 1:
tensor[:, in_dims, ...] = 0
else:
out_dims_group = generate_indices_group(out_dims, tensor.size(0), groups)
in_dims_group = generate_indices_group(
in_dims,
tensor.size(1) * groups, groups) if dim_size > 1 else [[]] * groups
parts = tensor.split(tensor.size(0) // groups, dim=0)
sparse_parts: List[torch.Tensor] = []
for part, o, i in zip(parts, out_dims_group, in_dims_group):
sparse_parts.append(_sparsify_tensor(part, o, i))
tensor = torch.cat(sparse_parts, dim=0)
return tensor.to(device) | Fill 0 in removed channels. |
23,683 | import collections
import copy
import json
import numpy as np
import os
import random
import torch
import torch.multiprocessing as mp
import types
from typing import List
from nndct_shared.base import NNDCT_OP as OpTypes
from nndct_shared.pruning import errors
from nndct_shared.pruning import logging
from nndct_shared.pruning import pruner as pruner_lib
from nndct_shared.pruning import pruning_lib
from nndct_shared.pruning import search
from nndct_shared.pruning import sensitivity as sens
from nndct_shared.pruning import utils as spu
from nndct_shared.pruning.pruning_lib import is_grouped_conv, is_depthwise_conv
from nndct_shared.pruning.utils import generate_indices_group
from nndct_shared.utils import common
from pytorch_nndct import parse
from pytorch_nndct.pruning import utils
from pytorch_nndct.utils import module_util as mod_util
from pytorch_nndct.utils import profiler
from pytorch_nndct.utils import torch_const
from pytorch_nndct.utils.calibration import calibrate_sens, calibrate_spec
def _apply_mask(module, inputs):
for tensor_name in module._tensor_names:
weight = getattr(module, tensor_name)
mask = getattr(module, tensor_name + '_mask')
weight.data = mask.to(dtype=weight.dtype) * weight | null |
23,684 | import collections
import copy
import json
import numpy as np
import os
import random
import torch
import torch.multiprocessing as mp
import types
from typing import List
from nndct_shared.base import NNDCT_OP as OpTypes
from nndct_shared.pruning import errors
from nndct_shared.pruning import logging
from nndct_shared.pruning import pruner as pruner_lib
from nndct_shared.pruning import pruning_lib
from nndct_shared.pruning import search
from nndct_shared.pruning import sensitivity as sens
from nndct_shared.pruning import utils as spu
from nndct_shared.pruning.pruning_lib import is_grouped_conv, is_depthwise_conv
from nndct_shared.pruning.utils import generate_indices_group
from nndct_shared.utils import common
from pytorch_nndct import parse
from pytorch_nndct.pruning import utils
from pytorch_nndct.utils import module_util as mod_util
from pytorch_nndct.utils import profiler
from pytorch_nndct.utils import torch_const
from pytorch_nndct.utils.calibration import calibrate_sens, calibrate_spec
def _remove_mask(self, destination=None, prefix='', keep_vars=False):
keys = list(destination.keys())
for key in keys:
if '_mask' in key:
del destination[key]
return destination | null |
23,685 | from collections import ChainMap, defaultdict
from enum import Enum
from tqdm import tqdm
import torch
from nndct_shared.base.key_names import FrameworkType
from nndct_shared.nndct_graph import (Graph, Tensor, Block, Node,
reorder_multi_subgraph_nodes)
from nndct_shared.utils import NndctDebugLogger, NndctOption, NndctScreenLogger, QError, QWarning, QNote
from pytorch_nndct.utils import build_aten_torch_ops_table, TorchGraphSymbol
from .op_dispatcher import *
from .parse_utils import *
from .parser_post_process import change_addmm_to_linear
from .torch_op_def import TorchUnknownOperation
from .trace_helper import TorchGraphHandler
from typing import Mapping
from .rich_in_out_helper import FlattenInOutModelForTrace, StandardInputData
class TorchUnknownOperation(Operation):
  """Placeholder op assigned to torch operators the parser does not
  recognize; detected later by `unknown_op_type_check`."""

  def __init__(self, nndct_op_type):
    super().__init__(nndct_op_type)
def unknown_op_type_check(graph: Graph):
  """Scan `graph` and report operators the quantizer cannot handle.

  Custom ops only produce a user-facing warning (they are treated as float
  operators); any truly unknown op makes the final check2user call fail.
  """
  unkown_ops = set()
  custom_ops = set()
  for node in graph.all_nodes():
    if isinstance(node.op, TorchUnknownOperation):
      unkown_ops.add(node.op.type)
    elif node.has_custom_op():
      custom_ops.add(node.op.type)
  for op in custom_ops:
    NndctScreenLogger().warning2user(QWarning.FLOAT_OP, f"The quantizer recognize new op `{op}` as a float operator by default.")
  # if custom_ops:
  #   NndctScreenLogger().info(f"You can make these new ops quantizable by add them to custom_quant_ops, \
  #     e.g. quantizer= torch_quantizer(..., custom_quant_ops=['{list(custom_ops)[0]}',...])")
  # check2user reports an error unless the final condition holds, i.e. when
  # any unknown op was found.
  NndctScreenLogger().check2user(QError.UNSUPPORTED_OPS, f"Unsupported Ops: {unkown_ops}.", len(unkown_ops)==0)
23,686 | from nndct_shared.nndct_graph.base_tensor import Tensor
from pytorch_nndct.utils import TorchGraphSymbol
from .rich_in_out_helper import FlattenInOutModelForTrace
def convert_np_type_to_pytorch_type(np_type):
  """Map a numpy dtype name to its torch dtype string.

  Unknown names pass through unchanged.
  """
  mapping = {
      'int64': 'torch.int64',
      'int32': 'torch.int32',
      'float32': 'torch.float',
      'float64': 'torch.double',
  }
  return mapping.get(np_type, np_type)
23,687 | from nndct_shared.nndct_graph.base_tensor import Tensor
from pytorch_nndct.utils import TorchGraphSymbol
from .rich_in_out_helper import FlattenInOutModelForTrace
def convert_dtype_between_np_and_pytorch(dtype):
  """Translate dtype strings in either direction between numpy and torch
  naming (e.g. 'float32' <-> 'torch.float').

  Unknown names pass through unchanged.
  """
  np_to_torch = {
      'int64': 'torch.int64',
      'int32': 'torch.int32',
      'float32': 'torch.float',
      'float64': 'torch.double',
  }
  torch_to_np = {
      'torch.int64': 'int64',
      'torch.long': 'int64',
      'torch.int32': 'int32',
      'torch.int': 'int32',
      'torch.float32': 'float32',
      'torch.float': 'float32',
      'torch.float64': 'float64',
      'torch.double': 'float64',
  }
  if dtype in np_to_torch:
    return np_to_torch[dtype]
  return torch_to_np.get(dtype, dtype)
23,688 | from nndct_shared.nndct_graph.base_tensor import Tensor
from pytorch_nndct.utils import TorchGraphSymbol
from .rich_in_out_helper import FlattenInOutModelForTrace
_GRAPH_SCOPE_SYM = TorchGraphSymbol.GRAPH_SCOPE_SYM
class FlattenInOutModelForTrace(torch.nn.Module):
  """Wraps a model so its inputs/outputs are flat tuples for jit tracing.

  The wrapped module is stored under an attribute named
  'nndct_st_<OrigName>_ed'; the helpers below detect and strip that marker
  from traced tensor/scope names.
  """

  # NOTE(review): the helpers below take `cls` but carry no decorators —
  # they look like @classmethods whose decorators were lost when this
  # snippet was extracted; confirm against the original source.

  def getModelName(cls):
    # Fixed wrapper class name embedded in traced scope strings.
    return 'FlattenInOutModelForTrace'

  def getOriginModelNameFormString(cls, data_str):
    # Pull the original model name out of a 'nndct_st_<name>_ed' marker.
    names = re.findall(r"nndct_st_([\w_]+)_ed", data_str)
    if len(names) > 0:
      return names[0]
    else:
      return None

  def check_need_recovery_name(cls, name):
    # True when the name still carries wrapper artifacts.
    return 'nndct_st_' in name or 'FlattenInOutModelForTrace' in name

  def recovery_tensor_name(cls, name):
    # Drop the 'nndct_st_<name>_ed.' attribute prefix from tensor names.
    return re.sub(r"nndct_st_[\w_]+_ed\.", '', name)

  def recovery_node_scope_name(cls, scope_name):
    # Two scope layouts are handled:
    # 1) 'FlattenInOutModelForTrace/<Cls>[nndct_st_..._ed]' -> '<Cls>'
    real_class_name = re.findall(r"FlattenInOutModelForTrace/(.*)?\[nndct_st_[\w_]+_ed\]", scope_name)
    if len(real_class_name) > 0:
      scope_name = re.sub(r"FlattenInOutModelForTrace/(.*)?\[nndct_st_[\w_]+_ed\]", real_class_name[0], scope_name)
    # 2) '...FlattenInOutModelForTrace::/<scope>::nndct_st_..._ed' -> '<scope>::'
    real_model_name = re.findall(r".*FlattenInOutModelForTrace::/(.*::)nndct_st_[\w_]+_ed", scope_name)
    if len(real_model_name) > 0:
      scope_name = re.sub(r".*FlattenInOutModelForTrace::/(.*::)nndct_st_[\w_]+_ed", real_model_name[0], scope_name)
    return scope_name

  def __init__(self, inner_model, input_schema) -> None:
    super().__init__()
    # Marker-wrapped attribute name, e.g. 'nndct_st_ResNet_ed'.
    self.module_name = "nndct_st_" + inner_model._get_name() + "_ed"
    setattr(self, self.module_name, inner_model)
    self.input_schema = input_schema
    self.training = inner_model.training

  def forward(self, *flatten_input):
    # Rebuild the original (args, kwargs) structure, run the wrapped model,
    # then flatten its output back to a tuple for the tracer.
    input = self.input_schema(flatten_input)
    output = getattr(self, self.module_name)(*input['args'], **input['kwargs'])
    flatten_output, _ = flatten_to_tuple(output)
    return flatten_output
The provided code snippet includes necessary dependencies for implementing the `get_full_name` function. Write a Python function `def get_full_name(graph_name: str, name: str) -> str` to solve the following problem:
get the full name of node/tensor in graph Args: graph_name (str): graph name Returns: str: full name
Here is the function:
def get_full_name(graph_name: str, name: str) -> str:
  """Return the graph-scoped name of a node/tensor, with any trace-wrapper
  artifacts stripped out.

  Args:
    graph_name: Name of the owning graph.
    name: Short node/tensor name.

  Returns:
    The fully qualified name.
  """
  scoped = _GRAPH_SCOPE_SYM.join([graph_name, name])
  scoped = FlattenInOutModelForTrace.recovery_tensor_name(scoped)
  return FlattenInOutModelForTrace.recovery_node_scope_name(scoped)
23,689 | from nndct_shared.nndct_graph.base_tensor import Tensor
from pytorch_nndct.utils import TorchGraphSymbol
from .rich_in_out_helper import FlattenInOutModelForTrace
def get_short_name(full_name: str) -> str:
  """Return the node/tensor name with its leading graph scope removed.

  Args:
    full_name: Full name of the node/tensor.

  Returns:
    The part after the last graph-scope separator (the whole string when
    no separator is present).
  """
  return full_name.rpartition(_GRAPH_SCOPE_SYM)[2]
The provided code snippet includes necessary dependencies for implementing the `get_formal_name` function. Write a Python function `def get_formal_name(hier_name: str) -> str` to solve the following problem:
replace `.` with `_` Args: hier_name (str): "layer_0.layer_1" Returns: str: "layer_0_layer_1"
Here is the function:
def get_formal_name(hier_name: str) -> str:
  """Replace every '.' with '_' and drop the graph scope.

  e.g. "layer_0.layer_1" -> "layer_0_layer_1"
  """
  underscored = hier_name.replace(".", "_")
  return get_short_name(underscored)
23,690 | from nndct_shared.nndct_graph.base_tensor import Tensor
from pytorch_nndct.utils import TorchGraphSymbol
from .rich_in_out_helper import FlattenInOutModelForTrace
class TorchScriptModuleHandler(object):
def __init__(self):
def build_torch_graph(self, graph_name, script_module, *args):
def rename_graph_inputs(graph):
def _optimize_raw_graph(self, graph):
def _is_param_const_node(fw_node, raw_graph):
def _build_raw_graph(self, graph_name, fw_graph, params=None, blobs=None):
def _create_ret_value(self, graph, raw_graph):
def _create_inputs_value(self, graph, raw_graph):
def _create_attrs_value(self, graph, raw_graph):
def _create_params_value(self, graph, script_module):
def _add_node(self, fw_node, raw_graph):
def _execute_optimize(self, raw_graph):
class TorchGraphHandler(object):
def __init__(self):
def _check_control_flow(fw_graph):
def _get_fw_graph_from_module(self, module, input_data, train):
def build_torch_graph(self, graph_name, module, input_args, train=False):
def _trace_graph_from_model(self, module, input_args, train):
def _get_graph_from_script(self, module, input):
def _get_param_names(self, graph):
def _opt_raw_graph(self, raw_graph, is_control_flow_graph):
def _opt_raw_block(self, raw_block):
def _optimize_for_jit(self, raw_block):
def _optimize(self, raw_block):
def _create_attrs_value(self, graph):
def _create_inputs_value(self, graph):
def _build_block_graph(self, fw_block):
def _create_raw_graph(self, graph_name, fw_graph):
def _is_param_const_node(self, fw_node):
def add_torch_node(self, fw_node):
def _check_stub_topology(self, raw_graph):
def _path_str(path):
def create_graph_handler(module):
import torch
if isinstance(module, torch.jit.ScriptModule):
from .script_helper import TorchScriptModuleHandler
return TorchScriptModuleHandler()
elif isinstance(module, torch.nn.Module):
from .trace_helper import TorchGraphHandler
return TorchGraphHandler()
else:
raise NotImplementedError() | null |
23,691 | from nndct_shared.nndct_graph.base_tensor import Tensor
from pytorch_nndct.utils import TorchGraphSymbol
from .rich_in_out_helper import FlattenInOutModelForTrace
class Tensor(object):
"""A wrapper of np.ndarray used in two ways:
- The outputs of an operation.
- The parameters of an operation.
In the former case, you can use `tensor.node` to get the node that
outputs this tensor.
In the latter case, `tensor.node` is None.
For getting raw ndarray, call `tensor.data`.
"""
def __init__(self,
name=None,
shape=None,
dtype=None,
device=None,
requires_grad=None,
data=None,
node=None,
layout=None):
self._node = weakref.ref(node) if node else node
self._name = name
self._shape = shape
self._data = data
self._dtype_map = {
np.dtype('float64'): 'float64',
np.dtype('float32'): 'float32',
np.dtype('float16'): 'float16',
np.dtype('complex64'): 'complex64',
np.dtype('int64'): 'int64',
np.dtype('int32'): 'int32',
np.dtype('int16'): 'int16',
np.dtype('int8'): 'int8',
np.dtype('bool'): 'bool',
np.dtype('uint8'): 'uint8'
}
if dtype in self._dtype_map:
self._dtype = self._dtype_map[dtype]
else:
self._dtype = dtype
self._device = device
self._requires_grad = requires_grad
self._offset = 0
self._uses = []
self._attr_uses = []
self._device = device
def __deepcopy__(self, memo):
raise NotImplementedError("Deep copy is prohibited, use `clone_from` instead.")
def clone_from(self, src_tensor):
self._shape = src_tensor._shape
self._data = copy.deepcopy(src_tensor._data)
self._dtype = src_tensor._dtype
self._device = src_tensor._device
self._requires_grad = src_tensor._requires_grad
def from_ndarray(self, data):
if not isinstance(data, np.ndarray):
raise TypeError("'data' must be a numpy ndarray")
self._data = np.copy(data)
self._dtype = self._dtype_map[self._data.dtype]
self._shape = list(self._data.shape)
def from_tensor(self, tensor):
self._dtype = tensor.dtype
self._shape = tensor.shape
def from_des(self, shape, dtype):
self._shape = shape
self._dtype = dtype
def transpose(self, axes=None):
trans_data = None
if self._data is not None:
trans_data = self._data.transpose(axes)
trans_data = np.ascontiguousarray(trans_data)
trans_shape = list(trans_data.shape)
else:
trans_shape = [self._shape[i] for i in axes]
self._data = trans_data
self._shape = trans_shape
def clean_data(self):
self._data = None
def __str__(self):
return "Tensor: {}(shape={}, dtype={})".format(
self._name if self._name else "", self._shape, self._dtype)
def description(self):
desp = {}
desp['name'] = self._name
desp['shape'] = self._shape
desp['dtype'] = self._dtype
desp['node'] = self.node.name if self.node else None
return desp
def is_real_tensor(self):
return self.is_complete_tensor() or self.dtype == "tensor"
def is_list_type(self):
return "list" in self.dtype
def is_complete_tensor(self) -> bool:
# not necessary to hold real data for completeTensor
return True if self.shape and self.dtype else False
def is_param_tensor(self) -> bool:
return True if self._node is None else False
# NOTE(review): each getter below is paired with a same-named setter. Upstream
# these are almost certainly @property getters with @<name>.setter setters whose
# decorators were lost during extraction — as written, each second def would
# simply shadow the first. Confirm against the original Tensor class.
def shape(self):
  # Tensor shape as a list of dimension sizes (None when unknown).
  return self._shape
def shape(self, shape):
  self._shape = shape
def ndim(self):
  # Rank of the tensor, or None when the shape is unknown.
  return len(self._shape) if self._shape else None
def dtype(self):
  return self._dtype
def dtype(self, dtype):
  self._dtype = dtype
def data(self):
  return self._data
def data(self, value):
  # Setter accepts an ndarray, another Tensor, or a plain Python scalar.
  if isinstance(value, np.ndarray):
    self.from_ndarray(value)
  elif isinstance(value, Tensor):
    self.from_tensor(value)
  elif isinstance(value, (int, float, bool)):
    #raise ValueError(f"Accept [int, float, bool] type data, but {type(value)} is given")
    self._data = value
    # Scalars are rank-0: empty shape.
    self._shape = []
def node(self):
  # Dereference the weakref to the producing node (None when unset).
  return self._node() if self._node is not None else None
def node(self, value):
  # Store a weakref to avoid a reference cycle between node and tensor.
  self._node = weakref.ref(value) if value else value
def name(self):
  return self._name
def name(self, name):
  self._name = name
def device(self):
  return self._device
def device(self, device):
  self._device = device
def requires_grad(self):
  return self._requires_grad
def requires_grad(self, need_grad):
  self._requires_grad = need_grad
def offset(self):
  return self._offset
def offset(self, offset):
  self._offset = offset
def uses(self):
  # Use records (consumer node + input slot) of this tensor.
  return self._uses
def owning_graph(self):
  # Delegates to the producing node's graph.
  return self.node.owning_graph
def attr_uses(self):
  # Use records where this tensor appears inside a node attribute/config.
  return self._attr_uses
def replace_first_use_with(self, new_tensor):
  """Redirect the first recorded use of this tensor to ``new_tensor``.

  The use record is removed from ``self.uses``, its consumer's input slot is
  rewired, and the record is appended to ``new_tensor.uses``.
  """
  # Rewiring across graphs would corrupt both graphs.
  assert self.owning_graph is new_tensor.owning_graph
  first_use = self.uses.pop(0)
  first_use.user.in_tensors[first_use.offset] = new_tensor
  new_tensor.uses.append(first_use)
def replace_uses_with(self, new_tensor):
  """Redirect every data use and every attribute use of this tensor to ``new_tensor``."""
  # Replacing a tensor with itself would loop forever.
  assert self is not new_tensor
  while self.uses:
    self.replace_first_use_with(new_tensor)
  self.replace_attr_uses_with(new_tensor)
def replace_attr_with_new_tensor_v2(self, attr_use, new_tensor):
  """Replace occurrences of ``self`` inside one attribute/config use with ``new_tensor``.

  Rebuilds nested lists/tuples (rather than mutating them in place), moves the
  use record from ``self.attr_uses`` to ``new_tensor.attr_uses``, and writes
  the rebuilt value back to the using node.
  """
  def _replace(attr_name, attr_value):
    # Lists are rebuilt element by element so nested containers are handled.
    if isinstance(attr_value, list):
      new_attr_value = []
      for value in attr_value:
        if self is value:
          # A direct hit recurses into the scalar branch below, which
          # performs the actual swap and the use-record bookkeeping.
          new_value = _replace(attr_name, value)
          new_attr_value.append(new_value)
        elif isinstance(value, (tuple, list)):
          new_value = _replace(attr_name, value)
          new_attr_value.append(new_value)
        else:
          new_attr_value.append(value)
      return new_attr_value
    elif isinstance(attr_value, tuple):
      # Same as the list branch, but the result is converted back to a tuple.
      new_attr_value = []
      for value in list(attr_value):
        if self is value:
          new_value = _replace(attr_name, value)
          new_attr_value.append(new_value)
        elif isinstance(value, (tuple, list)):
          new_value = _replace(attr_name, value)
          new_attr_value.append(new_value)
        else:
          new_attr_value.append(value)
      new_attr_value = tuple(new_attr_value)
      return new_attr_value
    else:
      if self is not attr_value:
        # Scalar attribute that no longer references self: drop the stale
        # use record. Only reachable when the whole attribute value is a
        # scalar, since the container branches never recurse into
        # non-matching scalars.
        if attr_name == attr_use.attr_name:
          self.attr_uses.remove(attr_use)
        return attr_value
      else:
        # The swap itself: transfer the use record to the new tensor
        # (guarding against duplicates when self occurs multiple times).
        if attr_use not in new_tensor.attr_uses:
          new_tensor.attr_uses.append(attr_use)
        self.attr_uses.remove(attr_use)
        return new_tensor
  # String attr names address op configs; structured names address node attributes.
  if isinstance(attr_use.attr_name, str):
    attr_value = attr_use.user.get_config(attr_use.attr_name)
  else:
    attr_value = attr_use.user.get_attr(attr_use.attr_name)
  new_attr_value = _replace(attr_use.attr_name, attr_value)
  if isinstance(attr_use.attr_name, str):
    attr_use.user.set_config(attr_use.attr_name, new_attr_value)
  else:
    attr_use.user.update_attr(attr_use.attr_name, new_attr_value)
def replace_attr_with_new_tensor(self, attr_use, new_tensor):
  """Legacy (v1) variant of replace_attr_with_new_tensor_v2: mutates list
  attributes in place instead of rebuilding them.

  NOTE(review): unlike v2 there is no tuple branch (tuples are immutable and
  cannot be patched in place), and the trailing scalar check also runs after
  the nested recursion — confirm intended semantics before reusing this; the
  v2 implementation appears to be its replacement.
  """
  def _replace(attr_name, attr_value):
    if isinstance(attr_value, list):
      if self in attr_value:
        # Swap every occurrence in place and move the use record over.
        for i in range(len(attr_value)):
          if attr_value[i] is self:
            attr_value[i] = new_tensor
        if attr_use not in new_tensor.attr_uses:
          new_tensor.attr_uses.append(attr_use)
        self.attr_uses.remove(attr_use)
        return
      else:
        # Recurse into nested containers.
        for val in attr_value:
          _replace(attr_name, val)
    if self is not attr_value:
      # Non-matching value: drop the stale use record for the top-level attr.
      if attr_name == attr_use.attr_name:
        self.attr_uses.remove(attr_use)
      return
    # attr_value IS self (scalar attribute): rewrite the whole attribute.
    if isinstance(attr_name, str):
      attr_use.user.set_config(attr_name, new_tensor)
    else:
      attr_use.user.update_attr(attr_name, new_tensor)
  if isinstance(attr_use.attr_name, str):
    attr_value = attr_use.user.get_config(attr_use.attr_name)
  else:
    attr_value = attr_use.user.get_attr(attr_use.attr_name)
  _replace(attr_use.attr_name, attr_value)
def replace_first_attr_use_with(self, new_tensor):
  """Redirect the first attribute use of this tensor to ``new_tensor``."""
  head_use = self.attr_uses[0]
  self.replace_attr_with_new_tensor_v2(head_use, new_tensor)
def replace_attr_uses_with(self, new_tensor):
  """Redirect every attribute use of this tensor to ``new_tensor``."""
  # Each replacement is expected to shrink attr_uses until it drains.
  while self.attr_uses:
    self.replace_first_attr_use_with(new_tensor)
def python_dtype(value):
  """Map a value (or a Tensor's dtype string) onto the name of the matching
  Python scalar type: "int", "float", "bool" or "str"."""
  # Torch/numpy dtype strings and builtin types that collapse onto Python scalars.
  dtype_names = {
      "torch.int": "int",
      "torch.long": "int",
      "torch.short": "int",
      "torch.float": "float",
      "torch.half": "float",
      "torch.double": "float",
      "torch.bool": "bool",
      int: "int",
      float: "float",
      bool: "bool",
      str: "str",
      "int64": "int",
      "int32": "int",
      "float32": "float",
      "float64": "float",
      "float16": "float"
  }
  if isinstance(value, Tensor):
    # Unrecognised tensor dtypes pass through unchanged.
    return dtype_names.get(value.dtype, value.dtype)
  # Non-tensor values map by their concrete type; unsupported types raise KeyError.
  return dtype_names[type(value)]
23,692 | import torch
import torch
import math
import functools
from nndct_shared.utils import (AddXopError, NndctOption, NndctScreenLogger,
option_util, QError, QWarning)
# Registry mapping a torch function (e.g. torch.mean) to its override used by
# TraceTensor's __torch_function__ dispatch.
HANDLED_FUNCTIONS = {}

def implements(torch_function_list):
  """Register a torch function override for ScalarTensor.

  Returns a decorator that records the decorated function in
  HANDLED_FUNCTIONS for every torch function in ``torch_function_list``,
  and returns the function unchanged.
  """
  # BUGFIX: the original applied @functools.wraps(torch_function_list) to the
  # decorator — wraps() expects a callable to copy metadata from, not a list;
  # it silently copied nothing useful. The registration itself needs no wrapping
  # because the decorated function is returned as-is.
  def decorator(func):
    for torch_fn in torch_function_list:
      HANDLED_FUNCTIONS[torch_fn] = func
    return func
  return decorator
23,693 | import torch
import torch
import math
import functools
from nndct_shared.utils import (AddXopError, NndctOption, NndctScreenLogger,
option_util, QError, QWarning)
def convert_tensor_to_tracetensor(data):
  """Recursively wrap every torch.Tensor found in ``data`` as a TraceTensor.

  Lists and tuples are rebuilt with their container type preserved; any other
  value is returned untouched.
  """
  if isinstance(data, torch.Tensor):
    # Round-trip through numpy to detach from the autograd graph, then restore
    # the original device and dtype.
    return TraceTensor(data.detach().cpu().numpy()).to(data.device).to(data.dtype)
  if isinstance(data, (tuple, list)):
    wrapped = [convert_tensor_to_tracetensor(item) for item in data]
    return tuple(wrapped) if isinstance(data, tuple) else wrapped
  return data
def convert_tracetensor_to_tensor(data):
  """Recursively unwrap every TraceTensor found in ``data`` back to a plain torch.Tensor.

  Lists and tuples are rebuilt with their container type preserved; any other
  value is returned untouched.
  """
  if isinstance(data, TraceTensor):
    # Round-trip through numpy to drop the subclass, then restore the
    # original device and dtype.
    return torch.Tensor(data.detach().cpu().numpy()).to(data.device).to(data.dtype)
  if isinstance(data, (tuple, list)):
    unwrapped = [convert_tracetensor_to_tensor(item) for item in data]
    return tuple(unwrapped) if isinstance(data, tuple) else unwrapped
  return data
def torch_function_old_version(func, types, args, kwargs):
  """Fallback dispatch for torch builds whose Tensor lacks __torch_function__:
  unwrap TraceTensor args, run the real torch function, re-wrap the result."""
  plain_args = convert_tracetensor_to_tensor(args)
  result = func(*plain_args, **kwargs)
  return convert_tensor_to_tracetensor(result)
23,694 | import torch
import torch
import math
import functools
from nndct_shared.utils import (AddXopError, NndctOption, NndctScreenLogger,
option_util, QError, QWarning)
class TraceTensor(torch.Tensor):
def __torch_function__(cls, func, types, args=(), kwargs=None):
def split(tensor:TraceTensor, split_size_or_sections:'int|list[int]|tuple[int]', dim:int=0):
  """Override of torch split for TraceTensor, reimplemented with plain slicing.

  Mirrors torch semantics: an int splits ``dim`` into equal chunks (last chunk
  may be smaller); a list/tuple gives explicit per-section sizes that must sum
  to the dimension's extent. Returns a tuple of slices.
  """
  def aten_split_with_sizes(tensor, split_size_or_sections, dim=0):
    # Explicit section sizes must cover the dimension exactly.
    # NOTE(review): the local name `sum` shadows the builtin.
    if isinstance(split_size_or_sections, (list,tuple)):
      sum = 0
      for x in split_size_or_sections:
        sum = sum + x
      assert(sum == tensor.shape[dim])
    tensor_list_return = []
    last_end_index = 0
    for k in range(len(split_size_or_sections)):
      # Full slice on every axis except `dim`.
      tmp = [slice(None, None) for x in tensor.shape]
      start = last_end_index
      end = start + split_size_or_sections[k]
      last_end_index = end
      tmp[dim] = slice(start,end)
      tensor_list_return.append(tensor[tmp])
    return tuple(tensor_list_return)
  def aten_split(tensor, split_size_or_sections, dim=0):
    # Uniform chunk size; the final chunk absorbs any remainder.
    if not isinstance(split_size_or_sections, int):
      assert(False)
    group_num = tensor.shape[dim] / split_size_or_sections
    group_num = math.ceil(group_num)
    if group_num == 0:
      # A zero-length dimension still yields one (empty) piece.
      group_num = 1
    tensor_list_return = []
    for k in range(group_num):
      tmp = [slice(None, None) for x in tensor.shape]
      start = k * split_size_or_sections
      if k == (group_num - 1):
        end = tensor.shape[dim]
      else:
        end = (k + 1) * split_size_or_sections
      tmp[dim] = slice(start,end)
      tensor_list_return.append(tensor[tmp])
    return tuple(tensor_list_return)
  # Dispatch on the argument type; anything else is a usage error.
  if isinstance(split_size_or_sections, (list, tuple)):
    return aten_split_with_sizes(tensor, split_size_or_sections, dim)
  if isinstance(split_size_or_sections, int):
    return aten_split(tensor, split_size_or_sections, dim)
  assert(False)
23,695 | import torch
import torch
import math
import functools
from nndct_shared.utils import (AddXopError, NndctOption, NndctScreenLogger,
option_util, QError, QWarning)
class TraceTensor(torch.Tensor):
def __torch_function__(cls, func, types, args=(), kwargs=None):
  """Dispatch hook: route torch functions through registered overrides.

  NOTE(review): the first parameter is ``cls`` — upstream this is presumably
  decorated with @classmethod, lost during extraction; confirm.
  """
  if kwargs is None:
    kwargs = {}
  if func not in HANDLED_FUNCTIONS:
    # No override registered: fall back to the default subclass-aware
    # dispatch when this torch build provides it, else to the legacy
    # unwrap/rewrap path.
    if hasattr(torch.Tensor, '__torch_function__'):
      return super().__torch_function__(func, types, args, kwargs)
    return torch_function_old_version(func, types, args, kwargs)
  return HANDLED_FUNCTIONS[func](*args, **kwargs)
def chunk(input, chunks, dim=0):
  """Override of torch.chunk for TraceTensor: split ``dim`` into up to
  ``chunks`` pieces of ceil(size/chunks) elements; the last piece absorbs the
  remainder. Returns a tuple of slices."""
  assert chunks != 0
  length = input.shape[dim]
  step = math.ceil(length / chunks)
  if step != 0:
    # Recompute the effective chunk count, matching torch.chunk (it may be
    # smaller than requested).
    chunks = math.ceil(length / step)
  pieces = []
  for idx in range(chunks):
    # Full slice on every axis except `dim`.
    index = [slice(None, None) for _ in input.shape]
    lo = idx * step
    hi = length if idx == chunks - 1 else (idx + 1) * step
    index[dim] = slice(lo, hi)
    pieces.append(input[index])
  return tuple(pieces)
23,696 | import torch
import torch
import math
import functools
from nndct_shared.utils import (AddXopError, NndctOption, NndctScreenLogger,
option_util, QError, QWarning)
def logging_warn(message):
  # Route float-op warnings through the shared nndct screen logger.
  NndctScreenLogger().warning2user(QWarning.FLOAT_OP, message)
class TraceTensor(torch.Tensor):
def __torch_function__(cls, func, types, args=(), kwargs=None):
  """Dispatch hook: route torch functions through registered overrides.
  (Duplicate copy of the same method earlier in this dump.)

  NOTE(review): the first parameter is ``cls`` — upstream this is presumably
  decorated with @classmethod, lost during extraction; confirm.
  """
  if kwargs is None:
    kwargs = {}
  if func not in HANDLED_FUNCTIONS:
    # No override registered: fall back to the default subclass-aware
    # dispatch when available, else the legacy unwrap/rewrap path.
    if hasattr(torch.Tensor, '__torch_function__'):
      return super().__torch_function__(func, types, args, kwargs)
    return torch_function_old_version(func, types, args, kwargs)
  return HANDLED_FUNCTIONS[func](*args, **kwargs)
def check_big_pooling(kernel_size, stride, padding):
  """Return True when a pooling op qualifies for the big-kernel split rewrite.

  A pooling is splittable into two cascaded smaller poolings only when:
    * the split mode is enabled (NndctOption.nndct_pooling_split_mode),
    * padding is zero on both dimensions,
    * windows do not overlap (stride equals kernel_size, or stride is None,
      which defaults to kernel_size),
    * the window has at least 512 elements,
    * factoring each kernel dim into two factors actually changes the kernel
      (otherwise the rewrite would recurse forever).
  """
  if not NndctOption.nndct_pooling_split_mode.value:
    return False
  # Normalize scalar arguments to 2-element pairs.
  if isinstance(kernel_size, int):
    kernel_size = (kernel_size, kernel_size)
  if isinstance(stride, int):
    stride = (stride, stride)
  if isinstance(padding, int):
    padding = (padding, padding)
  #------------------------
  # The split rewrite only supports zero padding.
  check_padding_flag = padding[0] == 0 and padding[1] == 0
  #------------------------
  # Only non-overlapping windows are supported; `is not None` replaces the
  # non-idiomatic `!= None` comparison.
  check_stride_flag = True
  if stride is not None:
    if stride[0] != kernel_size[0] or stride[1] != kernel_size[1]:
      check_stride_flag = False
  #------------------------
  # Only windows with at least 512 elements count as "big".
  check_kernel_size_flag = kernel_size[0] * kernel_size[1] >= 512
  #------------------------
  base_check_result = check_stride_flag and check_kernel_size_flag and check_padding_flag
  #------------------------
  if base_check_result:
    # Pre-check that factoring yields genuinely smaller kernels; if either
    # factor tuple reproduces the original kernel the rewrite would recurse
    # infinitely, so refuse to split.
    new_kernel_size1 = []
    new_kernel_size2 = []
    for size in kernel_size:
      a, b = change2TwoNumberMultiplication(size)
      new_kernel_size1.append(a)
      new_kernel_size2.append(b)
    if tuple(new_kernel_size1) == tuple(kernel_size):
      return False
    if tuple(new_kernel_size2) == tuple(kernel_size):
      return False
  #------------------------
  return base_check_result
def avg_pool2d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None):
  """Override of F.avg_pool2d for TraceTensor.

  Big non-overlapping zero-padded kernels (see check_big_pooling) are split
  into two cascaded smaller poolings; otherwise the op runs on the plain
  torch.Tensor payload and the result is re-wrapped as a TraceTensor.
  Parameter semantics follow torch.nn.functional.avg_pool2d.
  """
  # kernel_size (Union[int, Tuple[int, int]]) – the size of the window
  # stride (Union[int, Tuple[int, int]]) – the stride of the window. Default value is kernel_size
  # padding (Union[int, Tuple[int, int]]) – implicit zero padding to be added on both sides
  # ceil_mode (bool) – when True, will use ceil instead of floor to compute the output shape
  # count_include_pad (bool) – when True, will include the zero-padding in the averaging calculation
  # divisor_override (Optional[int]) – if specified, it will be used as divisor, otherwise size of the pooling region will be used.
  #-------------------------------------------------------
  # big kernel size split into two smaller kernels
  if check_big_pooling(kernel_size, stride, padding):
    logging_warn('big pooling split')
    # Factor each kernel dim into two factors; applying the two smaller
    # poolings back-to-back (stride == kernel) is equivalent to the original
    # non-overlapping average. These recursive F.avg_pool2d calls re-enter
    # this override (presumably via __torch_function__ dispatch — confirm)
    # with kernels that now fail check_big_pooling.
    new_kernel_size1 = []
    new_kernel_size2 = []
    for size in kernel_size:
      a, b = change2TwoNumberMultiplication(size)
      new_kernel_size1.append(a)
      new_kernel_size2.append(b)
    new_kernel_size1 = tuple(new_kernel_size1)
    new_kernel_size2 = tuple(new_kernel_size2)
    new_stride1 = new_kernel_size1
    new_stride2 = new_kernel_size2
    input = torch.nn.functional.avg_pool2d(input, new_kernel_size1, new_stride1, padding, ceil_mode, count_include_pad, divisor_override)
    input = torch.nn.functional.avg_pool2d(input, new_kernel_size2, new_stride2, padding, ceil_mode, count_include_pad, divisor_override)
    return input
  else:
    # Unwrap to a plain tensor to avoid re-dispatching, then re-wrap.
    normal_data = input.as_subclass(torch.Tensor)
    normal_data = torch.nn.functional.avg_pool2d(normal_data, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override)
    return normal_data.as_subclass(TraceTensor)
def adaptive_avg_pool2d(input, output_size):
  """Override of F.adaptive_avg_pool2d for TraceTensor.

  Global pooling (output_size == 1) with a big window is rewritten as a plain
  avg_pool2d over the full spatial extent so the big-pooling split can apply.
  NOTE(review): only the scalar form ``output_size == 1`` is caught; the
  equivalent tuple form (1, 1) falls through to the plain path — confirm
  whether that is intentional.
  """
  # output_size (Union[int, None, Tuple[Optional[int], Optional[int]]]) – the target output size of the image of the form H x W. Can be a tuple (H, W) or a single H for a square image H x H. H and W can be either a int, or None which means the size will be the same as that of the input.
  if output_size == 1:
    # Kernel covering the whole spatial extent == global average.
    kernel_size = [int(input.shape[-2]), int(input.shape[-1])]
    if check_big_pooling(kernel_size, kernel_size, 0):
      logging_warn('replace adapt pooling to pooling')
      return torch.nn.functional.avg_pool2d(input, kernel_size)
  # Unwrap to a plain tensor to avoid re-dispatching, then re-wrap.
  normal_data = input.as_subclass(torch.Tensor)
  normal_data = torch.nn.functional.adaptive_avg_pool2d(normal_data, output_size)
  return normal_data.as_subclass(TraceTensor)
23,697 | import torch
import torch
import math
import functools
from nndct_shared.utils import (AddXopError, NndctOption, NndctScreenLogger,
option_util, QError, QWarning)
def logging_warn(message):
  # Route float-op warnings through the shared nndct screen logger.
  # (Duplicate copy of the same helper earlier in this dump.)
  NndctScreenLogger().warning2user(QWarning.FLOAT_OP, message)
class TraceTensor(torch.Tensor):
def __torch_function__(cls, func, types, args=(), kwargs=None):
  """Dispatch hook: route torch functions through registered overrides.
  (Duplicate copy of the same method earlier in this dump.)

  NOTE(review): the first parameter is ``cls`` — upstream this is presumably
  decorated with @classmethod, lost during extraction; confirm.
  """
  if kwargs is None:
    kwargs = {}
  if func not in HANDLED_FUNCTIONS:
    # No override registered: fall back to the default subclass-aware
    # dispatch when available, else the legacy unwrap/rewrap path.
    if hasattr(torch.Tensor, '__torch_function__'):
      return super().__torch_function__(func, types, args, kwargs)
    return torch_function_old_version(func, types, args, kwargs)
  return HANDLED_FUNCTIONS[func](*args, **kwargs)
def check_big_pooling(kernel_size, stride, padding):
  """Return True when a pooling op qualifies for the big-kernel split rewrite.

  A pooling is splittable into two cascaded smaller poolings only when:
    * the split mode is enabled (NndctOption.nndct_pooling_split_mode),
    * padding is zero on both dimensions,
    * windows do not overlap (stride equals kernel_size, or stride is None,
      which defaults to kernel_size),
    * the window has at least 512 elements,
    * factoring each kernel dim into two factors actually changes the kernel
      (otherwise the rewrite would recurse forever).
  """
  if not NndctOption.nndct_pooling_split_mode.value:
    return False
  # Normalize scalar arguments to 2-element pairs.
  if isinstance(kernel_size, int):
    kernel_size = (kernel_size, kernel_size)
  if isinstance(stride, int):
    stride = (stride, stride)
  if isinstance(padding, int):
    padding = (padding, padding)
  #------------------------
  # The split rewrite only supports zero padding.
  check_padding_flag = padding[0] == 0 and padding[1] == 0
  #------------------------
  # Only non-overlapping windows are supported; `is not None` replaces the
  # non-idiomatic `!= None` comparison.
  check_stride_flag = True
  if stride is not None:
    if stride[0] != kernel_size[0] or stride[1] != kernel_size[1]:
      check_stride_flag = False
  #------------------------
  # Only windows with at least 512 elements count as "big".
  check_kernel_size_flag = kernel_size[0] * kernel_size[1] >= 512
  #------------------------
  base_check_result = check_stride_flag and check_kernel_size_flag and check_padding_flag
  #------------------------
  if base_check_result:
    # Pre-check that factoring yields genuinely smaller kernels; if either
    # factor tuple reproduces the original kernel the rewrite would recurse
    # infinitely, so refuse to split.
    new_kernel_size1 = []
    new_kernel_size2 = []
    for size in kernel_size:
      a, b = change2TwoNumberMultiplication(size)
      new_kernel_size1.append(a)
      new_kernel_size2.append(b)
    if tuple(new_kernel_size1) == tuple(kernel_size):
      return False
    if tuple(new_kernel_size2) == tuple(kernel_size):
      return False
  #------------------------
  return base_check_result
def max_pool2d(input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False, return_indices=False):
  """Override of F.max_pool2d for TraceTensor.

  Big non-overlapping zero-padded kernels (see check_big_pooling) are split
  into two cascaded smaller poolings; otherwise the op runs on the plain
  torch.Tensor payload and the result is re-wrapped as a TraceTensor.
  Parameter semantics follow torch.nn.functional.max_pool2d.

  NOTE(review): in the split branch, return_indices=True would feed the
  (values, indices) tuple of the first pooling into the second — this looks
  broken; confirm return_indices is never True on the split path.
  """
  # kernel_size (Union[int, Tuple[int, int]]) – the size of the window to take a max over
  # stride (Union[int, Tuple[int, int]]) – the stride of the window. Default value is kernel_size
  # padding (Union[int, Tuple[int, int]]) – implicit zero padding to be added on both sides
  # dilation (Union[int, Tuple[int, int]]) – a parameter that controls the stride of elements in the window
  # return_indices (bool) – if True, will return the max indices along with the outputs. Useful for torch.nn.MaxUnpool2d later
  # ceil_mode (bool) – when True, will use ceil instead of floor to compute the output shape
  #-----------------------------------------------
  # big kernel size split into two smaller kernels
  if check_big_pooling(kernel_size, stride, padding):
    logging_warn('big pooling split')
    # Max of a non-overlapping window equals max-of-max over its factors, so
    # two cascaded poolings reproduce the original result.
    new_kernel_size1 = []
    new_kernel_size2 = []
    for size in kernel_size:
      a, b = change2TwoNumberMultiplication(size)
      new_kernel_size1.append(a)
      new_kernel_size2.append(b)
    new_kernel_size1 = tuple(new_kernel_size1)
    new_kernel_size2 = tuple(new_kernel_size2)
    new_stride1 = new_kernel_size1
    new_stride2 = new_kernel_size2
    input = torch.nn.functional.max_pool2d(input, new_kernel_size1, new_stride1, padding, dilation, ceil_mode, return_indices)
    input = torch.nn.functional.max_pool2d(input, new_kernel_size2, new_stride2, padding, dilation, ceil_mode, return_indices)
    return input
  else:
    # Unwrap to a plain tensor to avoid re-dispatching, then re-wrap.
    normal_data = input.as_subclass(torch.Tensor)
    normal_data = torch.nn.functional.max_pool2d(normal_data, kernel_size, stride, padding, dilation, ceil_mode, return_indices)
    return normal_data.as_subclass(TraceTensor)
def adaptive_max_pool2d(input, output_size, return_indices=False):
  """Override of F.adaptive_max_pool2d for TraceTensor.

  Global pooling (output_size == 1) with a big window is rewritten as a plain
  max_pool2d over the full spatial extent so the big-pooling split can apply.
  NOTE(review): on that rewrite path return_indices is silently ignored —
  confirm callers never rely on indices here.
  """
  # output_size (Union[int, None, Tuple[Optional[int], Optional[int]]]) – the target output size of the image of the form can be either a int, or None which means the size will be the same as that of the input.
  # return_indices (bool) – if True, will return the indices along with the outputs. Useful to pass to nn.MaxUnpool2d. Default: False
  if output_size == 1:
    # Kernel covering the whole spatial extent == global max.
    kernel_size = [int(input.shape[-2]), int(input.shape[-1])]
    if check_big_pooling(kernel_size, kernel_size, 0):
      logging_warn('replace adapt pooling to pooling')
      return torch.nn.functional.max_pool2d(input, kernel_size)
  # Unwrap to a plain tensor to avoid re-dispatching, then re-wrap.
  normal_data = input.as_subclass(torch.Tensor)
  normal_data = torch.nn.functional.adaptive_max_pool2d(normal_data, output_size, return_indices)
  return normal_data.as_subclass(TraceTensor)
23,698 | import torch
import torch
import math
import functools
from nndct_shared.utils import (AddXopError, NndctOption, NndctScreenLogger,
option_util, QError, QWarning)
def logging_warn(message):
class TraceTensor(torch.Tensor):
def __torch_function__(cls, func, types, args=(), kwargs=None):
def check_big_pooling(kernel_size, stride, padding):
def avg_pool2d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None):
def mean(input, dim, keepdim=False, *, dtype=None, out=None):
  """Override of torch.mean for TraceTensor.

  When the reduction covers both spatial axes of a 3-D/4-D tensor and the
  window is "big" (see check_big_pooling), the mean is rewritten as a global
  avg_pool2d; otherwise it falls through to torch.mean on the plain payload.
  NOTE(review): assumes dims (1,2) of 3-D / (2,3) of 4-D inputs are the
  spatial axes (CHW / NCHW layout) — confirm.
  """
  # Tensor.mean(dim=None, keepdim=False, *, dtype=None)
  if isinstance(dim, (list, tuple)) and len(dim) == 2:
    # Normalize negative axes to their positive equivalents.
    new_dim = []
    for k in dim:
      if k < 0:
        k = k + input.dim()
      new_dim.append(k)
    dim = new_dim
    if (input.dim() == 3 and tuple(dim) == (1, 2)) or (input.dim() == 4 and tuple(dim) == (2, 3)):
      # Full-extent kernel: spatial mean == global average pooling.
      kernel_size = [int(input.shape[-2]), int(input.shape[-1])]
      if check_big_pooling(kernel_size, kernel_size, 0):
        logging_warn('replace mean to avg pooling')
        data = torch.nn.functional.avg_pool2d(input, kernel_size)
        if not keepdim:
          # avg_pool2d keeps the two spatial axes as size 1; drop them to
          # match mean's keepdim=False output shape.
          data = torch.squeeze(data, dim=-1)
          data = torch.squeeze(data, dim=-1)
        return data
  # Unwrap to a plain tensor to avoid re-dispatching, then re-wrap.
  normal_data = input.as_subclass(torch.Tensor)
  normal_data = torch.mean(normal_data, dim, keepdim=keepdim, dtype=dtype, out=out)
  return normal_data.as_subclass(TraceTensor)
23,699 | import torch
import torch
import math
import functools
from nndct_shared.utils import (AddXopError, NndctOption, NndctScreenLogger,
option_util, QError, QWarning)
def clear_override_import_redundant_op(graph):
  """Remove every aten::alias node from a TorchScript graph.

  Each alias output is replaced by the alias input everywhere it is used,
  then the (now dead) alias node is destroyed.
  """
  for alias_node in graph.findAllNodes("aten::alias"):
    alias_node.output().replaceAllUsesWith(alias_node.inputsAt(0))
    alias_node.destroy()
23,700 | from .op_dispatcher import *
from .parse_utils import *
def change_addmm_to_linear(raw_graph):
  """Rewrite ADDMM nodes whose weight and bias are constants into Linear ops.

  addmm computes input + mat1 @ mat2, so mat2 acts as a Linear weight of shape
  (in_features, out_features) and `input` as the bias; the weight is transposed
  to Linear's (out_features, in_features) layout.
  """
  for node in raw_graph.nodes:
    if node.op.type in [NNDCT_OP.ADDMM]:
      weight = node.op.get_config('mat2')
      bias = node.op.get_config('input')
      # Only rewrite when both operands are free tensors (not produced by nodes).
      if (weight and weight.node == None) and (bias and bias.node == None):
        linear_op = TorchLinear()
        weight_size = weight.shape
        linear_op.set_param(linear_op.ParamName.WEIGHTS, weight)
        # NOTE(review): the outer condition already requires a truthy bias,
        # so this None branch looks unreachable — confirm before relying on
        # bias-less rewrites.
        if bias is None:
          linear_op.set_config("bias", False)
        else:
          linear_op.set_config("bias", True)
          linear_op.set_param(linear_op.ParamName.BIAS, bias)
        # mat2 is (in_features, out_features) in the addmm layout.
        linear_op.set_config('out_features', weight_size[1])
        linear_op.set_config('in_features', weight_size[0])
        # Transpose to Linear's (out_features, in_features) weight layout.
        addmm_weights = linear_op.params[linear_op.ParamName.WEIGHTS].data
        addmm_weights = addmm_weights.transpose(1,0)
        linear_op.set_param_from_data(
            linear_op.ParamName.WEIGHTS,
            addmm_weights)
        node.op = linear_op
23,701 | import re
import torch
import collections
from typing import List, Dict
from dataclasses import dataclass
class HandleListType(Schema):
  """Schema that rebuilds a Python list from a flat value sequence.

  NOTE(review): upstream this is presumably a @dataclass whose ``flatten`` is
  a @classmethod — the decorators appear to have been stripped during
  extraction; confirm against the original source.
  """
  # Per-element schemas, and how many flat values each element consumed.
  schemas: List[Schema]
  sizes: List[int]
  def __call__(self, values):
    # Split the flat sequence back into per-element chunks, then let each
    # element's schema rebuild its original object.
    values = self._split(values, self.sizes)
    if len(values) != len(self.schemas):
      raise ValueError(
          f"Values has length {len(values)} but schemas " f"has length {len(self.schemas)}!"
      )
    values = [m(v) for m, v in zip(self.schemas, values)]
    return list(values)
  def flatten(cls, obj):
    # Flatten each element, concatenate the flat pieces and remember their
    # sizes so __call__ can undo the operation.
    res = [flatten_to_tuple(k) for k in obj]
    values, sizes = cls._concat([k[0] for k in res])
    return values, cls([k[1] for k in res], sizes)
class HandleTupleType(HandleListType):
  """Same as HandleListType, but the rebuilt container is a tuple."""
  def __call__(self, values):
    return tuple(super().__call__(values))
class IdentitySchema(Schema):
  """Schema for leaf objects (non-containers, plus str/bytes): the object
  flattens to a one-element tuple of itself."""
  def __call__(self, values):
    # The flat representation of a leaf is a one-element sequence; unwrap it.
    # BUGFIX: narrowed the original bare `except:` (which also swallowed
    # KeyboardInterrupt/SystemExit) to the lookup errors values[0] can raise.
    try:
      return values[0]
    except (TypeError, IndexError, KeyError):
      return values
  def flatten(cls, obj):
    # A leaf flattens to itself with an argument-less schema.
    return (obj,), cls()
class HandleDictType(HandleListType):
  """Schema for plain mappings: values are flattened in sorted-key order so
  the flat layout is deterministic."""
  # Key list remembered at flatten time so __call__ can rebuild the dict.
  keys: List[str]
  def __call__(self, values):
    values = super().__call__(values)
    return dict(zip(self.keys, values))
  def flatten(cls, obj):
    # Only string keys are supported — sorting arbitrary mixed keys is not
    # well-defined.
    for k in obj.keys():
      if not isinstance(k, str):
        raise KeyError("Only support flattening dictionaries if keys are str.")
    keys = sorted(obj.keys())
    values = [obj[k] for k in keys]
    ret, schema = HandleListType.flatten(values)
    return ret, cls(schema.schemas, schema.sizes, keys)
class HandleOrderDictType(HandleListType):
  """Like HandleDictType, but preserves the OrderedDict's insertion order
  instead of sorting the keys."""
  keys: List[str]
  def __call__(self, values):
    values = super().__call__(values)
    return dict(zip(self.keys, values))
  def flatten(cls, obj):
    for k in obj.keys():
      if not isinstance(k, str):
        raise KeyError("Only support flattening dictionaries if keys are str.")
    # Insertion order, not sorted (this is a dict_keys view, consumed by zip).
    keys = obj.keys()
    values = [obj[k] for k in keys]
    ret, schema = HandleListType.flatten(values)
    return ret, cls(schema.schemas, schema.sizes, keys)
def flatten_to_tuple(data):
  """Flatten ``data`` into (flat_values, schema); the schema can rebuild the
  original object from the flat values.

  str/bytes are treated as leaves (checked before the generic Mapping case),
  and any unrecognised type also falls back to the identity schema.
  """
  handlers = (
      ((str, bytes), IdentitySchema),
      (list, HandleListType),
      (tuple, HandleTupleType),
      (collections.OrderedDict, HandleOrderDictType),
      (collections.abc.Mapping, HandleDictType),
  )
  chosen = IdentitySchema
  for type_spec, schema_cls in handlers:
    if isinstance(data, type_spec):
      chosen = schema_cls
      break
  return chosen.flatten(data)
23,702 | import math
import numpy as np
from nndct_shared.base import NNDCT_OP
from nndct_shared.utils import PatternType
from nndct_shared.nndct_graph import GraphSearcher, Tensor
from pytorch_nndct.parse.torch_op_def import TorchConvTranspose2d, TorchConv2d
from .device import DeviceInfo, DeviceType
from .target_helper import DPUTargetHelper
class DeviceType(AutoName):
  # Execution targets a graph node can be assigned to during partitioning.
  CPU = auto()
  DPU = auto()
class DeviceInfo(object):
  """Device-assignment record attached to graph nodes during partitioning."""
  def __init__(self, device_type):
    # Only DeviceType enum members are legal targets.
    assert isinstance(device_type, DeviceType)
    self._type = device_type
    # Optional human-readable reason why the partitioner filtered this node.
    self._device_partition_check_msg = None
  def get_device_type(self):
    return self._type
  def set_filter_message(self, msg):
    self._device_partition_check_msg = msg
  def get_filter_message(self):
    return self._device_partition_check_msg
  def clear_filter_message(self):
    self._device_partition_check_msg = None
def check_nonlinear(engine, node):
  """Check whether ``node``'s single activation child can be fused onto the DPU.

  If the node has exactly one child and that child is an activation the DPU
  can fuse for this op type, the child is marked as DPU-resident. Leaky-ReLU
  additionally requires the DPU's fixed alpha of 26/256.
  Returns (ok, msg); msg is empty unless the check fails.
  """
  # Activations the DPU can fuse into each convolution-like op type.
  op_nonlinear_map = {
      NNDCT_OP.CONV2D: [NNDCT_OP.RELU, NNDCT_OP.RELU6, NNDCT_OP.PRELU, NNDCT_OP.LEAKY_RELU, NNDCT_OP.HSWISH, NNDCT_OP.HSIGMOID],
      NNDCT_OP.CONVTRANSPOSE2D: [NNDCT_OP.RELU, NNDCT_OP.RELU6, NNDCT_OP.PRELU, NNDCT_OP.LEAKY_RELU],
      NNDCT_OP.DEPTHWISE_CONV2D: [NNDCT_OP.RELU, NNDCT_OP.RELU6, NNDCT_OP.PRELU, NNDCT_OP.LEAKY_RELU],
      NNDCT_OP.DEPTHWISE_CONVTRANSPOSE2D: [],
  }
  msg = ""
  # Unknown op types fuse nothing (dict.get default replaces the None check).
  nonlinear_types = op_nonlinear_map.get(node.op.type, [])
  children_nodes = node.owning_graph.children(node)
  if len(children_nodes) == 1 and children_nodes[0].op.type in nonlinear_types:
    if children_nodes[0].op.type == NNDCT_OP.LEAKY_RELU:
      alpha = children_nodes[0].node_attr(children_nodes[0].op.AttrName.ALPHA)
      # The DPU implements leaky-relu with this fixed slope only.
      dpu_alpha = 26.0 / 256
      if alpha != dpu_alpha:
        # BUGFIX: user-facing message said "alpa" instead of "alpha".
        msg = f"Its alpha is {alpha}, but DPU only support {dpu_alpha}."
        return False, msg
    children_nodes[0].target_device = DeviceInfo(DeviceType.DPU)
  return True, msg
23,703 | import math
import numpy as np
from nndct_shared.base import NNDCT_OP
from nndct_shared.utils import PatternType
from nndct_shared.nndct_graph import GraphSearcher, Tensor
from pytorch_nndct.parse.torch_op_def import TorchConvTranspose2d, TorchConv2d
from .device import DeviceInfo, DeviceType
from .target_helper import DPUTargetHelper
def check_kernel(kernels, kernel_limit):
def check_stride(strides, stride_limit):
def check_load_jump_write(ic, channel_parallel, dilation=None):
def check_pad(pad, kernel):
def check_conv_weights_bank_depth(target, engine, kernel_shape):
class DPUTargetHelper(object):
def get_basic_info(dpu_target):
def get_full_info(dpu_target):
def parse_range(num_range):
def has_attr(message, member):
def get_name(dpu_target):
def get_type(dpu_target):
def get_conv_engine(dpu_target):
def get_alu_engine(dpu_target):
def get_pool_engine(dpu_target):
def get_eltwise_engine(dpu_target):
def has_alu_engine(dpu_target):
def has_pool_engine(dpu_target):
def has_dwconv_engine(dpu_target):
def has_eltwise_engine(dpu_target):
def get_bank_group(dpu_target):
def get_load_engine(dpu_target):
def get_dwconv_engine(dpu_target):
def filter_conv2d(node, target):
  """Decide whether a Conv2d node can run on the DPU target.

  Checks kernel size, weight bank capacity, stride, channel limits and padding
  against the target's conv engine limits. Returns (ok, msg); msg explains the
  first failed check.
  """
  msg = ""
  ksize = node.node_attr(node.op.AttrName.KERNEL)
  strides = node.node_attr(node.op.AttrName.STRIDE)
  dilation = node.node_attr(node.op.AttrName.DILATION)
  padding = node.node_attr(node.op.AttrName.PAD)
  conv_engine = DPUTargetHelper.get_conv_engine(target)
  channel_parallel = conv_engine.input_channel_parallel
  # Channels read from index 3 — assumes NHWC tensor layout; TODO confirm.
  ic = node.in_tensors[0].shape[3]
  # NOTE(review): oc is computed but never used in this check.
  oc = node.out_tensors[0].shape[3]
  # Effective receptive field of a dilated kernel: (k - 1) * d + 1.
  dilated_ksize = list(ksize)
  for i in range(len(dilated_ksize)):
    dilated_ksize[i] = (ksize[i] - 1) * dilation[i] + 1
  # Default limits apply when the target does not declare its own conv_limit.
  kernel_limit = DPUTargetHelper.parse_range("1-16")
  if DPUTargetHelper.has_attr(conv_engine, "conv_limit") and conv_engine.conv_limit.kernel_size:
    kernel_limit = DPUTargetHelper.parse_range(conv_engine.conv_limit.kernel_size)
  ret, msg = check_kernel(ksize, kernel_limit)
  if not ret:
    return ret, msg
  ret, msg = check_conv_weights_bank_depth(target, conv_engine, node.op.get_param(node.op.ParamName.WEIGHTS).shape)
  if not ret:
    return ret, msg
  stride_limit = DPUTargetHelper.parse_range("1-4")
  if DPUTargetHelper.has_attr(conv_engine, "conv_limit") and conv_engine.conv_limit.stride:
    stride_limit = DPUTargetHelper.parse_range(conv_engine.conv_limit.stride)
  # When the kernel covers the whole input (global-conv case) the stride is
  # irrelevant, so the stride check is skipped.
  iw = node.in_tensors[0].shape[2]
  ih = node.in_tensors[0].shape[1]
  if iw != ksize[0] or ih != ksize[1]:
    ret, msg = check_stride(strides, stride_limit)
    if not ret:
      return ret, msg
  ret, msg = check_load_jump_write(ic, channel_parallel, dilation)
  if not ret:
    return ret, msg
  # Padding is checked against the dilated (effective) kernel extent.
  ret, msg = check_pad(padding, dilated_ksize)
  if not ret:
    return ret, msg
  # ret, msg = check_nonlinear(conv_engine, node)
  # if not ret:
  #   return ret, msg
  return True, msg
23,704 | import math
import numpy as np
from nndct_shared.base import NNDCT_OP
from nndct_shared.utils import PatternType
from nndct_shared.nndct_graph import GraphSearcher, Tensor
from pytorch_nndct.parse.torch_op_def import TorchConvTranspose2d, TorchConv2d
from .device import DeviceInfo, DeviceType
from .target_helper import DPUTargetHelper
def check_load_jump_write(ic, channel_parallel, dilation=None):
  """Validate the input channel count against the DPU load limit
  (256 * channel_parallel). Returns (ok, msg); msg is empty on success."""
  if dilation is None:
    dilation = [1, 1]
  cp_limit = 256 * channel_parallel
  if ic > cp_limit:
    return False, f"DPU only supports 'input_channel'({ic}) less than ({cp_limit})"
  return True, ""
def check_save_jump_read(oc, channel_parallel):
  """Validate the output channel count against the DPU save limit
  (256 * channel_parallel). Returns (ok, msg); msg is empty on success."""
  cp_limit = 256 * channel_parallel
  if oc > cp_limit:
    return False, f"DPU only support 'output_channel'({oc}) less than {cp_limit}"
  return True, ""
def check_pad(pad, kernel):
  """Validate 2-D padding [left, right, top, bottom] against kernel [w, h].

  Each pad must be non-negative and no larger than the kernel extent in the
  corresponding dimension. Returns (ok, msg); msg is empty on success.
  """
  msg = ""
  if any(p < 0 for p in pad):
    msg = f"DPU only support non-negative 'pad'({pad})"
    return False, msg
  if pad[0] > kernel[0]:
    msg = f"DPU only supports 'pad_left'({pad[0]}) less than 'kernel_width'({kernel[0]})"
    return False, msg
  if pad[1] > kernel[0]:
    msg = f"DPU only supports 'pad_right'({pad[1]}) less than 'kernel_width'({kernel[0]})"
    return False, msg
  # BUGFIX: top/bottom pads are compared against kernel[1] (the height), but
  # the original messages misreported the limit as 'kernel_width'.
  if pad[2] > kernel[1]:
    msg = f"DPU only supports 'pad_top'({pad[2]}) less than 'kernel_height'({kernel[1]})"
    return False, msg
  if pad[3] > kernel[1]:
    msg = f"DPU only supports 'pad_bottom'({pad[3]}) less than 'kernel_height'({kernel[1]})"
    return False, msg
  return True, msg
def check_conv_weights_bank_depth(target, engine, kernel_shape):
  """Check that a conv's weights fit in the target's weight bank.

  Returns (ok, msg); msg is empty on success.
  """
  msg = ""
  # Locate the bank group configured as this engine's weight bank.
  weight_bank_name = engine.weight_bank
  bank_groups = DPUTargetHelper.get_bank_group(target)
  weights_bank = None
  for bank_group in bank_groups:
    if bank_group.name == weight_bank_name:
      weights_bank = bank_group
      break
  if weights_bank is None:
    msg = f"{target.get_name()}'s bank group configure is error, there's no weights bank for the engine."
    return False, msg
  output_channel_parallel = engine.output_channel_parallel
  # Weight layout is (oc, h, w, ic); oc itself is not needed for the depth.
  k_oc, k_h, k_w, k_ic = kernel_shape
  # Rows consumed per output-channel group: each bank row holds bank_width
  # input channels, spread across bank_num banks.
  weight_depth = k_w * k_h * math.ceil(k_ic * 1.0 / weights_bank.bank_width) * math.ceil(output_channel_parallel * 1.0 / weights_bank.bank_num)
  if weight_depth > weights_bank.bank_depth:
    msg = f"Weights({kernel_shape}) is too large to be loaded into parameter buffer. 'kernel_h * kernel_w * ⌈input_channel / weights_bank_width⌉ * ⌈output_channel_parallel / weights_bank_num⌉({weight_depth})' is supporsed to be less equal than {weights_bank.bank_depth}."
    return False, msg
  return True, msg
def check_transposed_kernel(kernel, stride, limit):
  """Validate a transposed-conv kernel/stride pair against a supported range.

  kernel // stride must be in ``limit``; when the division is inexact,
  kernel // stride + 1 must also be supported. Returns (ok, msg).
  """
  quotient = kernel // stride
  supported = quotient in limit and (kernel % stride == 0 or (quotient + 1) in limit)
  if supported:
    return True, ""
  return False, f"'kernel / stride'({kernel} / {stride}) is not in DPU supported range{limit}."
class DPUTargetHelper(object):
  """Static helpers for reading fields out of a DPU target description.

  NOTE(review): none of these methods takes self/cls — upstream they are
  presumably @staticmethod members whose decorators were stripped during
  extraction; confirm against the original source.
  """
  def get_basic_info(dpu_target):
    # Short multi-line summary used in user-facing messages.
    return f"name: {dpu_target.name}\ntype: {dpu_target.type}\nisa_version: {dpu_target.isa_version}"
  def get_full_info(dpu_target):
    return dpu_target._legacy_dpu_target_def
  def parse_range(num_range):
    # Expand a spec like "1,3,5-8" into [1, 3, 5, 6, 7, 8].
    # NOTE(review): relies on a module-level `import re` not visible in this chunk.
    new_range = []
    token = ","
    single_pattern = re.compile(r"""\s*(\d+)\s*""")
    range_pattern = re.compile(r"""\s*(\d+)\s*-\s*(\d+)\s*""")
    for num_item in num_range.split(token):
      if "-" in num_item:
        # "lo-hi" items expand to the inclusive integer range.
        result = range_pattern.match(num_item)
        lower = int(result.group(1))
        upper = int(result.group(2))
        new_range.extend(list(range(lower, upper + 1)))
      else:
        result = single_pattern.match(num_item)
        num = int(result.group(1))
        new_range.append(num)
    return new_range
  def has_attr(message, member):
    # Protobuf-style presence test: the submessage exists and is non-empty.
    assert hasattr(message, "ByteSize")
    return hasattr(message, member) and getattr(message, member).ByteSize() > 0
  def get_name(dpu_target):
    return dpu_target.get_name()
  def get_type(dpu_target):
    return dpu_target.get_type()
  def get_conv_engine(dpu_target):
    return dpu_target.get_conv_engine()
  def get_alu_engine(dpu_target):
    return dpu_target.get_alu_engine()
  def get_pool_engine(dpu_target):
    return dpu_target.get_pool_engine()
  def get_eltwise_engine(dpu_target):
    return dpu_target.get_eltwise_engine()
  # has_* variants report whether the engine is present AND non-empty.
  def has_alu_engine(dpu_target):
    return hasattr(dpu_target, "alu_engine") and dpu_target.get_alu_engine().ByteSize() > 0
  def has_pool_engine(dpu_target):
    return hasattr(dpu_target, "pool_engine") and dpu_target.get_pool_engine().ByteSize() > 0
  def has_dwconv_engine(dpu_target):
    return hasattr(dpu_target, "dwconv_engine") and dpu_target.get_dwconv_engine().ByteSize() > 0
  def has_eltwise_engine(dpu_target):
    return hasattr(dpu_target, "eltwise_engine") and dpu_target.get_eltwise_engine().ByteSize() > 0
  def get_bank_group(dpu_target):
    return dpu_target.get_bank_group()
  def get_load_engine(dpu_target):
    return dpu_target.get_load_engine()
  def get_dwconv_engine(dpu_target):
    return dpu_target.get_dwconv_engine()
def filter_transpose_conv2d(node, target):
  """Check a transposed Conv2d node against the DPU target's limits.

  Returns (ok, msg): False plus a human-readable reason for the first
  violated constraint, otherwise (True, "").
  """
  msg = ""
  ksize = node.node_attr(node.op.AttrName.KERNEL)    # [kernel_w, kernel_h]
  strides = node.node_attr(node.op.AttrName.STRIDE)
  dilation = node.node_attr(node.op.AttrName.DILATION)
  padding = node.node_attr(node.op.AttrName.PAD)     # [left, right, top, bottom]
  output_padding = node.node_config("output_padding")
  # The DPU cannot realize the extra rows/cols output_padding would add.
  if any([pad != 0 for pad in output_padding]):
    msg = "DPU does not support output_padding."
    return False, msg
  conv_engine = DPUTargetHelper.get_conv_engine(target)
  channel_parallel = conv_engine.input_channel_parallel
  # NHWC layout: dim 3 is the channel dimension.
  ic = node.in_tensors[0].shape[3]
  oc = node.out_tensors[0].shape[3]
  # Kernel range defaults to 1-16 unless the target's conv_limit overrides it.
  kernel_limit = DPUTargetHelper.parse_range("1-16")
  if DPUTargetHelper.has_attr(conv_engine, "conv_limit") and conv_engine.conv_limit.kernel_size:
    kernel_limit = DPUTargetHelper.parse_range(conv_engine.conv_limit.kernel_size)
  # Transposed conv: the effective per-axis kernel is kernel / stride.
  ret, msg = check_transposed_kernel(ksize[0], strides[0], kernel_limit)
  if not ret:
    return ret, msg
  ret, msg = check_transposed_kernel(ksize[1], strides[1], kernel_limit)
  if not ret:
    return ret, msg
  # Weights must fit into the conv engine's weight bank depth.
  ret, msg = check_conv_weights_bank_depth(target, conv_engine, node.op.get_param(node.op.ParamName.WEIGHTS).shape)
  if not ret:
    return ret, msg
  ret, msg = check_load_jump_write(ic, channel_parallel, dilation)
  if not ret:
    return ret, msg
  ret, msg = check_save_jump_read(oc, channel_parallel)
  if not ret:
    return ret, msg
  ret, msg = check_pad(padding, ksize)
  if not ret:
    return ret, msg
  # ret, msg = check_nonlinear(conv_engine, node)
  # if not ret:
  #   return ret, msg
  return True, msg
import math
import numpy as np
from nndct_shared.base import NNDCT_OP
from nndct_shared.utils import PatternType
from nndct_shared.nndct_graph import GraphSearcher, Tensor
from pytorch_nndct.parse.torch_op_def import TorchConvTranspose2d, TorchConv2d
from .device import DeviceInfo, DeviceType
from .target_helper import DPUTargetHelper
class DPUTargetHelper(object):
  """Namespace of helpers for querying a DPU target description.

  Methods are written without ``self``/``@staticmethod`` and are invoked as
  ``DPUTargetHelper.method(target)`` (plain functions under Python 3).
  """

  def get_basic_info(dpu_target):
    # Short identity summary: name / type / ISA version.
    return f"name: {dpu_target.name}\ntype: {dpu_target.type}\nisa_version: {dpu_target.isa_version}"

  def get_full_info(dpu_target):
    # Verbose legacy target definition.
    return dpu_target._legacy_dpu_target_def

  def parse_range(num_range):
    """Expand a spec such as "1-8,16" into a flat int list [1..8, 16]."""
    import re  # local import: `re` is not visibly imported at module level
    new_range = []
    token = ","
    single_pattern = re.compile(r"""\s*(\d+)\s*""")
    range_pattern = re.compile(r"""\s*(\d+)\s*-\s*(\d+)\s*""")
    for num_item in num_range.split(token):
      if "-" in num_item:
        result = range_pattern.match(num_item)
        lower = int(result.group(1))
        upper = int(result.group(2))
        new_range.extend(list(range(lower, upper + 1)))
      else:
        result = single_pattern.match(num_item)
        num = int(result.group(1))
        new_range.append(num)
    return new_range

  def has_attr(message, member):
    """True when protobuf `message` carries sub-message `member` with content."""
    assert hasattr(message, "ByteSize")
    return hasattr(message, member) and getattr(message, member).ByteSize() > 0

  def get_name(dpu_target):
    return dpu_target.get_name()

  def get_type(dpu_target):
    return dpu_target.get_type()

  def get_conv_engine(dpu_target):
    return dpu_target.get_conv_engine()

  def get_alu_engine(dpu_target):
    return dpu_target.get_alu_engine()

  def get_pool_engine(dpu_target):
    return dpu_target.get_pool_engine()

  def get_eltwise_engine(dpu_target):
    return dpu_target.get_eltwise_engine()

  def has_alu_engine(dpu_target):
    # Engine exists only if the proto field is present and non-empty.
    return hasattr(dpu_target, "alu_engine") and dpu_target.get_alu_engine().ByteSize() > 0

  def has_pool_engine(dpu_target):
    return hasattr(dpu_target, "pool_engine") and dpu_target.get_pool_engine().ByteSize() > 0

  def has_dwconv_engine(dpu_target):
    return hasattr(dpu_target, "dwconv_engine") and dpu_target.get_dwconv_engine().ByteSize() > 0

  def has_eltwise_engine(dpu_target):
    return hasattr(dpu_target, "eltwise_engine") and dpu_target.get_eltwise_engine().ByteSize() > 0

  def get_bank_group(dpu_target):
    return dpu_target.get_bank_group()

  def get_load_engine(dpu_target):
    return dpu_target.get_load_engine()

  def get_dwconv_engine(dpu_target):
    return dpu_target.get_dwconv_engine()
def filter_conv3d(node, target):
  """Conv3d is only supported on DPUCVDX8G targets; reject all others."""
  if DPUTargetHelper.get_type(target) == "DPUCVDX8G":
    return True, ""
  return False, f"{DPUTargetHelper.get_name(target)} does not support {node.op.type}. Only DPUCVDX8G support this."
import math
import numpy as np
from nndct_shared.base import NNDCT_OP
from nndct_shared.utils import PatternType
from nndct_shared.nndct_graph import GraphSearcher, Tensor
from pytorch_nndct.parse.torch_op_def import TorchConvTranspose2d, TorchConv2d
from .device import DeviceInfo, DeviceType
from .target_helper import DPUTargetHelper
class DPUTargetHelper(object):
  """Namespace of helpers for querying a DPU target description.

  Methods are written without ``self``/``@staticmethod`` and are invoked as
  ``DPUTargetHelper.method(target)`` (plain functions under Python 3).
  """

  def get_basic_info(dpu_target):
    # Short identity summary: name / type / ISA version.
    return f"name: {dpu_target.name}\ntype: {dpu_target.type}\nisa_version: {dpu_target.isa_version}"

  def get_full_info(dpu_target):
    # Verbose legacy target definition.
    return dpu_target._legacy_dpu_target_def

  def parse_range(num_range):
    """Expand a spec such as "1-8,16" into a flat int list [1..8, 16]."""
    import re  # local import: `re` is not visibly imported at module level
    new_range = []
    token = ","
    single_pattern = re.compile(r"""\s*(\d+)\s*""")
    range_pattern = re.compile(r"""\s*(\d+)\s*-\s*(\d+)\s*""")
    for num_item in num_range.split(token):
      if "-" in num_item:
        result = range_pattern.match(num_item)
        lower = int(result.group(1))
        upper = int(result.group(2))
        new_range.extend(list(range(lower, upper + 1)))
      else:
        result = single_pattern.match(num_item)
        num = int(result.group(1))
        new_range.append(num)
    return new_range

  def has_attr(message, member):
    """True when protobuf `message` carries sub-message `member` with content."""
    assert hasattr(message, "ByteSize")
    return hasattr(message, member) and getattr(message, member).ByteSize() > 0

  def get_name(dpu_target):
    return dpu_target.get_name()

  def get_type(dpu_target):
    return dpu_target.get_type()

  def get_conv_engine(dpu_target):
    return dpu_target.get_conv_engine()

  def get_alu_engine(dpu_target):
    return dpu_target.get_alu_engine()

  def get_pool_engine(dpu_target):
    return dpu_target.get_pool_engine()

  def get_eltwise_engine(dpu_target):
    return dpu_target.get_eltwise_engine()

  def has_alu_engine(dpu_target):
    # Engine exists only if the proto field is present and non-empty.
    return hasattr(dpu_target, "alu_engine") and dpu_target.get_alu_engine().ByteSize() > 0

  def has_pool_engine(dpu_target):
    return hasattr(dpu_target, "pool_engine") and dpu_target.get_pool_engine().ByteSize() > 0

  def has_dwconv_engine(dpu_target):
    return hasattr(dpu_target, "dwconv_engine") and dpu_target.get_dwconv_engine().ByteSize() > 0

  def has_eltwise_engine(dpu_target):
    return hasattr(dpu_target, "eltwise_engine") and dpu_target.get_eltwise_engine().ByteSize() > 0

  def get_bank_group(dpu_target):
    return dpu_target.get_bank_group()

  def get_load_engine(dpu_target):
    return dpu_target.get_load_engine()

  def get_dwconv_engine(dpu_target):
    return dpu_target.get_dwconv_engine()
def filter_depthwise_conv3d(node, target):
  """Depthwise Conv3d is only supported on DPUCVDX8G targets."""
  if DPUTargetHelper.get_type(target) == "DPUCVDX8G":
    return True, ""
  return False, f"{DPUTargetHelper.get_name(target)} does not support {node.op.type}. Only DPUCVDX8G support this."
import math
import numpy as np
from nndct_shared.base import NNDCT_OP
from nndct_shared.utils import PatternType
from nndct_shared.nndct_graph import GraphSearcher, Tensor
from pytorch_nndct.parse.torch_op_def import TorchConvTranspose2d, TorchConv2d
from .device import DeviceInfo, DeviceType
from .target_helper import DPUTargetHelper
class DPUTargetHelper(object):
  """Namespace of helpers for querying a DPU target description.

  Methods are written without ``self``/``@staticmethod`` and are invoked as
  ``DPUTargetHelper.method(target)`` (plain functions under Python 3).
  """

  def get_basic_info(dpu_target):
    # Short identity summary: name / type / ISA version.
    return f"name: {dpu_target.name}\ntype: {dpu_target.type}\nisa_version: {dpu_target.isa_version}"

  def get_full_info(dpu_target):
    # Verbose legacy target definition.
    return dpu_target._legacy_dpu_target_def

  def parse_range(num_range):
    """Expand a spec such as "1-8,16" into a flat int list [1..8, 16]."""
    import re  # local import: `re` is not visibly imported at module level
    new_range = []
    token = ","
    single_pattern = re.compile(r"""\s*(\d+)\s*""")
    range_pattern = re.compile(r"""\s*(\d+)\s*-\s*(\d+)\s*""")
    for num_item in num_range.split(token):
      if "-" in num_item:
        result = range_pattern.match(num_item)
        lower = int(result.group(1))
        upper = int(result.group(2))
        new_range.extend(list(range(lower, upper + 1)))
      else:
        result = single_pattern.match(num_item)
        num = int(result.group(1))
        new_range.append(num)
    return new_range

  def has_attr(message, member):
    """True when protobuf `message` carries sub-message `member` with content."""
    assert hasattr(message, "ByteSize")
    return hasattr(message, member) and getattr(message, member).ByteSize() > 0

  def get_name(dpu_target):
    return dpu_target.get_name()

  def get_type(dpu_target):
    return dpu_target.get_type()

  def get_conv_engine(dpu_target):
    return dpu_target.get_conv_engine()

  def get_alu_engine(dpu_target):
    return dpu_target.get_alu_engine()

  def get_pool_engine(dpu_target):
    return dpu_target.get_pool_engine()

  def get_eltwise_engine(dpu_target):
    return dpu_target.get_eltwise_engine()

  def has_alu_engine(dpu_target):
    # Engine exists only if the proto field is present and non-empty.
    return hasattr(dpu_target, "alu_engine") and dpu_target.get_alu_engine().ByteSize() > 0

  def has_pool_engine(dpu_target):
    return hasattr(dpu_target, "pool_engine") and dpu_target.get_pool_engine().ByteSize() > 0

  def has_dwconv_engine(dpu_target):
    return hasattr(dpu_target, "dwconv_engine") and dpu_target.get_dwconv_engine().ByteSize() > 0

  def has_eltwise_engine(dpu_target):
    return hasattr(dpu_target, "eltwise_engine") and dpu_target.get_eltwise_engine().ByteSize() > 0

  def get_bank_group(dpu_target):
    return dpu_target.get_bank_group()

  def get_load_engine(dpu_target):
    return dpu_target.get_load_engine()

  def get_dwconv_engine(dpu_target):
    return dpu_target.get_dwconv_engine()
def filter_transpose_conv3d(node, target):
  """Transposed Conv3d: DPUCVDX8G targets only, and no output_padding."""
  if DPUTargetHelper.get_type(target) != "DPUCVDX8G":
    return False, f"{DPUTargetHelper.get_name(target)} does not support {node.op.type}. Only DPUCVDX8G support this."
  if any(pad != 0 for pad in node.node_config("output_padding")):
    return False, "DPU does not support output_padding."
  return True, ""
import math
import numpy as np
from nndct_shared.base import NNDCT_OP
from nndct_shared.utils import PatternType
from nndct_shared.nndct_graph import GraphSearcher, Tensor
from pytorch_nndct.parse.torch_op_def import TorchConvTranspose2d, TorchConv2d
from .device import DeviceInfo, DeviceType
from .target_helper import DPUTargetHelper
class DPUTargetHelper(object):
  """Namespace of helpers for querying a DPU target description.

  Restored: this copy had bare ``def`` headers with no bodies (invalid
  syntax); bodies are reinstated to match the other copies in this file.
  Methods are written without ``self``/``@staticmethod`` and are invoked as
  ``DPUTargetHelper.method(target)`` (plain functions under Python 3).
  """

  def get_basic_info(dpu_target):
    # Short identity summary: name / type / ISA version.
    return f"name: {dpu_target.name}\ntype: {dpu_target.type}\nisa_version: {dpu_target.isa_version}"

  def get_full_info(dpu_target):
    # Verbose legacy target definition.
    return dpu_target._legacy_dpu_target_def

  def parse_range(num_range):
    """Expand a spec such as "1-8,16" into a flat int list [1..8, 16]."""
    import re  # local import: `re` is not visibly imported at module level
    new_range = []
    token = ","
    single_pattern = re.compile(r"""\s*(\d+)\s*""")
    range_pattern = re.compile(r"""\s*(\d+)\s*-\s*(\d+)\s*""")
    for num_item in num_range.split(token):
      if "-" in num_item:
        result = range_pattern.match(num_item)
        lower = int(result.group(1))
        upper = int(result.group(2))
        new_range.extend(list(range(lower, upper + 1)))
      else:
        result = single_pattern.match(num_item)
        num = int(result.group(1))
        new_range.append(num)
    return new_range

  def has_attr(message, member):
    """True when protobuf `message` carries sub-message `member` with content."""
    assert hasattr(message, "ByteSize")
    return hasattr(message, member) and getattr(message, member).ByteSize() > 0

  def get_name(dpu_target):
    return dpu_target.get_name()

  def get_type(dpu_target):
    return dpu_target.get_type()

  def get_conv_engine(dpu_target):
    return dpu_target.get_conv_engine()

  def get_alu_engine(dpu_target):
    return dpu_target.get_alu_engine()

  def get_pool_engine(dpu_target):
    return dpu_target.get_pool_engine()

  def get_eltwise_engine(dpu_target):
    return dpu_target.get_eltwise_engine()

  def has_alu_engine(dpu_target):
    # Engine exists only if the proto field is present and non-empty.
    return hasattr(dpu_target, "alu_engine") and dpu_target.get_alu_engine().ByteSize() > 0

  def has_pool_engine(dpu_target):
    return hasattr(dpu_target, "pool_engine") and dpu_target.get_pool_engine().ByteSize() > 0

  def has_dwconv_engine(dpu_target):
    return hasattr(dpu_target, "dwconv_engine") and dpu_target.get_dwconv_engine().ByteSize() > 0

  def has_eltwise_engine(dpu_target):
    return hasattr(dpu_target, "eltwise_engine") and dpu_target.get_eltwise_engine().ByteSize() > 0

  def get_bank_group(dpu_target):
    return dpu_target.get_bank_group()

  def get_load_engine(dpu_target):
    return dpu_target.get_load_engine()

  def get_dwconv_engine(dpu_target):
    return dpu_target.get_dwconv_engine()
def filter_transpose_depthwise_conv3d(node, target):
  """Transposed depthwise Conv3d: DPUCVDX8G only and no output_padding."""
  if DPUTargetHelper.get_type(target) != "DPUCVDX8G":
    return False, f"{DPUTargetHelper.get_name(target)} does not support {node.op.type}. Only DPUCVDX8G support this."
  if any(pad != 0 for pad in node.node_config("output_padding")):
    return False, "DPU does not support output_padding."
  return True, ""
import math
import numpy as np
from nndct_shared.base import NNDCT_OP
from nndct_shared.utils import PatternType
from nndct_shared.nndct_graph import GraphSearcher, Tensor
from pytorch_nndct.parse.torch_op_def import TorchConvTranspose2d, TorchConv2d
from .device import DeviceInfo, DeviceType
from .target_helper import DPUTargetHelper
def check_kernel(kernels, kernel_limit):
  """Both kernel dims must be members of kernel_limit; returns (ok, msg)."""
  for k in kernels:
    if k not in kernel_limit:
      return False, f"'kernel'({kernels[0]} x {kernels[1]}) is not in DPU supported range({kernel_limit})."
  return True, ""
def check_stride(strides, stride_limit):
  """Every stride must be a member of stride_limit; returns (ok, msg).

  Bug fix: the original iterated over `stride_limit` instead of `strides`
  (`for s in stride_limit`), so the check could never fail.
  """
  msg = ""
  if any([s not in stride_limit for s in strides]):
    msg = f"'stride'({strides}) is not in DPU supported range({stride_limit})."
    return False, msg
  return True, msg
def check_pad(pad, kernel):
  """Validate [left, right, top, bottom] padding against [kernel_w, kernel_h].

  Each pad must be non-negative and must not exceed the kernel extent on
  its own axis. Returns (ok, msg).
  """
  msg = ""
  if any([p < 0 for p in pad]):
    msg = f"DPU only support non-negative 'pad'({pad})"
    return False, msg
  if pad[0] > kernel[0]:
    msg = f"DPU only supports 'pad_left'({pad[0]}) less than 'kernel_width'({kernel[0]})"
    return False, msg
  if pad[1] > kernel[0]:
    msg = f"DPU only supports 'pad_right'({pad[1]}) less than 'kernel_width'({kernel[0]})"
    return False, msg
  # Message fix: vertical pads compare against the kernel HEIGHT, so say so
  # (the original said 'kernel_width' for pad_top / pad_bottom).
  if pad[2] > kernel[1]:
    msg = f"DPU only supports 'pad_top'({pad[2]}) less than 'kernel_height'({kernel[1]})"
    return False, msg
  if pad[3] > kernel[1]:
    msg = f"DPU only supports 'pad_bottom'({pad[3]}) less than 'kernel_height'({kernel[1]})"
    return False, msg
  return True, msg
def check_pad_with_limit(pad, kernel, pad_limit):
  """Validate [left, right, top, bottom] padding against explicit limits.

  For each side: if `pad_limit` carries an explicit range for that side,
  the pad must be inside it; otherwise the pad must not exceed the kernel
  extent on its axis. Returns (ok, msg).

  Bug fix: callers (e.g. filter_pool) populate `pad_limit` only for the
  sides the target constrains, so the original `pad_limit[key]` raised
  KeyError for absent sides; use `.get` instead.
  """
  msg = ""
  if any([p < 0 for p in pad]):
    msg = f"DPU only support non-negative 'pad'({pad})"
    return False, msg
  pad_idx_kernel_map = {
      "pad_left": [0, 0],  # [pad_idx, kernel_idx]
      "pad_right": [1, 0],
      "pad_top": [2, 1],
      "pad_bottom": [3, 1]
  }
  for key, pad_idx_kernel_idx in pad_idx_kernel_map.items():
    pad_idx, kernel_idx = pad_idx_kernel_idx
    if pad_limit.get(key):
      if pad[pad_idx] not in pad_limit[key]:
        msg = f"{key}({pad[pad_idx]}) is not in range."
        return False, msg
    else:
      if pad[pad_idx] > kernel[kernel_idx]:
        msg = f"DPU only supports {key}({pad[pad_idx]}) less than 'kernel'({kernel[kernel_idx]})."
        return False, msg
  return True, msg
def check_pool_engine(target):
  """Pooling requires either a pool-engine or an alu-engine on the target."""
  if DPUTargetHelper.has_pool_engine(target) or DPUTargetHelper.has_alu_engine(target):
    return True, ""
  return False, f"{DPUTargetHelper.get_name(target)} does not have pool-engine."
class DPUTargetHelper(object):
  """Namespace of helpers for querying a DPU target description.

  Methods are written without ``self``/``@staticmethod`` and are invoked as
  ``DPUTargetHelper.method(target)`` (plain functions under Python 3).
  """

  def get_basic_info(dpu_target):
    # Short identity summary: name / type / ISA version.
    return f"name: {dpu_target.name}\ntype: {dpu_target.type}\nisa_version: {dpu_target.isa_version}"

  def get_full_info(dpu_target):
    # Verbose legacy target definition.
    return dpu_target._legacy_dpu_target_def

  def parse_range(num_range):
    """Expand a spec such as "1-8,16" into a flat int list [1..8, 16]."""
    import re  # local import: `re` is not visibly imported at module level
    new_range = []
    token = ","
    single_pattern = re.compile(r"""\s*(\d+)\s*""")
    range_pattern = re.compile(r"""\s*(\d+)\s*-\s*(\d+)\s*""")
    for num_item in num_range.split(token):
      if "-" in num_item:
        result = range_pattern.match(num_item)
        lower = int(result.group(1))
        upper = int(result.group(2))
        new_range.extend(list(range(lower, upper + 1)))
      else:
        result = single_pattern.match(num_item)
        num = int(result.group(1))
        new_range.append(num)
    return new_range

  def has_attr(message, member):
    """True when protobuf `message` carries sub-message `member` with content."""
    assert hasattr(message, "ByteSize")
    return hasattr(message, member) and getattr(message, member).ByteSize() > 0

  def get_name(dpu_target):
    return dpu_target.get_name()

  def get_type(dpu_target):
    return dpu_target.get_type()

  def get_conv_engine(dpu_target):
    return dpu_target.get_conv_engine()

  def get_alu_engine(dpu_target):
    return dpu_target.get_alu_engine()

  def get_pool_engine(dpu_target):
    return dpu_target.get_pool_engine()

  def get_eltwise_engine(dpu_target):
    return dpu_target.get_eltwise_engine()

  def has_alu_engine(dpu_target):
    # Engine exists only if the proto field is present and non-empty.
    return hasattr(dpu_target, "alu_engine") and dpu_target.get_alu_engine().ByteSize() > 0

  def has_pool_engine(dpu_target):
    return hasattr(dpu_target, "pool_engine") and dpu_target.get_pool_engine().ByteSize() > 0

  def has_dwconv_engine(dpu_target):
    return hasattr(dpu_target, "dwconv_engine") and dpu_target.get_dwconv_engine().ByteSize() > 0

  def has_eltwise_engine(dpu_target):
    return hasattr(dpu_target, "eltwise_engine") and dpu_target.get_eltwise_engine().ByteSize() > 0

  def get_bank_group(dpu_target):
    return dpu_target.get_bank_group()

  def get_load_engine(dpu_target):
    return dpu_target.get_load_engine()

  def get_dwconv_engine(dpu_target):
    return dpu_target.get_dwconv_engine()
def filter_pool(node, target):
  """Check an avg/max pooling node against the DPU target's limits.

  Returns (ok, msg): False plus a reason for the first violated
  constraint, otherwise (True, "").

  Bug fix: the max_reduce branch referenced the misspelled name `ksizse`,
  which raised NameError whenever that path executed.
  """
  msg = ""
  ret, msg = check_pool_engine(target)
  if not ret:
    return ret, msg
  avg_pool_type = [NNDCT_OP.AVG_POOL, NNDCT_OP.ADAPTIVEAVGPOOL2D]
  max_pool_type = [NNDCT_OP.MAX_POOL]
  ksize = node.node_attr(node.op.AttrName.KERNEL)    # [kernel_w, kernel_h]
  strides = node.node_attr(node.op.AttrName.STRIDE)
  padding = node.node_attr(node.op.AttrName.PAD)     # [left, right, top, bottom]
  # Pool capability is described either by the ALU engine (newer targets)
  # or by the dedicated pool engine.
  if DPUTargetHelper.has_alu_engine(target):
    alu_engine = DPUTargetHelper.get_alu_engine(target)
    support_list = alu_engine.alu_type
    has_max = any([t == alu_engine.max_pool for t in support_list])
    has_avg = any([t == alu_engine.avg_pool for t in support_list])
    has_max_reduce = any([t == alu_engine.max_reduce for t in support_list])
  else:
    pool_engine = DPUTargetHelper.get_pool_engine(target)
    support_list = pool_engine.pool_type
    has_max = any([t == pool_engine.max for t in support_list])
    has_avg = any([t == pool_engine.avg for t in support_list])
    has_max_reduce = any([t == pool_engine.max_reduce for t in support_list])
  if not ((node.op.type in max_pool_type and (has_max or has_max_reduce)) or (node.op.type in avg_pool_type and has_avg)):
    msg = f"{DPUTargetHelper.get_name(target)} does not support {node.op.type}."
    return False, msg
  # Defaults, overridden by the engine-specific limits below when present.
  kernel_limit = DPUTargetHelper.parse_range("1-8")
  stride_limit = DPUTargetHelper.parse_range("1-8")
  pad_limit = {}
  if DPUTargetHelper.has_alu_engine(target):
    alu_engine = DPUTargetHelper.get_alu_engine(target)
    if DPUTargetHelper.has_attr(alu_engine, "alu_limit"):
      alu_limit = alu_engine.alu_limit
      if alu_limit.kernel_size:
        kernel_limit = DPUTargetHelper.parse_range(alu_limit.kernel_size)
      if alu_limit.stride:
        stride_limit = DPUTargetHelper.parse_range(alu_limit.stride)
    if DPUTargetHelper.has_attr(alu_engine, "pad_limit"):
      # Only the sides the target constrains are recorded.
      alu_pad_limit = alu_engine.pad_limit
      if alu_pad_limit.pad_left:
        pad_limit["pad_left"] = DPUTargetHelper.parse_range(alu_pad_limit.pad_left)
      if alu_pad_limit.pad_right:
        pad_limit["pad_right"] = DPUTargetHelper.parse_range(alu_pad_limit.pad_right)
      if alu_pad_limit.pad_top:
        pad_limit["pad_top"] = DPUTargetHelper.parse_range(alu_pad_limit.pad_top)
      if alu_pad_limit.pad_bottom:
        pad_limit["pad_bottom"] = DPUTargetHelper.parse_range(alu_pad_limit.pad_bottom)
  elif node.op.type in avg_pool_type:
    if ksize[0] != ksize[1]:
      msg = f"DPU only supports avgpool with square kernel, but this op has kernel {ksize[0]} x {ksize[1]}."
      return False, msg
    pool_engine = DPUTargetHelper.get_pool_engine(target)
    if DPUTargetHelper.has_attr(pool_engine, "avg_limit"):
      avg_limit = pool_engine.avg_limit
      if avg_limit.kernel_size:
        kernel_limit = DPUTargetHelper.parse_range(avg_limit.kernel_size)
      if avg_limit.stride:
        stride_limit = DPUTargetHelper.parse_range(avg_limit.stride)
  elif node.op.type in max_pool_type:
    pool_engine = DPUTargetHelper.get_pool_engine(target)
    if DPUTargetHelper.has_attr(pool_engine, "max_limit"):
      max_limit = pool_engine.max_limit
      if max_limit.kernel_size:
        kernel_limit = DPUTargetHelper.parse_range(max_limit.kernel_size)
      if max_limit.stride:
        stride_limit = DPUTargetHelper.parse_range(max_limit.stride)
  # max_reduce fallback: an out-of-range max-pool kernel may still run as a
  # max-reduce with a relaxed width limit and a narrow height limit.
  if node.op.type in max_pool_type and has_max_reduce and ksize[0] not in kernel_limit:
    if ksize[0] > 100:
      msg = f"'kernel_width'({ksize[0]}) is not in DPU supported range [1, 100]"
      return False, msg
    if ksize[1] < 1 or ksize[1] > 2:  # fixed: was `ksizse[1]` (NameError)
      msg = f"'kernel_height'({ksize[1]}) is not in DPU supported range [1, 2]"
      return False, msg
  else:
    ret, msg = check_kernel(ksize, kernel_limit)
    if not ret:
      return ret, msg
  iw = node.in_tensors[0].shape[2]  # NHWC: width
  ih = node.in_tensors[0].shape[1]  # NHWC: height
  # Global pooling (kernel covers the whole feature map) skips the stride check.
  if iw != ksize[0] or ih != ksize[1]:
    ret, msg = check_stride(strides, stride_limit)
    if not ret:
      return ret, msg
  if pad_limit:
    ret, msg = check_pad_with_limit(padding, ksize, pad_limit)
    if not ret:
      return ret, msg
  else:
    ret, msg = check_pad(padding, ksize)
    if not ret:
      return ret, msg
  return True, msg
import math
import numpy as np
from nndct_shared.base import NNDCT_OP
from nndct_shared.utils import PatternType
from nndct_shared.nndct_graph import GraphSearcher, Tensor
from pytorch_nndct.parse.torch_op_def import TorchConvTranspose2d, TorchConv2d
from .device import DeviceInfo, DeviceType
from .target_helper import DPUTargetHelper
def check_eltwise_engine(target):
  """Elementwise ops require the target to expose an eltwise-engine."""
  if DPUTargetHelper.has_eltwise_engine(target):
    return True, ""
  return False, f"{DPUTargetHelper.get_name(target)} does not have eltwise-engine"
def filter_depthwise_conv2d(node, target):
  """Check a depthwise Conv2d node against the DPU target's limits.

  Returns (ok, msg): False plus a reason for the first violated
  constraint, otherwise (True, "").
  """
  msg = ""
  ret, msg = check_dwconv_engine(target)
  if not ret:
    return ret, msg
  ksize = node.node_attr(node.op.AttrName.KERNEL)    # [kernel_w, kernel_h]
  strides = node.node_attr(node.op.AttrName.STRIDE)
  dilation = node.node_attr(node.op.AttrName.DILATION)
  padding = node.node_attr(node.op.AttrName.PAD)     # [left, right, top, bottom]
  # NHWC layout: dim 3 is the channel dimension.
  ic = node.in_tensors[0].shape[3]
  oc = node.out_tensors[0].shape[3]
  # Effective kernel extent once dilation is applied.
  dilated_ksize = list(ksize)
  for i in range(len(dilated_ksize)):
    dilated_ksize[i] = (ksize[i] - 1) * dilation[i] + 1
  # Defaults, overridden by the engine-specific limits below when present.
  kernel_limit = DPUTargetHelper.parse_range("1-16")
  stride_limit = DPUTargetHelper.parse_range("1-4")
  pad_limit = {}
  # Depthwise support is described either by the ALU engine (newer targets)
  # or by the dedicated dwconv engine.
  if DPUTargetHelper.has_alu_engine(target):
    alu_engine = DPUTargetHelper.get_alu_engine(target)
    channel_parallel = alu_engine.channel_parallel
    if DPUTargetHelper.has_attr(alu_engine, "alu_limit"):
      alu_limit = alu_engine.alu_limit
      if alu_limit.kernel_size:
        kernel_limit = DPUTargetHelper.parse_range(alu_limit.kernel_size)
      if alu_limit.stride:
        stride_limit = DPUTargetHelper.parse_range(alu_limit.stride)
    if DPUTargetHelper.has_attr(alu_engine, "pad_limit"):
      # Only the sides the target constrains are recorded.
      alu_pad_limit = alu_engine.pad_limit
      if alu_pad_limit.pad_left:
        pad_limit["pad_left"] = DPUTargetHelper.parse_range(alu_pad_limit.pad_left)
      if alu_pad_limit.pad_right:
        pad_limit["pad_right"] = DPUTargetHelper.parse_range(alu_pad_limit.pad_right)
      if alu_pad_limit.pad_top:
        pad_limit["pad_top"] = DPUTargetHelper.parse_range(alu_pad_limit.pad_top)
      if alu_pad_limit.pad_bottom:
        pad_limit["pad_bottom"] = DPUTargetHelper.parse_range(alu_pad_limit.pad_bottom)
  else:
    dwconv_engine = DPUTargetHelper.get_dwconv_engine(target)
    channel_parallel = dwconv_engine.channel_parallel
    if DPUTargetHelper.has_attr(dwconv_engine, "dwconv_limit"):
      dwconv_limit = dwconv_engine.dwconv_limit
      if dwconv_limit.kernel_size:
        kernel_limit = DPUTargetHelper.parse_range(dwconv_limit.kernel_size)
      if dwconv_limit.stride:
        stride_limit = DPUTargetHelper.parse_range(dwconv_limit.stride)
  # DPUCAHX8H additionally requires stride_w <= kernel_w.
  if DPUTargetHelper.get_type(target) == "DPUCAHX8H":
    if strides[0] > ksize[0]:
      msg = f"The stride_w({strides[0]}) > kernel_w({ksize[0]}), but {DPUTargetHelper.get_name(target)} only support stride_w <= kernel_w."
      return False, msg
  ret, msg = check_kernel(ksize, kernel_limit)
  if not ret:
    return ret, msg
  iw = node.in_tensors[0].shape[2]  # NHWC: width
  ih = node.in_tensors[0].shape[1]  # NHWC: height
  # Global (whole-feature-map) kernels skip the stride check.
  if not(iw == ksize[0] and ih == ksize[1]):
    ret, msg = check_stride(strides, stride_limit)
    if not ret:
      return ret, msg
  ret, msg = check_load_jump_write(ic, channel_parallel, dilation)
  if not ret:
    return ret, msg
  ret, msg = check_save_jump_read(oc, channel_parallel)
  if not ret:
    return ret, msg
  # Weights must fit the weight bank depth of whichever engine is used.
  if DPUTargetHelper.has_alu_engine(target):
    ret, msg = check_dwconv_weights_bank_depth(target, DPUTargetHelper.get_alu_engine(target), node.op.get_param(node.op.ParamName.WEIGHTS).shape)
    if not ret:
      return ret, msg
  else:
    ret, msg = check_dwconv_weights_bank_depth(target, DPUTargetHelper.get_dwconv_engine(target), node.op.get_param(node.op.ParamName.WEIGHTS).shape)
    if not ret:
      return ret, msg
  # Padding checks use the dilated kernel extent.
  if pad_limit:
    ret, msg = check_pad_with_limit(padding, dilated_ksize, pad_limit)
    if not ret:
      return ret, msg
  else:
    ret, msg = check_pad(padding, dilated_ksize)
    if not ret:
      return ret, msg
  # if DPUTargetHelper.has_alu_engine(target):
  #   ret, msg = check_nonlinear(DPUTargetHelper.get_alu_engine(target), node)
  #   if not ret:
  #     return ret, msg
  # else:
  #   ret, msg = check_nonlinear(DPUTargetHelper.get_dwconv_engine(target), node)
  #   if not ret:
  #     return ret, msg
  return True, msg
def check_dim_of_inputs_of_mul(node):
  """Check whether a mul-by-constant can be replaced by a 1x1 dwconv.

  Replaceable when the variable input has rank <= 4 and the constant is a
  1-D vector that either matches the input's channel (last) dimension or
  is a scalar (length 1). Returns (ok, msg); msg explains the shapes on
  failure. Fixes the "dimenstion" typo in the original message.
  """
  msg = ""
  is_replaceable = True
  input_shape = node.in_tensors[0].shape
  const_node = node.in_tensors[1].node
  const_data = const_node.node_attr(const_node.op.AttrName.DATA)
  # Scalar constants arrive as a bare value; normalize to a list first.
  if not isinstance(const_data, list):
    const_data = [const_data]
  const_data = np.array(const_data)
  const_shape = const_data.shape
  is_replaceable = (len(input_shape) <= 4 and
                    len(const_shape) == 1 and
                    (input_shape[-1] == const_shape[0] or
                     const_shape[0] == 1))
  if not is_replaceable:
    msg = f"mul's input has the tensor dimension {input_shape} and weights has the tensor dimension {const_shape}."
    return False, msg
  return True, msg
def create_dwconv2d_from_mul(node):
  """Build a 1x1 DepthwiseConv2d op standing in for an elementwise mul.

  The weight is a random placeholder of shape (1, 1, 1, input_channel);
  only the op's structure matters to the downstream filter.
  """
  channels = node.in_tensors[0].shape[-1]
  op = TorchConv2d(NNDCT_OP.DEPTHWISE_CONV2D)
  op.set_config('kernel_size', [1, 1])
  op.set_config('stride', [1, 1])
  op.set_config('padding', [0, 0])
  weight = Tensor("weight")
  weight.from_ndarray(np.random.randn(1, 1, 1, channels))
  op.set_param(op.ParamName.WEIGHTS, weight)
  return op
class DPUTargetHelper(object):
  """Namespace of helpers for querying a DPU target description.

  Methods are written without ``self``/``@staticmethod`` and are invoked as
  ``DPUTargetHelper.method(target)`` (plain functions under Python 3).
  """

  def get_basic_info(dpu_target):
    # Short identity summary: name / type / ISA version.
    return f"name: {dpu_target.name}\ntype: {dpu_target.type}\nisa_version: {dpu_target.isa_version}"

  def get_full_info(dpu_target):
    # Verbose legacy target definition.
    return dpu_target._legacy_dpu_target_def

  def parse_range(num_range):
    """Expand a spec such as "1-8,16" into a flat int list [1..8, 16]."""
    import re  # local import: `re` is not visibly imported at module level
    new_range = []
    token = ","
    single_pattern = re.compile(r"""\s*(\d+)\s*""")
    range_pattern = re.compile(r"""\s*(\d+)\s*-\s*(\d+)\s*""")
    for num_item in num_range.split(token):
      if "-" in num_item:
        result = range_pattern.match(num_item)
        lower = int(result.group(1))
        upper = int(result.group(2))
        new_range.extend(list(range(lower, upper + 1)))
      else:
        result = single_pattern.match(num_item)
        num = int(result.group(1))
        new_range.append(num)
    return new_range

  def has_attr(message, member):
    """True when protobuf `message` carries sub-message `member` with content."""
    assert hasattr(message, "ByteSize")
    return hasattr(message, member) and getattr(message, member).ByteSize() > 0

  def get_name(dpu_target):
    return dpu_target.get_name()

  def get_type(dpu_target):
    return dpu_target.get_type()

  def get_conv_engine(dpu_target):
    return dpu_target.get_conv_engine()

  def get_alu_engine(dpu_target):
    return dpu_target.get_alu_engine()

  def get_pool_engine(dpu_target):
    return dpu_target.get_pool_engine()

  def get_eltwise_engine(dpu_target):
    return dpu_target.get_eltwise_engine()

  def has_alu_engine(dpu_target):
    # Engine exists only if the proto field is present and non-empty.
    return hasattr(dpu_target, "alu_engine") and dpu_target.get_alu_engine().ByteSize() > 0

  def has_pool_engine(dpu_target):
    return hasattr(dpu_target, "pool_engine") and dpu_target.get_pool_engine().ByteSize() > 0

  def has_dwconv_engine(dpu_target):
    return hasattr(dpu_target, "dwconv_engine") and dpu_target.get_dwconv_engine().ByteSize() > 0

  def has_eltwise_engine(dpu_target):
    return hasattr(dpu_target, "eltwise_engine") and dpu_target.get_eltwise_engine().ByteSize() > 0

  def get_bank_group(dpu_target):
    return dpu_target.get_bank_group()

  def get_load_engine(dpu_target):
    return dpu_target.get_load_engine()

  def get_dwconv_engine(dpu_target):
    return dpu_target.get_dwconv_engine()
def filter_eltwise(node, target):
  """Check an elementwise ADD/MUL node against the DPU target.

  A MUL whose second input is a constant is first tried as a 1x1
  depthwise-conv replacement; if that fails (or for other ops), the
  target's eltwise engine must support the op type. Returns (ok, msg).
  """
  msg = ""
  if node.op.type == NNDCT_OP.MULTIPLY and node.in_tensors[1].node.op.type in [NNDCT_OP.CONST, NNDCT_OP.TENSOR]:
    prefix_msg = "Try to convert mul to DepthwiseConv2d failed."
    ret, check_msg = check_dim_of_inputs_of_mul(node)
    if not ret:
      msg = prefix_msg + check_msg
    else:
      # Temporarily disguise the node as a DepthwiseConv2d (shapes padded
      # with leading 1s to rank 4) so the dwconv filter can judge it, then
      # restore the original op and shapes before returning.
      dwconv2d = create_dwconv2d_from_mul(node)
      mul_op = node.op
      node.op = dwconv2d
      input_tensor = node.in_tensors[0]
      out_tensor = node.out_tensors[0]
      old_shape = input_tensor.shape
      new_shape = list(old_shape)
      for i in range(4 - len(old_shape)):
        new_shape.insert(0, 1)
      input_tensor.shape = new_shape
      out_tensor.shape = new_shape
      ret, check_msg = filter_depthwise_conv2d(node, target)
      node.op = mul_op
      input_tensor.shape = old_shape
      out_tensor.shape = old_shape
      if not ret:
        msg = prefix_msg + check_msg
      else:
        return True, msg
  # Fall back to the plain eltwise engine.
  ret, check_msg = check_eltwise_engine(target)
  if not ret:
    return ret, msg + check_msg
  eltwise_engine = DPUTargetHelper.get_eltwise_engine(target)
  support_list = eltwise_engine.elew_type
  if node.op.type == NNDCT_OP.ADD:
    has_add = any([t == eltwise_engine.add for t in support_list])
    if not has_add:
      msg = f"{DPUTargetHelper.get_name(target)} does not support eltwise ADD."
      return False, msg
  elif node.op.type == NNDCT_OP.MULTIPLY:
    has_mul = any([t == eltwise_engine.mult for t in support_list])
    if not has_mul:
      # += keeps any conversion-failure note accumulated above.
      msg += f"{DPUTargetHelper.get_name(target)} does not support eltwise MUL."
      return False, msg
  else:
    msg = f"{DPUTargetHelper.get_name(target)} does not support {node.op.type}."
    return False, msg
  return True, msg
import math
import numpy as np
from nndct_shared.base import NNDCT_OP
from nndct_shared.utils import PatternType
from nndct_shared.nndct_graph import GraphSearcher, Tensor
from pytorch_nndct.parse.torch_op_def import TorchConvTranspose2d, TorchConv2d
from .device import DeviceInfo, DeviceType
from .target_helper import DPUTargetHelper
def check_bilinear_upsample_scale(node):
  """Check a bilinear upsample's scale and record it on the node.

  Only integer, equal w/h scales from a fixed set are supported
  (2/4 with half-pixel centers, else 2/4/8). On success the node's
  SCALE attribute is set to the derived float scales.
  Returns (ok, msg).
  """
  msg = ""
  input_shape = node.in_tensors[0].shape   # NHWC
  output_shape = node.out_tensors[0].shape
  i_h = input_shape[1]
  i_w = input_shape[2]
  o_h = output_shape[1]
  o_w = output_shape[2]
  scale_f = [1.0, 1.0]  # [scale_w, scale_h]
  scale = []
  scale_f[0] = float(o_w) / float(i_w)
  scale_f[1] = float(o_h) / float(i_h)
  half_pixel_centers = node.node_attr(node.op.AttrName.HALF_PIXEL_CENTERS)
  if half_pixel_centers:
    allowed_scale = [2, 4]
  else:
    allowed_scale = [2, 4, 8]
  for s_f in scale_f:
    # Scale must be an exact integer from the allowed set.
    if not (math.ceil(s_f) == s_f and math.floor(s_f) == s_f and
            any([s== s_f for s in allowed_scale])):
      msg = f"{node.op.type} output / input scale is {scale_f}"
      return False, msg
    scale.append(int(s_f))
  if not all([scale[0] == s for s in scale]):
    msg = "scale_w is not equal with scale_h"
    return False, msg
  # The equivalent transposed-dwconv needs a representable fake weight.
  ret, msg = check_bilinear_upsample_fake_weight(node, scale[1], scale[0])
  if not ret:
    return ret, msg
  node.set_node_attr(node.op.AttrName.SCALE, [float(scale[0]), float(scale[1])])
  return True, msg
def create_transpose_dwconv2d_from_bilinear_upsample(node):
  """Build the transposed depthwise Conv2d equivalent of a bilinear upsample.

  Kernel is 2*scale per axis with stride = scale; padding is derived so the
  conv output matches the resize. Half-pixel-centers mode differs only in
  the pad offsets. NOTE(review): the +2 added to input_w/input_h presumably
  accounts for a one-pixel border around the input — confirm against the
  caller before relying on it.
  """
  transpose_dwconv2d = TorchConvTranspose2d(NNDCT_OP.DEPTHWISE_CONVTRANSPOSE2D)
  scale_w, scale_h = node.node_attr(node.op.AttrName.SCALE)
  scale_w = int(scale_w)
  scale_h = int(scale_h)
  half_pixel_centers = node.node_attr(node.op.AttrName.HALF_PIXEL_CENTERS)
  input_shape = node.in_tensors[0].shape   # NHWC
  output_shape = node.out_tensors[0].shape
  kernel_h = 2 * scale_h
  kernel_w = 2 * scale_w
  transpose_dwconv2d.set_config('output_padding', [0, 0])
  transpose_dwconv2d.set_config('kernel_size', [kernel_h, kernel_w])
  transpose_dwconv2d.set_config('stride', [scale_h, scale_w])
  input_w = input_shape[2] + 2
  input_h = input_shape[1] + 2
  if half_pixel_centers:
    # Half-pixel centers: asymmetric pads derived from the output extent.
    pad_l = int(math.floor(float(scale_w) / 2.0 - 0.5))
    pad_r = output_shape[2] + int(kernel_w) - 2 - (input_w - 1) * scale_w - pad_l
    pad_t = int(math.floor(float(scale_h) / 2.0 - 0.5))
    pad_b = output_shape[1] + int(kernel_h) - 2 - (input_h - 1) * scale_h - pad_t
  else:
    pad_l = scale_w - 1
    pad_r = scale_w - 1
    pad_t = scale_h - 1
    pad_b = scale_h - 1
  # Convert resize pads into transposed-conv pads (kernel-1 complement).
  padding = [int(kernel_w) - 1 - pad_l,
             int(kernel_w) - 1 - pad_r,
             int(kernel_h) - 1 - pad_t,
             int(kernel_h) - 1 - pad_b]
  transpose_dwconv2d.set_attr(transpose_dwconv2d.AttrName.PAD_MODE, 0)
  transpose_dwconv2d.set_attr(transpose_dwconv2d.AttrName.PAD, padding)
  return transpose_dwconv2d
def check_load_jump_write(ic, channel_parallel, dilation=None):
  """Input channel count must fit 256 * channel_parallel; returns (ok, msg)."""
  if dilation is None:
    dilation = [1, 1]  # kept for signature compatibility; not used by the check
  cp_limit = 256 * channel_parallel
  if ic > cp_limit:
    return False, f"DPU only supports 'input_channel'({ic}) less than ({cp_limit})"
  return True, ""
def filter_transpose_depthwise_conv2d(node, target):
  """Check a transposed depthwise Conv2d node against the DPU target.

  Returns (ok, msg): False plus a reason for the first violated
  constraint, otherwise (True, "").

  Bug fix: the original ignored the result of check_save_jump_read (no
  early return on failure), unlike every other check in this function.
  """
  msg = ""
  ret, msg = check_dwconv_engine(target)
  if not ret:
    return ret, msg
  ksize = node.node_attr(node.op.AttrName.KERNEL)    # [kernel_w, kernel_h]
  strides = node.node_attr(node.op.AttrName.STRIDE)
  padding = node.node_attr(node.op.AttrName.PAD)     # [left, right, top, bottom]
  dilation = node.node_attr(node.op.AttrName.DILATION)
  # NHWC layout: dim 3 is the channel dimension.
  ic = node.in_tensors[0].shape[3]
  oc = node.out_tensors[0].shape[3]
  output_padding = node.node_config("output_padding")
  # The DPU cannot realize the extra rows/cols output_padding would add.
  if any([pad != 0 for pad in output_padding]):
    msg = "DPU does not support output_padding."
    return False, msg
  # Defaults, overridden by the engine-specific limits below when present.
  kernel_limit = DPUTargetHelper.parse_range("1-16")
  stride_limit = DPUTargetHelper.parse_range("1-4")
  pad_limit = {}
  # Depthwise support is described either by the ALU engine (newer targets)
  # or by the dedicated dwconv engine.
  if DPUTargetHelper.has_alu_engine(target):
    alu_engine = DPUTargetHelper.get_alu_engine(target)
    channel_parallel = alu_engine.channel_parallel
    if DPUTargetHelper.has_attr(alu_engine, "alu_limit"):
      alu_limit = alu_engine.alu_limit
      if alu_limit.kernel_size:
        kernel_limit = DPUTargetHelper.parse_range(alu_limit.kernel_size)
      if alu_limit.stride:
        stride_limit = DPUTargetHelper.parse_range(alu_limit.stride)
    if DPUTargetHelper.has_attr(alu_engine, "pad_limit"):
      # Only the sides the target constrains are recorded.
      alu_pad_limit = alu_engine.pad_limit
      if alu_pad_limit.pad_left:
        pad_limit["pad_left"] = DPUTargetHelper.parse_range(alu_pad_limit.pad_left)
      if alu_pad_limit.pad_right:
        pad_limit["pad_right"] = DPUTargetHelper.parse_range(alu_pad_limit.pad_right)
      if alu_pad_limit.pad_top:
        pad_limit["pad_top"] = DPUTargetHelper.parse_range(alu_pad_limit.pad_top)
      if alu_pad_limit.pad_bottom:
        pad_limit["pad_bottom"] = DPUTargetHelper.parse_range(alu_pad_limit.pad_bottom)
  else:
    dwconv_engine = DPUTargetHelper.get_dwconv_engine(target)
    channel_parallel = dwconv_engine.channel_parallel
    if DPUTargetHelper.has_attr(dwconv_engine, "dwconv_limit"):
      dwconv_limit = dwconv_engine.dwconv_limit
      if dwconv_limit.kernel_size:
        kernel_limit = DPUTargetHelper.parse_range(dwconv_limit.kernel_size)
      if dwconv_limit.stride:
        stride_limit = DPUTargetHelper.parse_range(dwconv_limit.stride)
  # Transposed conv: the effective per-axis kernel is kernel / stride.
  ret, msg = check_transposed_kernel(ksize[0], strides[0], kernel_limit)
  if not ret:
    return ret, msg
  ret, msg = check_transposed_kernel(ksize[1], strides[1], kernel_limit)
  if not ret:
    return ret, msg
  # The realized (post-transform) stride is always 1x1.
  ret, msg = check_stride([1, 1], stride_limit)
  if not ret:
    return ret, msg
  ret, msg = check_load_jump_write(ic, channel_parallel, dilation)
  if not ret:
    return ret, msg
  ret, msg = check_save_jump_read(oc, channel_parallel)
  if not ret:  # fixed: this result was previously ignored
    return ret, msg
  if pad_limit:
    ret, msg = check_pad_with_limit(padding, ksize, pad_limit)
    if not ret:
      return ret, msg
  else:
    ret, msg = check_pad(padding, ksize)
    if not ret:
      return ret, msg
  # if DPUTargetHelper.has_alu_engine(target):
  #   ret, msg = check_nonlinear(DPUTargetHelper.get_alu_engine(target), node)
  #   if not ret:
  #     return ret, msg
  # else:
  #   ret, msg = check_nonlinear(DPUTargetHelper.get_dwconv_engine(target), node)
  #   if not ret:
  #     return ret, msg
  return True, msg
class DPUTargetHelper(object):
    """Static-style helpers for querying a DPU target description.

    NOTE(review): methods take the target (or a protobuf message) as the first
    positional argument and are invoked through the class, e.g.
    ``DPUTargetHelper.get_name(t)``; they are not instance methods despite the
    missing ``@staticmethod`` decorators.
    """

    def get_basic_info(dpu_target):
        # Human-readable one-line-per-field summary of the target identity.
        return f"name: {dpu_target.name}\ntype: {dpu_target.type}\nisa_version: {dpu_target.isa_version}"

    def get_full_info(dpu_target):
        # Raw legacy target definition object.
        return dpu_target._legacy_dpu_target_def

    def parse_range(num_range):
        """Expand a spec such as "1-3, 5" into the explicit list [1, 2, 3, 5].

        Comma-separated items are either single integers or inclusive
        "lo-hi" ranges; surrounding whitespace is tolerated.
        """
        new_range = []
        token = ","
        single_pattern = re.compile(r"""\s*(\d+)\s*""")
        range_pattern = re.compile(r"""\s*(\d+)\s*-\s*(\d+)\s*""")
        for num_item in num_range.split(token):
            if "-" in num_item:
                result = range_pattern.match(num_item)
                lower = int(result.group(1))
                upper = int(result.group(2))
                # Upper bound is inclusive.
                new_range.extend(list(range(lower, upper + 1)))
            else:
                result = single_pattern.match(num_item)
                num = int(result.group(1))
                new_range.append(num)
        return new_range

    def has_attr(message, member):
        # Protobuf messages expose ByteSize(); a sub-message counts as present
        # only when it is non-empty.
        assert hasattr(message, "ByteSize")
        return hasattr(message, member) and getattr(message, member).ByteSize() > 0

    # --- thin delegators onto the target object's own accessors ---

    def get_name(dpu_target):
        return dpu_target.get_name()

    def get_type(dpu_target):
        return dpu_target.get_type()

    def get_conv_engine(dpu_target):
        return dpu_target.get_conv_engine()

    def get_alu_engine(dpu_target):
        return dpu_target.get_alu_engine()

    def get_pool_engine(dpu_target):
        return dpu_target.get_pool_engine()

    def get_eltwise_engine(dpu_target):
        return dpu_target.get_eltwise_engine()

    # Engine presence checks: the attribute must exist AND be non-empty.

    def has_alu_engine(dpu_target):
        return hasattr(dpu_target, "alu_engine") and dpu_target.get_alu_engine().ByteSize() > 0

    def has_pool_engine(dpu_target):
        return hasattr(dpu_target, "pool_engine") and dpu_target.get_pool_engine().ByteSize() > 0

    def has_dwconv_engine(dpu_target):
        return hasattr(dpu_target, "dwconv_engine") and dpu_target.get_dwconv_engine().ByteSize() > 0

    def has_eltwise_engine(dpu_target):
        return hasattr(dpu_target, "eltwise_engine") and dpu_target.get_eltwise_engine().ByteSize() > 0

    def get_bank_group(dpu_target):
        return dpu_target.get_bank_group()

    def get_load_engine(dpu_target):
        return dpu_target.get_load_engine()

    def get_dwconv_engine(dpu_target):
        return dpu_target.get_dwconv_engine()
def filter_upsample(node, target):
    """Decide whether an upsample node can run on the DPU.

    BILINEAR upsampling is first attempted as a conversion to a transposed
    depthwise conv2d; if that conversion passes the transposed-dwconv checks
    the node is accepted immediately.  Otherwise the remaining checks
    (align_corners, mode, load-engine channel limit) decide.

    Returns:
        (bool, str): (supported, diagnostic message; empty when supported).
    """
    msg = ""
    if node.node_attr(node.op.AttrName.MODE) == "BILINEAR":
        prefix_msg = "Try to convert BlinearUpsamle2d to transpose depthwise conv2d failed."
        ret, check_msg = check_bilinear_upsample_scale(node)
        if not ret:
            msg = prefix_msg + check_msg
        else:
            # Temporarily swap the node's op for the equivalent transposed
            # depthwise conv so the dwconv filter can be reused, then restore.
            transpose_dwconv2d = create_transpose_dwconv2d_from_bilinear_upsample(node)
            upsample = node.op
            node.op = transpose_dwconv2d
            ret, check_msg = filter_transpose_depthwise_conv2d(node, target)
            node.op = upsample
            if not ret:
                msg = prefix_msg + check_msg
            else:
                return True, msg
    align_corners = node.node_attr(node.op.AttrName.ALIGN_CORNERS)
    if align_corners:
        msg = "DPU does not support align_corners = True"
        return False, msg
    mode = node.node_attr(node.op.AttrName.MODE)
    if mode == "BILINEAR":
        # ``ret`` is necessarily False here: the conversion attempt above failed.
        msg += f"DPU does not support {mode} mode.(only support NEAREST mode)."
        return ret, msg
    load_engine = DPUTargetHelper.get_load_engine(target)
    channel_parallel = load_engine.channel_parallel
    # NHWC layout: channels sit on the last axis.
    ic = node.in_tensors[0].shape[3]
    ret, msg = check_load_jump_write(ic, channel_parallel)
    if not ret:
        return ret, msg
    return True, msg
23,712 | import math
import numpy as np
from nndct_shared.base import NNDCT_OP
from nndct_shared.utils import PatternType
from nndct_shared.nndct_graph import GraphSearcher, Tensor
from pytorch_nndct.parse.torch_op_def import TorchConvTranspose2d, TorchConv2d
from .device import DeviceInfo, DeviceType
from .target_helper import DPUTargetHelper
class DeviceType(AutoName):
    """Compute devices a graph node may be partitioned onto."""
    CPU = auto()  # host fallback execution
    DPU = auto()  # accelerated execution on the DPU
class DPUTargetHelper(object):
    """Class-level convenience accessors over a DPU target definition.

    Every method takes the target (or a protobuf message) as its first
    argument and is meant to be called through the class itself.
    """

    def get_basic_info(dpu_target):
        """One-line-per-field identity summary of the target."""
        return f"name: {dpu_target.name}\ntype: {dpu_target.type}\nisa_version: {dpu_target.isa_version}"

    def get_full_info(dpu_target):
        """The raw legacy target definition."""
        return dpu_target._legacy_dpu_target_def

    def parse_range(num_range):
        """Expand a comma-separated spec such as "1-3, 5" into [1, 2, 3, 5]."""
        span_re = re.compile(r"""\s*(\d+)\s*-\s*(\d+)\s*""")
        value_re = re.compile(r"""\s*(\d+)\s*""")
        values = []
        for piece in num_range.split(","):
            if "-" in piece:
                bounds = span_re.match(piece)
                lo, hi = int(bounds.group(1)), int(bounds.group(2))
                values += range(lo, hi + 1)  # inclusive upper bound
            else:
                values.append(int(value_re.match(piece).group(1)))
        return values

    def has_attr(message, member):
        """True when protobuf *message* carries a non-empty sub-message *member*."""
        assert hasattr(message, "ByteSize")
        if not hasattr(message, member):
            return False
        return getattr(message, member).ByteSize() > 0

    def get_name(dpu_target):
        return dpu_target.get_name()

    def get_type(dpu_target):
        return dpu_target.get_type()

    def get_conv_engine(dpu_target):
        return dpu_target.get_conv_engine()

    def get_alu_engine(dpu_target):
        return dpu_target.get_alu_engine()

    def get_pool_engine(dpu_target):
        return dpu_target.get_pool_engine()

    def get_eltwise_engine(dpu_target):
        return dpu_target.get_eltwise_engine()

    def has_alu_engine(dpu_target):
        return hasattr(dpu_target, "alu_engine") and dpu_target.get_alu_engine().ByteSize() > 0

    def has_pool_engine(dpu_target):
        return hasattr(dpu_target, "pool_engine") and dpu_target.get_pool_engine().ByteSize() > 0

    def has_dwconv_engine(dpu_target):
        return hasattr(dpu_target, "dwconv_engine") and dpu_target.get_dwconv_engine().ByteSize() > 0

    def has_eltwise_engine(dpu_target):
        return hasattr(dpu_target, "eltwise_engine") and dpu_target.get_eltwise_engine().ByteSize() > 0

    def get_bank_group(dpu_target):
        return dpu_target.get_bank_group()

    def get_load_engine(dpu_target):
        return dpu_target.get_load_engine()

    def get_dwconv_engine(dpu_target):
        return dpu_target.get_dwconv_engine()
def filter_reshape(node, target):
    """Reshape is DPU-eligible only off DPUCADF8H and when its producer is on the DPU.

    Returns:
        (bool, str): (supported, message — always empty here).
    """
    msg = ""
    # DPUCADF8H cannot take reshape at all.
    if DPUTargetHelper.get_type(target) == "DPUCADF8H":
        return False, msg
    producer = node.owning_graph.parents(node)[0]
    dev = producer.target_device
    if not (dev and dev.get_device_type() == DeviceType.DPU):
        return False, msg
    return True, msg
23,713 | import math
import numpy as np
from nndct_shared.base import NNDCT_OP
from nndct_shared.utils import PatternType
from nndct_shared.nndct_graph import GraphSearcher, Tensor
from pytorch_nndct.parse.torch_op_def import TorchConvTranspose2d, TorchConv2d
from .device import DeviceInfo, DeviceType
from .target_helper import DPUTargetHelper
def check_load_jump_write(ic, channel_parallel, dilation=None):
    """Validate the input-channel count against the load-engine limit.

    The limit is 256 channels per parallel lane.  ``dilation`` is accepted
    for signature compatibility but does not influence this check.

    Returns:
        (bool, str): (ok, message; empty on success).
    """
    if dilation is None:
        dilation = [1, 1]
    limit = 256 * channel_parallel
    if ic > limit:
        return False, f"DPU only supports 'input_channel'({ic}) less than ({limit})"
    return True, ""
class DPUTargetHelper(object):
    """Class-level convenience accessors over a DPU target definition.

    Every method takes the target (or a protobuf message) as its first
    argument and is meant to be called through the class itself.
    """

    def get_basic_info(dpu_target):
        """One-line-per-field identity summary of the target."""
        return f"name: {dpu_target.name}\ntype: {dpu_target.type}\nisa_version: {dpu_target.isa_version}"

    def get_full_info(dpu_target):
        """The raw legacy target definition."""
        return dpu_target._legacy_dpu_target_def

    def parse_range(num_range):
        """Expand a comma-separated spec such as "1-3, 5" into [1, 2, 3, 5]."""
        span_re = re.compile(r"""\s*(\d+)\s*-\s*(\d+)\s*""")
        value_re = re.compile(r"""\s*(\d+)\s*""")
        values = []
        for piece in num_range.split(","):
            if "-" in piece:
                bounds = span_re.match(piece)
                lo, hi = int(bounds.group(1)), int(bounds.group(2))
                values += range(lo, hi + 1)  # inclusive upper bound
            else:
                values.append(int(value_re.match(piece).group(1)))
        return values

    def has_attr(message, member):
        """True when protobuf *message* carries a non-empty sub-message *member*."""
        assert hasattr(message, "ByteSize")
        if not hasattr(message, member):
            return False
        return getattr(message, member).ByteSize() > 0

    def get_name(dpu_target):
        return dpu_target.get_name()

    def get_type(dpu_target):
        return dpu_target.get_type()

    def get_conv_engine(dpu_target):
        return dpu_target.get_conv_engine()

    def get_alu_engine(dpu_target):
        return dpu_target.get_alu_engine()

    def get_pool_engine(dpu_target):
        return dpu_target.get_pool_engine()

    def get_eltwise_engine(dpu_target):
        return dpu_target.get_eltwise_engine()

    def has_alu_engine(dpu_target):
        return hasattr(dpu_target, "alu_engine") and dpu_target.get_alu_engine().ByteSize() > 0

    def has_pool_engine(dpu_target):
        return hasattr(dpu_target, "pool_engine") and dpu_target.get_pool_engine().ByteSize() > 0

    def has_dwconv_engine(dpu_target):
        return hasattr(dpu_target, "dwconv_engine") and dpu_target.get_dwconv_engine().ByteSize() > 0

    def has_eltwise_engine(dpu_target):
        return hasattr(dpu_target, "eltwise_engine") and dpu_target.get_eltwise_engine().ByteSize() > 0

    def get_bank_group(dpu_target):
        return dpu_target.get_bank_group()

    def get_load_engine(dpu_target):
        return dpu_target.get_load_engine()

    def get_dwconv_engine(dpu_target):
        return dpu_target.get_dwconv_engine()
def filter_pad(node, target):
    """Pad runs on the DPU only in CONSTANT/SYMMETRIC mode and within channel limits.

    Returns:
        (bool, str): (supported, diagnostic message).
    """
    mode = node.node_attr(node.op.AttrName.MODE)
    # Mode codes: 0 == CONSTANT, 2 == SYMMETRIC; everything else stays on CPU.
    if mode not in (0, 2):
        return False, f"DPU only support CONSTANT or SYMMETRIC mode."
    channel_parallel = DPUTargetHelper.get_load_engine(target).channel_parallel
    # NHWC layout: channels on the last axis.
    in_channels = node.in_tensors[0].shape[3]
    ok, msg = check_load_jump_write(in_channels, channel_parallel)
    if not ok:
        return ok, msg
    return True, msg
23,714 | import math
import numpy as np
from nndct_shared.base import NNDCT_OP
from nndct_shared.utils import PatternType
from nndct_shared.nndct_graph import GraphSearcher, Tensor
from pytorch_nndct.parse.torch_op_def import TorchConvTranspose2d, TorchConv2d
from .device import DeviceInfo, DeviceType
from .target_helper import DPUTargetHelper
class DPUTargetHelper(object):
    """Class-level convenience accessors over a DPU target definition.

    Every method takes the target (or a protobuf message) as its first
    argument and is meant to be called through the class itself.
    """

    def get_basic_info(dpu_target):
        """One-line-per-field identity summary of the target."""
        return f"name: {dpu_target.name}\ntype: {dpu_target.type}\nisa_version: {dpu_target.isa_version}"

    def get_full_info(dpu_target):
        """The raw legacy target definition."""
        return dpu_target._legacy_dpu_target_def

    def parse_range(num_range):
        """Expand a comma-separated spec such as "1-3, 5" into [1, 2, 3, 5]."""
        span_re = re.compile(r"""\s*(\d+)\s*-\s*(\d+)\s*""")
        value_re = re.compile(r"""\s*(\d+)\s*""")
        values = []
        for piece in num_range.split(","):
            if "-" in piece:
                bounds = span_re.match(piece)
                lo, hi = int(bounds.group(1)), int(bounds.group(2))
                values += range(lo, hi + 1)  # inclusive upper bound
            else:
                values.append(int(value_re.match(piece).group(1)))
        return values

    def has_attr(message, member):
        """True when protobuf *message* carries a non-empty sub-message *member*."""
        assert hasattr(message, "ByteSize")
        if not hasattr(message, member):
            return False
        return getattr(message, member).ByteSize() > 0

    def get_name(dpu_target):
        return dpu_target.get_name()

    def get_type(dpu_target):
        return dpu_target.get_type()

    def get_conv_engine(dpu_target):
        return dpu_target.get_conv_engine()

    def get_alu_engine(dpu_target):
        return dpu_target.get_alu_engine()

    def get_pool_engine(dpu_target):
        return dpu_target.get_pool_engine()

    def get_eltwise_engine(dpu_target):
        return dpu_target.get_eltwise_engine()

    def has_alu_engine(dpu_target):
        return hasattr(dpu_target, "alu_engine") and dpu_target.get_alu_engine().ByteSize() > 0

    def has_pool_engine(dpu_target):
        return hasattr(dpu_target, "pool_engine") and dpu_target.get_pool_engine().ByteSize() > 0

    def has_dwconv_engine(dpu_target):
        return hasattr(dpu_target, "dwconv_engine") and dpu_target.get_dwconv_engine().ByteSize() > 0

    def has_eltwise_engine(dpu_target):
        return hasattr(dpu_target, "eltwise_engine") and dpu_target.get_eltwise_engine().ByteSize() > 0

    def get_bank_group(dpu_target):
        return dpu_target.get_bank_group()

    def get_load_engine(dpu_target):
        return dpu_target.get_load_engine()

    def get_dwconv_engine(dpu_target):
        return dpu_target.get_dwconv_engine()
def filter_hard_sigmoid(node, target):
    """A standalone hard-sigmoid requires the target to provide an ALU engine.

    Returns:
        (bool, str): (supported, diagnostic message; empty when supported).
    """
    if DPUTargetHelper.has_alu_engine(target):
        return True, ""
    return False, "This target does not support single hard-sigmoid."
23,715 | import math
import numpy as np
from nndct_shared.base import NNDCT_OP
from nndct_shared.utils import PatternType
from nndct_shared.nndct_graph import GraphSearcher, Tensor
from pytorch_nndct.parse.torch_op_def import TorchConvTranspose2d, TorchConv2d
from .device import DeviceInfo, DeviceType
from .target_helper import DPUTargetHelper
def filter_leaky_relu(node, target):
    """Check whether a LeakyReLU node can run on the DPU.

    The DPU implements LeakyReLU with a fixed hardware slope of 26/256;
    any other negative-slope value must fall back to CPU.

    Returns:
        (bool, str): (supported, diagnostic message; empty when supported).
    """
    msg = ""
    alpha = node.node_attr(node.op.AttrName.ALPHA)
    dpu_alpha = 26.0 / 256
    if alpha != dpu_alpha:
        # Fixed: the message previously misspelled "alpha" as "alpa".
        msg = f"Its alpha is {alpha}, but DPU only support {dpu_alpha}."
        return False, msg
    return True, msg
23,716 | import math
import numpy as np
from nndct_shared.base import NNDCT_OP
from nndct_shared.utils import PatternType
from nndct_shared.nndct_graph import GraphSearcher, Tensor
from pytorch_nndct.parse.torch_op_def import TorchConvTranspose2d, TorchConv2d
from .device import DeviceInfo, DeviceType
from .target_helper import DPUTargetHelper
class DeviceType(AutoName):
    """Compute devices a graph node may be partitioned onto."""
    CPU = auto()  # host fallback execution
    DPU = auto()  # accelerated execution on the DPU
class DeviceInfo(object):
    """Target-device assignment for a graph node plus an optional filter message.

    ``_device_partition_check_msg`` holds the human-readable explanation of why
    the partitioner rejected the node for a device; ``None`` means no message
    is recorded.
    """

    def __init__(self, device_type):
        # Only proper DeviceType members are accepted.
        assert isinstance(device_type, DeviceType)
        self._type = device_type
        self._device_partition_check_msg = None

    def get_device_type(self):
        """The DeviceType this node was assigned to."""
        return self._type

    def set_filter_message(self, msg):
        """Record why the partitioner filtered this node."""
        self._device_partition_check_msg = msg

    def get_filter_message(self):
        """Last recorded filter message, or None."""
        return self._device_partition_check_msg

    def clear_filter_message(self):
        """Drop any recorded filter message."""
        self._device_partition_check_msg = None
def merge_permute_to_matmul(graph, target):
    """Pull a NHWC->NCHW permute feeding (flatten|reshape)->dense onto the dense node's device.

    Matches PERMUTE->FLATTEN->DENSE and PERMUTE->RESHAPE->DENSE chains.  When
    the permute is not already on the DPU and its order is exactly
    [0, 3, 1, 2] (a pure layout transform), the permute inherits the dense
    node's target device and any stale filter message is cleared.
    ``target`` is unused here.
    """
    def handler(*args, **kwargs):
        # Second positional element is the matched node list; the first is
        # unused here.
        _, node_set = args
        permute_node = node_set[0]
        dense_node = node_set[-1]
        # Already mapped to DPU: nothing to do.
        if permute_node.target_device and permute_node.target_device.get_device_type() == DeviceType.DPU:
            return
        if permute_node.node_attr(permute_node.op.AttrName.ORDER) == [0, 3, 1, 2] and dense_node.target_device:
            permute_node.target_device = DeviceInfo(dense_node.target_device.get_device_type())
            permute_node.target_device.clear_filter_message()

    graph_searcher = GraphSearcher(graph)
    _ = graph_searcher.find_nodes_from_type(
        [PatternType(pattern=[NNDCT_OP.PERMUTE, NNDCT_OP.FLATTEN, NNDCT_OP.DENSE],
                     action=handler),
         PatternType(pattern=[NNDCT_OP.PERMUTE, NNDCT_OP.RESHAPE, NNDCT_OP.DENSE],
                     action=handler),
         ])
23,717 | import math
import numpy as np
from nndct_shared.base import NNDCT_OP
from nndct_shared.utils import PatternType
from nndct_shared.nndct_graph import GraphSearcher, Tensor
from pytorch_nndct.parse.torch_op_def import TorchConvTranspose2d, TorchConv2d
from .device import DeviceInfo, DeviceType
from .target_helper import DPUTargetHelper
def filter_concat(node, target):
    """Check whether a concat node is DPU-compatible.

    A concat qualifies when its output is 4-D and the concatenation happens
    on the last (channel, NHWC) axis.  On the DPUCADF8H target the shape/axis
    rules are additionally checked unconditionally.

    Returns:
        (bool, str): (supported, accumulated diagnostic message).
    """
    ret = True
    msg = ""
    if any([not pn.target_device or pn.target_device.get_device_type() == DeviceType.CPU for pn in node.owning_graph.parents(node)]):
        # At least one producer stays on the CPU.
        msg += "The input of concat is not in DPU subgraph."
        dimension = node.out_tensors[0].ndim
        if dimension != 4:
            msg += "And output dimension is not 4."
            ret = False
        else:
            if node.node_attr(node.op.AttrName.AXIS) != 3:
                msg += "And it's not a channel-wise concatenation."
                ret = False
        # NOTE(review): when the shape/axis checks pass, ret stays True even
        # though a producer is off-DPU — confirm this is intended.
    if DPUTargetHelper.get_name(target) == "DPUCADF8H":
        dimension = node.out_tensors[0].ndim
        if dimension != 4:
            msg += "Output dimension is not 4."
            ret = False
        else:
            if node.node_attr(node.op.AttrName.AXIS) != 3:
                msg += "It's not a channel-wise concatenation."
                ret = False
    return ret, msg
class DeviceType(AutoName):
    """Compute devices a graph node may be partitioned onto."""
    CPU = auto()  # host fallback execution
    DPU = auto()  # accelerated execution on the DPU
class DeviceInfo(object):
    """Device assignment for a node, with an optional partition-filter message."""

    def __init__(self, device_type):
        # Only proper DeviceType members are accepted.
        assert isinstance(device_type, DeviceType)
        self._type = device_type
        self._device_partition_check_msg = None

    def get_device_type(self):
        """The DeviceType this node was assigned to."""
        return self._type

    def get_filter_message(self):
        """Last recorded filter message, or None."""
        return self._device_partition_check_msg

    def set_filter_message(self, msg):
        """Record why the partitioner filtered this node."""
        self._device_partition_check_msg = msg

    def clear_filter_message(self):
        """Drop any recorded filter message."""
        self._device_partition_check_msg = None
def filter_dpu_interface_concat(graph, target):
    """Re-evaluate every CONCAT node that is not already mapped to the DPU.

    Eligible concats are promoted to the DPU; rejected ones are pinned to the
    CPU together with the rejection reason from ``filter_concat``.
    """
    def handler(*args, **kwargs):
        _, matched = args
        concat = matched[0]
        if concat.target_device and concat.target_device.get_device_type() == DeviceType.DPU:
            return  # already on DPU, leave untouched
        supported, why = filter_concat(concat, target)
        if supported:
            concat.target_device = DeviceInfo(DeviceType.DPU)
            concat.target_device.clear_filter_message()
        else:
            concat.target_device = DeviceInfo(DeviceType.CPU)
            concat.target_device.set_filter_message(why)

    searcher = GraphSearcher(graph)
    searcher.find_nodes_from_type(
        [PatternType(pattern=[NNDCT_OP.CONCAT], action=handler)])
23,718 | import math
import numpy as np
from nndct_shared.base import NNDCT_OP
from nndct_shared.utils import PatternType
from nndct_shared.nndct_graph import GraphSearcher, Tensor
from pytorch_nndct.parse.torch_op_def import TorchConvTranspose2d, TorchConv2d
from .device import DeviceInfo, DeviceType
from .target_helper import DPUTargetHelper
class DeviceType(AutoName):
    """Compute devices a graph node may be partitioned onto."""
    CPU = auto()  # host fallback execution
    DPU = auto()  # accelerated execution on the DPU
class DeviceInfo(object):
    """Target-device assignment for a graph node plus an optional filter message.

    ``_device_partition_check_msg`` holds the human-readable explanation of why
    the partitioner rejected the node for a device; ``None`` means no message
    is recorded.
    """

    def __init__(self, device_type):
        # Only proper DeviceType members are accepted.
        assert isinstance(device_type, DeviceType)
        self._type = device_type
        self._device_partition_check_msg = None

    def get_device_type(self):
        """The DeviceType this node was assigned to."""
        return self._type

    def set_filter_message(self, msg):
        """Record why the partitioner filtered this node."""
        self._device_partition_check_msg = msg

    def get_filter_message(self):
        """Last recorded filter message, or None."""
        return self._device_partition_check_msg

    def clear_filter_message(self):
        """Drop any recorded filter message."""
        self._device_partition_check_msg = None
def filter_dpu_interface_reshape(graph, target):
    """Demote DPU-assigned reshape/flatten nodes sitting on a DPU/CPU boundary.

    A reshape fully surrounded by DPU nodes is left alone.  A boundary
    reshape is pushed back to CPU when it changes the first (batch)
    dimension.  ``target`` is unused here.
    """
    def handler(*args, **kwargs):
        _, node_set = args
        reshape = node_set[0]
        # Only nodes currently mapped to the DPU are of interest.
        if not (reshape.target_device and reshape.target_device.get_device_type() == DeviceType.DPU):
            return
        input_node = reshape.owning_graph.parents(reshape)[0]
        if input_node.target_device and input_node.target_device.get_device_type() == DeviceType.DPU \
            and all([cn.target_device and cn.target_device.get_device_type() == DeviceType.DPU for cn in reshape.owning_graph.children(reshape)]):
            # an internal reshape, do nothing
            pass
        else:
            # Boundary reshape: reject it if the batch dimension changes.
            if input_node.out_tensors[0].shape[0] != reshape.out_tensors[0].shape[0]:
                msg = "First dimension is changed."
                reshape.target_device = DeviceInfo(DeviceType.CPU)
                reshape.target_device.set_filter_message(msg)

    graph_searcher = GraphSearcher(graph)
    _ = graph_searcher.find_nodes_from_type(
        [PatternType(pattern=[NNDCT_OP.RESHAPE],
                     action=handler),
         PatternType(pattern=[NNDCT_OP.FLATTEN],
                     action=handler),
         ])
23,719 | import numbers
from collections import namedtuple
import torch
from nndct_shared.base import NNDCT_OP
from nndct_shared.nndct_graph import Tensor
from pytorch_nndct.parse.torch_op_def import *
from pytorch_nndct.fx.translator_utils import convert_dtype, convert_shape
_OP_CONVERTER_DICT = {}
def create_op(gm, op, target, args, kwargs):
    """Translate one torch.fx node (opcode/target/args/kwargs) into an NNDCT op.

    Looks up a converter registered in ``_OP_CONVERTER_DICT`` keyed by
    (fx opcode, resolved target); falls back to the generic ``(op, "")``
    entry when no specific converter exists.

    Args:
        gm: the fx GraphModule owning the node (used to resolve module targets).
        op: fx opcode string ("call_module", "call_method", ...).
        target: the fx node target (qualified module name, method name, or callable).
        args, kwargs: the node's arguments, forwarded to the converter.
    """
    def get_new_target():
        # Normalize the target into dictionary-key form: module class for
        # call_module, the torch.Tensor method for call_method, as-is otherwise.
        if op == "call_module":
            return gm.get_submodule(target).__class__
        elif op == "call_method":
            return getattr(torch.Tensor, target)
        else:
            return target

    convert_info = _OP_CONVERTER_DICT.get((op, get_new_target()), _OP_CONVERTER_DICT[(op, "")])
    normalized_kwargs = kwargs
    if convert_info.args_kwargs_mapping_fn:
        # Mapping functions need the concrete module to extract parameters
        # (weight/bias); they are only applied to call_module nodes.
        if op == "call_module":
            mod = gm.get_submodule(target)
            normalized_kwargs = convert_info.args_kwargs_mapping_fn(target, mod, args, kwargs)
    return convert_info.convert_fn(*args, **normalized_kwargs)
23,720 | import numbers
from collections import namedtuple
import torch
from nndct_shared.base import NNDCT_OP
from nndct_shared.nndct_graph import Tensor
from pytorch_nndct.parse.torch_op_def import *
from pytorch_nndct.fx.translator_utils import convert_dtype, convert_shape
_OP_CONVERTER_DICT = {}
OpConvertInfo = namedtuple("OpConvertFnInfo", ["convert_fn", "args_kwargs_mapping_fn"])
def register_nndct_op(op, target, args_kwargs_mapping_fn=None):
def register(op_convert_func):
op_and_target = (op, target)
assert op_and_target not in _OP_CONVERTER_DICT.keys()
_OP_CONVERTER_DICT[op_and_target] = OpConvertInfo(op_convert_func, args_kwargs_mapping_fn)
return op_convert_func
return register | null |
23,721 | import numbers
from collections import namedtuple
import torch
from nndct_shared.base import NNDCT_OP
from nndct_shared.nndct_graph import Tensor
from pytorch_nndct.parse.torch_op_def import *
from pytorch_nndct.fx.translator_utils import convert_dtype, convert_shape
def input_arg(*args, **kwargs):
    """Build the NNDCT placeholder op for a graph input; arguments are ignored."""
    return TorchBaseOperation(NNDCT_OP.INPUT)
23,722 | import numbers
from collections import namedtuple
import torch
from nndct_shared.base import NNDCT_OP
from nndct_shared.nndct_graph import Tensor
from pytorch_nndct.parse.torch_op_def import *
from pytorch_nndct.fx.translator_utils import convert_dtype, convert_shape
def call_method(*args, **kwargs):
    """Generic fallback op for fx call_method nodes; arguments are ignored."""
    return TorchBaseOperation(NNDCT_OP.CALL_METHOD)
23,723 | import numbers
from collections import namedtuple
import torch
from nndct_shared.base import NNDCT_OP
from nndct_shared.nndct_graph import Tensor
from pytorch_nndct.parse.torch_op_def import *
from pytorch_nndct.fx.translator_utils import convert_dtype, convert_shape
def call_module(*args, **kwargs):
    """Generic fallback op for fx call_module nodes; arguments are ignored."""
    return TorchBaseOperation(NNDCT_OP.CALL_MODULE)
23,724 | import numbers
from collections import namedtuple
import torch
from nndct_shared.base import NNDCT_OP
from nndct_shared.nndct_graph import Tensor
from pytorch_nndct.parse.torch_op_def import *
from pytorch_nndct.fx.translator_utils import convert_dtype, convert_shape
def call_function(*args, **kwargs):
    """Generic fallback op for fx call_function nodes; arguments are ignored."""
    return TorchBaseOperation(NNDCT_OP.CALL_FUNCTION)
23,725 | import numbers
from collections import namedtuple
import torch
from nndct_shared.base import NNDCT_OP
from nndct_shared.nndct_graph import Tensor
from pytorch_nndct.parse.torch_op_def import *
from pytorch_nndct.fx.translator_utils import convert_dtype, convert_shape
def output(*args, **kwargs):
    """Build the NNDCT return op for the graph output; arguments are ignored."""
    return TorchBaseOperation(NNDCT_OP.RETURN)
23,726 | import numbers
from collections import namedtuple
import torch
from nndct_shared.base import NNDCT_OP
from nndct_shared.nndct_graph import Tensor
from pytorch_nndct.parse.torch_op_def import *
from pytorch_nndct.fx.translator_utils import convert_dtype, convert_shape
def int2tuple(obj, tuple_size):
def convert_real_tensor(name, real_tensor):
def conv_nd_mapping_fn(mod_qual_name, mod, args, kwargs):
    """Map an ``nn.ConvNd`` module call onto functional-convolution kwargs.

    Pulls weight/bias out of the module as NNDCT tensors (named by the
    module's qualified path) and normalizes the int-or-tuple hyper-parameters
    to tuples sized by the spatial rank.
    """
    weight_tensor = convert_real_tensor(".".join([mod_qual_name, "weight"]), mod.weight)
    bias_tensor = convert_real_tensor(".".join([mod_qual_name, "bias"]), mod.bias)
    # Spatial rank: conv weights carry two channel dims before the kernel dims.
    nd = weight_tensor.ndim - 2
    stride = int2tuple(mod.stride, nd)
    padding = int2tuple(mod.padding, nd)
    dilation = int2tuple(mod.dilation, nd)
    groups = mod.groups
    return {"input": kwargs["input"],
            "weight": weight_tensor,
            "bias": bias_tensor,
            "stride": stride,
            "padding": padding,
            "dilation": dilation,
            "groups": groups
            }
23,727 | import numbers
from collections import namedtuple
import torch
from nndct_shared.base import NNDCT_OP
from nndct_shared.nndct_graph import Tensor
from pytorch_nndct.parse.torch_op_def import *
from pytorch_nndct.fx.translator_utils import convert_dtype, convert_shape
def _convolution(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups):
    """Build the matching NNDCT conv op from functional-convolution arguments.

    Selects between Conv1d/2d/3d, their transposed variants, and the
    depthwise specializations based on the weight rank and group count, then
    copies hyper-parameters and the weight/bias parameters onto the op.

    Returns:
        The configured Torch{Conv,ConvTranspose}{1,2,3}d op.
    """
    weight_size = weight.shape
    if transposed:
        # Swap the first two shape entries so out-channels sits at index 0,
        # matching the regular-conv layout used by the configs below.
        # NOTE(review): this mutates the shape object in place — assumes it is
        # a list and the caller tolerates the mutation; confirm.
        weight_size[0], weight_size[1] = weight_size[1], weight_size[0]
        if weight_size[0] == 1 and groups == weight_size[1]:
            # Depthwise transposed conv: one filter per channel.
            if weight.ndim == 4:
                op = TorchConvTranspose2d(NNDCT_OP.DEPTHWISE_CONVTRANSPOSE2D)
            elif weight.ndim == 5:
                op = TorchConvTranspose3d(NNDCT_OP.DEPTHWISE_CONVTRANSPOSE3D)
            elif weight.ndim == 3:
                raise NotImplementedError("Depthwise_ConvTranpose1D is unsupported")
        else:
            if weight.ndim == 4:
                op = TorchConvTranspose2d(NNDCT_OP.CONVTRANSPOSE2D)
            elif weight.ndim == 5:
                op = TorchConvTranspose3d(NNDCT_OP.CONVTRANSPOSE3D)
            elif weight.ndim == 3:
                raise NotImplementedError("ConvTranpose1D is unsupported")
        # NOTE(review): weight.ndim outside {3, 4, 5} leaves ``op`` unbound
        # and raises NameError below.
        op.set_config("output_padding", list(output_padding))
        op.set_config('in_channels', weight_size[1])
        op.set_config('out_channels', weight_size[0] * groups)
    else:
        if weight_size[1] == 1 and groups == weight_size[0]:
            # Depthwise conv: exactly one input channel per group.
            if weight.ndim == 4:
                op = TorchConv2d(NNDCT_OP.DEPTHWISE_CONV2D)
            elif weight.ndim == 5:
                op = TorchConv3d(NNDCT_OP.DEPTHWISE_CONV3D)
            elif weight.ndim == 3:
                op = TorchConv1d(NNDCT_OP.DEPTHWISE_CONV1D)
        else:
            if weight.ndim == 4:
                op = TorchConv2d(NNDCT_OP.CONV2D)
            elif weight.ndim == 5:
                op = TorchConv3d(NNDCT_OP.CONV3D)
            elif weight.ndim == 3:
                op = TorchConv1d(NNDCT_OP.CONV1D)
        op.set_config('in_channels', weight_size[1] * groups)
        op.set_config('out_channels', weight_size[0])
    # Should add weight first
    op.set_param(op.ParamName.WEIGHTS, weight)
    if bias is not None:
        op.set_config('bias', True)
        op.set_param(op.ParamName.BIAS, bias)
    else:
        op.set_config('bias', False)
    op.set_config('dilation', list(dilation))
    # Kernel spatial size is the tail of the weight shape.
    op.set_config('kernel_size', list(weight_size[2:]))
    op.set_config('stride', list(stride))
    op.set_config('groups', groups)
    op.set_config('padding', list(padding))
    return op
def conv2d(*, input, weight, bias, stride, padding, dilation, groups):
    """Keyword-only wrapper building a non-transposed conv op (no output_padding)."""
    return _convolution(input, weight, bias, stride, padding, dilation,
                        transposed=False, output_padding=None, groups=groups)
23,728 | import numbers
from collections import namedtuple
import torch
from nndct_shared.base import NNDCT_OP
from nndct_shared.nndct_graph import Tensor
from pytorch_nndct.parse.torch_op_def import *
from pytorch_nndct.fx.translator_utils import convert_dtype, convert_shape
def relu(*, input, inplace):
    """Build a TorchReLU op carrying only the inplace flag; ``input`` is wired by the graph."""
    relu_op = TorchReLU()
    relu_op.set_config('inplace', inplace)
    return relu_op
23,729 | import numbers
from collections import namedtuple
import torch
from nndct_shared.base import NNDCT_OP
from nndct_shared.nndct_graph import Tensor
from pytorch_nndct.parse.torch_op_def import *
from pytorch_nndct.fx.translator_utils import convert_dtype, convert_shape
def convert_real_tensor(name, real_tensor):
    """Wrap a concrete torch tensor (or None) as a named NNDCT Tensor with data."""
    if real_tensor is None:
        return None
    return Tensor(
        name=name,
        shape=convert_shape(real_tensor.shape),
        dtype=convert_dtype(real_tensor.dtype),
        # Detach and move to host before snapshotting the values.
        data=real_tensor.cpu().detach().numpy(),
        requires_grad=real_tensor.requires_grad)
def linear_mapping_fn(mod_qual_name, mod, args, kwargs):
    """Extract weight/bias from an nn.Linear module as NNDCT tensors keyed by qualified name."""
    return {
        "input": kwargs["input"],
        "weight": convert_real_tensor(".".join([mod_qual_name, "weight"]), mod.weight),
        "bias": convert_real_tensor(".".join([mod_qual_name, "bias"]), mod.bias),
    }
23,730 | import numbers
from collections import namedtuple
import torch
from nndct_shared.base import NNDCT_OP
from nndct_shared.nndct_graph import Tensor
from pytorch_nndct.parse.torch_op_def import *
from pytorch_nndct.fx.translator_utils import convert_dtype, convert_shape
def linear(*, input, weight, bias):
    """Build a TorchLinear op; ``weight.shape`` is (out_features, in_features)."""
    op = TorchLinear()
    out_features, in_features = weight.shape[0], weight.shape[1]
    op.set_param(op.ParamName.WEIGHTS, weight)
    has_bias = bias is not None
    op.set_config("bias", has_bias)
    if has_bias:
        op.set_param(op.ParamName.BIAS, bias)
    op.set_config('out_features', out_features)
    op.set_config('in_features', in_features)
    return op
23,731 | import numbers
from collections import namedtuple
import torch
from nndct_shared.base import NNDCT_OP
from nndct_shared.nndct_graph import Tensor
from pytorch_nndct.parse.torch_op_def import *
from pytorch_nndct.fx.translator_utils import convert_dtype, convert_shape
def flatten(*, input, start_dim, end_dim):
    """Build a TorchFlatten op recording the input and the flattened dim span."""
    op = TorchFlatten()
    for key, value in (("input", input), ("start_dim", start_dim), ("end_dim", end_dim)):
        op.set_config(key, value)
    return op
23,732 | import numbers
from collections import namedtuple
import torch
from nndct_shared.base import NNDCT_OP
from nndct_shared.nndct_graph import Tensor
from pytorch_nndct.parse.torch_op_def import *
from pytorch_nndct.fx.translator_utils import convert_dtype, convert_shape
def adaptive_avg_pool2d(*, input, output_size):
    """Build a TorchAdaptiveAvgPool op targeting ``output_size``."""
    pool_op = TorchAdaptiveAvgPool()
    pool_op.set_config("output_size", output_size)
    return pool_op
23,733 | import numbers
from collections import namedtuple
import torch
from nndct_shared.base import NNDCT_OP
from nndct_shared.nndct_graph import Tensor
from pytorch_nndct.parse.torch_op_def import *
from pytorch_nndct.fx.translator_utils import convert_dtype, convert_shape
def int2tuple(obj, tuple_size):
    """Broadcast a scalar integer to a tuple of length ``tuple_size``; pass anything else through."""
    return (obj,) * tuple_size if isinstance(obj, numbers.Integral) else obj
def maxpool2d(*, input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False, return_indices=False):
    """Build a TorchMaxPool op, normalizing scalar hyper-parameters to pairs."""
    op = TorchMaxPool()
    op.set_config("return_indices", return_indices)
    op.set_config('kernel_size', list(int2tuple(kernel_size, 2)))
    # A falsy stride defaults to the kernel size, matching torch semantics.
    effective_stride = stride if stride else kernel_size
    op.set_config('stride', list(int2tuple(effective_stride, 2)))
    op.set_config('ceil_mode', bool(ceil_mode))
    op.set_config('padding', list(int2tuple(padding, 2)))
    op.set_config('dilation', list(int2tuple(dilation, 2)))
    return op
23,734 | import numbers
from collections import namedtuple
import torch
from nndct_shared.base import NNDCT_OP
from nndct_shared.nndct_graph import Tensor
from pytorch_nndct.parse.torch_op_def import *
from pytorch_nndct.fx.translator_utils import convert_dtype, convert_shape
def add(*, input, other, alpha):
    """Build a TorchAdd op recording both operands and the alpha scale."""
    op = TorchAdd()
    for key, value in (('input', input), ('other', other), ('alpha', alpha)):
        op.set_config(key, value)
    return op
23,735 | import torch
from collections import OrderedDict
from nndct_shared.nndct_graph import Graph, Block, Node, Tensor
from pytorch_nndct.fx.convert_op import create_op
from pytorch_nndct.fx.translator_utils import convert_dtype, get_meta_info, convert_shape
import nndct_shared.utils.tensor_util as tensor_util
from nndct_shared.base.key_names import FrameworkType
def short_name(gm, node: torch.fx.Node):
    """Return a compact human-readable name for an fx node based on its opcode."""
    opcode = node.op
    if opcode == "call_function":
        return node.target.__name__
    if opcode in ("placeholder", "call_method", "get_attr"):
        # These opcodes carry a plain string target already.
        return node.target
    if opcode == "call_module":
        return gm.get_submodule(node.target).__class__.__name__
    if opcode == "output":
        return "output"
    raise AssertionError(opcode)
23,736 | import torch
from collections import OrderedDict
from nndct_shared.nndct_graph import Graph, Block, Node, Tensor
from pytorch_nndct.fx.convert_op import create_op
from pytorch_nndct.fx.translator_utils import convert_dtype, get_meta_info, convert_shape
import nndct_shared.utils.tensor_util as tensor_util
from nndct_shared.base.key_names import FrameworkType
def get_meta_info(meta, info_type):
    """Read the named field (e.g. "shape", "dtype", "requires_grad") from a tensor-meta object."""
    return getattr(meta, info_type)
def convert_dtype(dtype):
r"""convert torch dtype to nndct dtype"""
return {
'torch.float': 'float32',
'torch.float32': 'float32',
'torch.double': 'float64',
'torch.int': 'int32',
'torch.long': 'int64'
}.get(dtype.__str__(), dtype)
def convert_shape(shape):
    """Materialize any shape-like iterable (torch.Size, tuple, ...) as a plain list."""
    return [*shape]
def make_tensor(name, meta):
    """Create a data-less NNDCT Tensor describing shape/dtype/grad from fx meta info."""
    return Tensor(
        name=name,
        shape=convert_shape(get_meta_info(meta, "shape")),
        dtype=convert_dtype(get_meta_info(meta, "dtype")),
        requires_grad=get_meta_info(meta, "requires_grad"))
23,737 | import torch
from collections import OrderedDict
from nndct_shared.nndct_graph import Graph, Block, Node, Tensor
from pytorch_nndct.fx.convert_op import create_op
from pytorch_nndct.fx.translator_utils import convert_dtype, get_meta_info, convert_shape
import nndct_shared.utils.tensor_util as tensor_util
from nndct_shared.base.key_names import FrameworkType
class FrameworkType(object):
    """String tags for the supported frontends, with NNDCT as the common bridge IR."""
    # Frontend frameworks
    TORCH = 'torch'
    CAFFE = 'caffe'
    TENSORFLOW = 'tensorflow'
    TF_KERAS = 'tf_keras'
    # Intermediate representation shared by all frontends
    NNDCT = 'nndct'
def convert_op_param(op):
    """Convert every parameter tensor of ``op`` from torch to NNDCT layout.

    NOTE(review): the converted tensor is bound to the loop variable and then
    discarded, so this only takes effect if
    ``tensor_util.convert_parameter_tensor_format`` mutates the tensor in
    place — confirm against its implementation.
    """
    for param_name, tensor in op.params.items():
        tensor = tensor_util.convert_parameter_tensor_format(
            tensor, FrameworkType.TORCH, FrameworkType.NNDCT)
23,738 | import operator
import itertools
import torch
from pytorch_nndct.fx.optimization.utils import replace_node
def always_true(*args, **kwargs):
    """Catch-all predicate: accepts any arguments and always matches."""
    return True
23,739 | import copy
import os
from typing import Any, Optional, Sequence, Union, List, Dict, Tuple
import torch
from nndct_shared.utils import NndctScreenLogger, NndctOption, QError, QWarning, QNote
from .qproc import TorchQuantProcessor
from .qproc import base as qp
from .qproc import LSTMTorchQuantProcessor, RNNQuantProcessor
from .qproc import vaiq_system_info
from .quantization import QatProcessor
from .parse.rich_in_out_helper import StandardInputData
def dump_xmodel(output_dir="quantize_result", deploy_check=False, app_deploy="CV"):
    """Export the quantized model as an xmodel (deprecated entry point).

    Args:
        output_dir: directory receiving the exported files.
        deploy_check: also dump deploy-check data when True.
        app_deploy: "CV" (vision, default) or "NLP" (enables LSTM handling).

    Raises:
        ValueError: if ``app_deploy`` is neither "CV" nor "NLP".  Previously
            an invalid value fell through and hit an unbound-variable
            NameError when ``lstm_app`` was used.
    """
    if app_deploy == "CV":
        lstm_app = False
    elif app_deploy == "NLP":
        lstm_app = True
    else:
        raise ValueError(f"app_deploy must be 'CV' or 'NLP', got {app_deploy!r}")
    NndctScreenLogger().warning(f"The function dump_xmodel() will retire in future version. Use torch_quantizer.export_xmodel() reversely.")
    qp.dump_xmodel(output_dir, deploy_check, lstm_app)
23,740 | from nndct_shared.utils import set_option_value, NndctOption
from pytorch_nndct.qproc.utils import (get_deploy_graph_list,
prepare_quantizable_module,
register_output_hook,
set_outputs_recorder_status, to_device)
def register_output_hook(module: torch.nn.Module, record_once: bool = True) -> NoReturn:
def set_outputs_recorder_status(module, turn_on) -> NoReturn:
def prepare_quantizable_module(
module: torch.nn.Module,
input_args,
export_folder: str,
state_dict_file: Optional[str] = None,
quant_mode: int = 1,
device: "torch.device" = torch.device("cuda"),
connect_qm_with_graph=True) -> Tuple[torch.nn.Module, Graph]:
def get_deploy_graph_list(quant_model, nndct_graph, need_partition=True):
def prepare_deployable_graph(module, input_args, device, output_dir):
  """Trace *module* into quantizable form and build its deployable graphs.

  Quantization is temporarily switched off (and parameter correction
  disabled) while one forward pass records the module outputs; the previous
  option values are restored afterwards even if the forward pass raises.

  Args:
    module: the torch.nn.Module to trace.
    input_args: example input (a single tensor/object or a tuple of them).
    device: torch device on which the trace pass runs.
    output_dir: folder the intermediate export artifacts are written to.

  Returns:
    Tuple ``(dev_graph, deploy_graphs)`` as produced by
    ``get_deploy_graph_list``.
  """
  module, input_args = to_device(module, input_args, device)
  quant_module, graph = prepare_quantizable_module(
      module=module,
      input_args=input_args,
      export_folder=output_dir,
      device=device)
  # Multi-block graphs arise from scripted (TorchScript) modules.
  quant_module.from_script(len(graph.all_blocks()) > 1)

  # Save the current option values so the trace below cannot leak state.
  quant_off_stat = NndctOption.nndct_quant_off.value
  param_corr_stat = NndctOption.nndct_param_corr.value
  set_option_value("nndct_quant_off", True)
  set_option_value("nndct_param_corr", False)
  try:
    register_output_hook(quant_module, record_once=True)
    set_outputs_recorder_status(quant_module, True)
    quant_module.eval()
    if isinstance(input_args, tuple):
      _ = quant_module.to(device)(*input_args)
    else:
      _ = quant_module.to(device)(input_args)
    deploy_graphs, dev_graph = get_deploy_graph_list(
        quant_module, graph, need_partition=False)
  finally:
    # Bug fix: the original restored these options outside any try/finally,
    # so a failing forward pass left quantization globally disabled.
    set_option_value("nndct_quant_off", quant_off_stat)
    set_option_value("nndct_param_corr", param_corr_stat)
  return dev_graph, deploy_graphs
from nndct_shared.utils import set_option_value, NndctOption
from pytorch_nndct.qproc.utils import (get_deploy_graph_list,
prepare_quantizable_module,
register_output_hook,
set_outputs_recorder_status, to_device)
def assign_device_to_node(node_device_map):
  """Stamp every node in the mapping with its assigned target device.

  Args:
    node_device_map: mapping from graph node to the device object that
      should be written onto the node's ``target_device`` attribute.
  """
  for graph_node, target in node_device_map.items():
    graph_node.target_device = target
import importlib.util
import os
import sys
from typing import Any, NoReturn, Optional, Sequence, Tuple, Union, List
import itertools
import torch
import torch.nn as nn
import torch.nn.functional as F
import nndct_shared.utils as nndct_utils
import pytorch_nndct.utils.jit_utils as jit_utils
import pytorch_nndct.utils.module_util as module_util
from nndct_shared.base import FrameworkType
from nndct_shared.compile import DevGraphOptimizer
from nndct_shared.nndct_graph import Graph, convert_block_node_to_graph
from nndct_shared.optimization import QuantOptimizer
from nndct_shared.utils import (GLOBAL_MAP, NNDCT_KEYS, NNDCT_OP,
NndctDebugLogger, NndctOption,
NndctScreenLogger, permute_data, permute_axes,
QError, QWarning, QNote)
from nndct_shared.utils.dpu_utils import get_avgpool_dpu_coeff
from pytorch_nndct.export import get_script_writer
from pytorch_nndct.nn import stacked_lstm
from pytorch_nndct.nn.modules import channel_scale, reluk
from pytorch_nndct.parse import TorchParser
from pytorch_nndct.utils import TorchSymbol, TorchGraphSymbol
from pytorch_nndct.utils.module_util import to_device, get_module_name
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
from .ModuleHooker import ModuleHooker
class ModuleHooker(object):
def name_modules(module):
def add_outputs(module, record_once):
def add_output_intime(module):
def add_input_dump_time(module):
def apply_to_children(module, func):
def turn_on_record_outputs(module):
def turn_off_record_outputs(module):
def clear_record_outputs(module):
def turn_on_output_intime(module):
def turn_off_output_intime(module):
def clear_op_called_times(module):
def turn_on_input_dump(module):
def turn_off_input_dump(module):
def clear_input_called_times(module):
def register_state_dict_hook(cls, module):
def _resume_state_dict_key(op, destination, prefix, local_meta_data):
def _state_dict_hooker(op, hooker):
def register_output_hook(cls, module, record_once=True):
def _record_outputs(op, inputs, outputs):
def get_outputs_value(outputs):
def _output_hooker(op, record_func):
def register_input_dump_hook(cls, module):
def dump_input_data(module, inputs):
def inputs_flatten(tensors, tensors_flatten):
def register_output_intime_hook(cls, module):
def _dump_intime(op, inputs, outputs):
def get_outputs_value(outputs):
def dump_output_data(op, output_data, index=None):
def dump_output(op, output_data):
def _output_hooker(op, output_func):
def detach_node_from_module(cls, module):
def _del_node(op):
def hook_module_with_node(cls, module, graph):
def _add_node_on_module(op):
def update_parameters(cls, module, graph, graph2module):
def safe_torch_nn_Parameter(torch_tensor, requires_grad):
def _graph2module(op):
def _module2graph(op):
def _get_output_data(outptus, outptus_name):
def _get_output_shape(outputs, outputs_name):
def update_blobs_once(cls, module, graph=None, time_step=None, update_shape_only=False):
def _updata_tensor_data(node, nndct_tensor, torch_tensor):
def _updata_tensor_shape(node, nndct_tensor, torch_tensor):
def _update_node_outputs(op):
def clone_quant_module(cls, quant_module, quant_graph):
def hook_module_with_quantizer(cls, module, quantizer):
def _add_quantizer_on_module(op):
def hook_module_with_input_device_checker(cls, module, module_gen_from_script):
def _check_input_args(op, input):
def register_tensor_dtype_and_shape_hook(cls, module):
def _record_dtype_and_shape(op, inputs, outputs):
def _get_output_dtype_and_shape(output):
def _hooker(op, record_func):
def disconnect_modeule_with_graph(module):
  """Detach the nndct graph nodes previously hooked onto *module*.

  Thin delegate to ``ModuleHooker.detach_node_from_module``.

  NOTE(review): "modeule" is a typo in the public name; it must be kept
  as-is for backward compatibility with existing callers.
  """
  ModuleHooker.detach_node_from_module(module)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.