id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
23,543 | import torch
from torch.autograd import Variable
import math
from nndct_shared.utils import NndctOption, NndctScreenLogger, QError
from nndct_shared.quantization import kernel_need_quant
from nndct_shared.quantization import quantize_tensors
from nndct_shared.quantization import maybe_get_quantizer
import pytorch_nndct.utils as py_utils
import torch.nn.functional as F
from pytorch_nndct.utils import Const
from .fix_ops import NndctISqrt
class deephi_GroupNorm(torch.nn.modules.normalization.GroupNorm):
  r"""DeePhi group normalization operation, support float and double.

  Quantization-aware drop-in for ``torch.nn.GroupNorm``: quantizes the input,
  the affine parameters and the output, and can bit-accurately simulate the
  IPU/AIE integer kernel when ``nndct_op_groupnorm_mode == "ipu_8bw"``.
  """

  def __init__(self, *args, **kwards):
    super(deephi_GroupNorm, self).__init__(*args, **kwards)
    self.params_name = None   # names of the quantized params, set by the quantizer
    self.node = None          # graph node this module belongs to
    self.quant_mode, self.quantizer = maybe_get_quantizer()
    self.param_saved = False
    self.param_quantized = False

  def forward(self, input):
    # Not a quantizable kernel for this target: run the float op and only
    # quantize the output.
    if not kernel_need_quant(self.quantizer, self.node):
      output = super().forward(input)
      output = quantize_tensors([output], self.node)[0]
      return output

    # quantize input tensor
    qinput = quantize_tensors([input], self.node, tensor_type='input')[0]

    params = []
    if self.weight is not None:
      params.append(self.weight)
    if self.bias is not None:
      params.append(self.bias)

    param_names = self.params_name[:len(params)]
    if len(params) != len(param_names):
      # Fixed: the message used to say "InstanceNorm" (copy/paste error).
      NndctScreenLogger().error2user(QError.PARAM_NUMBER, f"Parameter number error in node {self.node} for GroupNorm operator!")

    if (not self.param_quantized) and len(params) > 0:
      inplace = self.quantizer is not None and self.quantizer.inplace
      # quantize weights and bias
      if inplace:
        _ = quantize_tensors(
            params,
            self.node,
            tensor_names=param_names,
            tensor_type='param')
        qparams = [p for p in params]
      else:
        qparams = quantize_tensors(
            params,
            self.node,
            tensor_names=param_names,
            tensor_type='param')
      if not NndctOption.nndct_quant_off.value:
        self.param_quantized = True
    else:
      qparams = [p for p in params]

    # quantization configure: walk back to the producing node whose output
    # carries the quant config for our input.
    input_name = self.node.in_nodes[0]
    input_node = self.quantizer.configer.get_Nndctnode(input_name)
    if not self.quantizer.configer.node_output_quantizable(input_node):
      input_name = input_node.in_nodes[0]

    if NndctOption.nndct_op_groupnorm_mode.value == "ipu_8bw" and (not self.quantizer.exporting):
      ifp = self.quantizer.get_quant_config(input_name, False)[1]
      wfp = self.quantizer.get_quant_config(self.params_name[0], False, tensor_type='param')[1] if self.affine else None
      bfp = self.quantizer.get_quant_config(self.params_name[1], False, tensor_type='param')[1] if self.affine else None
      # Fixed: guard the qparams access so a non-affine GroupNorm (empty
      # qparams) no longer raises IndexError.
      output = self.simulateGroupNorm(
          qinput, self.num_groups, self.affine,
          qparams[0] if self.affine else None,
          qparams[1] if self.affine else None,
          ifp, wfp, bfp)
      output = quantize_tensors([output], self.node, method=4)[0]
    else:
      # Fixed: use the (possibly still-float) params whenever they exist.
      # Previously they were gated on `self.param_quantized`, which stays
      # False when quantization is off, silently dropping the affine
      # transform and diverging from float GroupNorm.
      output = torch.nn.functional.group_norm(
          qinput,
          self.num_groups,
          qparams[0] if len(qparams) > 0 else None,
          qparams[1] if len(qparams) > 1 else None,
          self.eps)
      output = quantize_tensors([output], self.node)[0]

    return output

  # e.g. inp (N, C, H, W)
  def simulateGroupNorm(self, inp, num_groups, affine, weight, bias, ifp, wfp, bfp):
    """Simulate the AIE integer GroupNorm kernel in bfloat16/float32.

    Args:
      inp: quantized input, 3D (N,C,L), 4D (N,C,H,W) or 5D (N,C,D,H,W).
      num_groups: number of groups G; C must be divisible by G.
      affine: apply per-channel weight/bias when True.
      weight, bias: per-channel parameters; may be None when affine is False.
      ifp: input fix position (fraction bits).
      wfp, bfp: weight/bias fix positions (unused here, kept for interface
        stability).

    Returns:
      float32 tensor with the same shape as ``inp``.
    """
    orig_shape = inp.shape
    # inp fake int8: [-256, 255]
    inp = torch.floor(inp * (2 ** ifp))  # float32
    inp = inp.to(torch.bfloat16)         # bfloat16

    # Reshape into (N, G, C//G, *spatial) so statistics run per group.
    G = self.num_groups
    if inp.dim() == 3:    # [N, C, L]
      N, C, L = inp.shape
      inp_group = inp.reshape(N, G, C // G, L)
      dim = [2, 3]
    elif inp.dim() == 4:  # [N, C, H, W]
      N, C, H, W = inp.shape
      inp_group = inp.reshape(N, G, C // G, H, W)
      dim = [2, 3, 4]
    elif inp.dim() == 5:  # [N, C, D, H, W]
      N, C, D, H, W = inp.shape
      inp_group = inp.reshape(N, G, C // G, D, H, W)
      dim = [2, 3, 4, 5]
    else:
      NndctScreenLogger().error2user(QError.INPUT_DIMENSION, f"Input dimension error in node {self.node}! The dimension of input is {inp.dim()}.")

    # Mean: aie_mean equals torch.mean(inp, dim, keepdim=True) except that the
    # accumulation order follows the AIE 8-lane vector adder.
    NG, CG = inp_group.shape[0], inp_group.shape[1]
    flat = inp_group.reshape(NG, CG, -1)            # (NG, CG, S), bfloat16
    numel = flat[0][0].numel()                      # elements per group
    flat = inp_group.reshape(-1, flat.shape[-1])    # (NG*CG, S), bfloat16
    # NOTE(review): assumes the per-group element count is a multiple of 8,
    # as required by the 8-lane reshape below — confirm upstream guarantees.
    lanes = flat.reshape(flat.shape[0], -1, 8)      # (NG*CG, S/8, 8)
    lane_sum = lanes.sum(dim=-2, keepdim=False, dtype=torch.float32)  # (NG*CG, 8)
    # aie add v8
    group_sum = py_utils.aie_add_v8(lane_sum)       # (NG*CG,), float32
    group_sum = group_sum.reshape(NG, CG)           # (NG, CG), float32
    mean = group_sum / numel                        # (NG, CG), float32
    # Unsqueeze mean to (NG, CG, 1, ...) so it broadcasts against inp_group.
    for _ in range(mean.dim(), inp_group.dim()):
      mean = torch.unsqueeze(mean, -1)
    mean = mean.to(torch.bfloat16)

    # x - mu
    sub = (inp_group - mean).to(torch.float32)

    # var, then isqrt = 1/sqrt(var) via the dedicated AIE isqrt kernel
    var = torch.square(sub).mean(dim, keepdim=False, dtype=torch.float32)  # (NG, CG)
    isqrt = torch.empty_like(var)
    NndctISqrt(var, isqrt)  # CUDA/CPU: float32
    isqrt = isqrt.to(torch.bfloat16)
    for _ in range(isqrt.dim(), sub.dim()):
      isqrt = torch.unsqueeze(isqrt, dim=-1)

    # (x - mu) * (1/sigma), rounded through bfloat16 like the hardware.
    mul = torch.mul(sub, isqrt)
    mul = mul.to(torch.bfloat16).to(torch.float32)
    # Fixed: reshape back to the original input shape; the old code used
    # `reshape(N, C, H, W)`, which raised NameError for 3D inputs and a
    # shape mismatch for 5D inputs.
    mul = mul.reshape(orig_shape)

    # affine: (x - mu)/sigma*gamma + beta
    if affine:
      weight = weight.repeat(mul.shape[0], 1)  # (C,) -> (N, C)
      bias = bias.repeat(mul.shape[0], 1)      # (C,) -> (N, C)
      for _ in range(weight.dim(), mul.dim()):
        weight = torch.unsqueeze(weight, -1)
        bias = torch.unsqueeze(bias, -1)
      out = mul * weight + bias
      out = out.to(torch.bfloat16).to(torch.float32)
    else:  # (x - mu)/sigma
      out = mul
    return out
def GroupNorm(*args, **kwargs):
return deephi_GroupNorm(*args, **kwargs) | null |
23,544 | import torch
from torch.autograd import Variable
import math
from nndct_shared.utils import NndctOption, NndctScreenLogger, QError, QWarning
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
from .quant_noise import eval_qnoise
import pytorch_nndct.utils as py_utils
import torch.nn.functional as F
class deephi_Conv1d(torch.nn.modules.conv.Conv1d):
  r"""DeePhi Conv1d operation, support float and double.

  Quantization-aware Conv1d: quantizes weight/bias/input/output and, during
  calibration (quant_mode == 1), statistically corrects the bias from the
  quantization error of the output.
  """

  def __init__(self, *args, **kwargs):
    super(deephi_Conv1d, self).__init__(*args, **kwargs)
    self.params_name = None
    self.node = None
    self.quant_mode, self.quantizer = maybe_get_quantizer()
    self.param_saved = False
    self.param_quantized = False
    # self.weight and self.bias are not quantized float parameters
    self.weight_bak = None  # backup of float weight for bias correction
    self.bias_bak = None    # backup of float bias for bias correction
    self.stop = False       # calibration converged; stop correcting
    self.rate = NndctOption.nndct_param_corr_rate.value
    self.efficency = 0.0
    self.deviation = 0.0

  def forward(self, input):
    # backup bias for bias correction feature
    if (not self.param_saved):
      if NndctOption.nndct_param_corr.value > 0:
        # backup orignal float parameters
        if self.quant_mode == 1:
          self.weight_bak = self.weight.detach().clone()
          if self.bias is not None:
            self.bias_bak = self.bias.detach().clone()
        # adjust bias: in test mode (quant_mode == 2) apply the correction
        # recorded during calibration.
        if self.quant_mode == 2 and self.bias is not None:
          if not self.quantizer.has_bias_corr(self.node):
            NndctScreenLogger().error2user(QError.BIAS_CORRECTION, f"Bias correction file in quantization result directory does not match current model.")
            exit(2)
          self.bias.data = torch.sub(self.bias.data, torch.tensor(
              self.quantizer.get_bias_corr(self.node),
              device=self.bias.data.device,
              dtype=self.bias.data.dtype))
      self.param_saved = True

    # quantize parameters
    qweight = None
    qbias = None
    inplace = (NndctOption.nndct_quant_off.value or
        self.quantizer is not None and self.quantizer.inplace)
    if (not self.param_quantized):
      if inplace:
        # In-place mode: quantize_tensors mutates the parameters themselves.
        _ = quantize_tensors(
            [self.weight],
            self.node,
            tensor_names = [self.params_name[0]],
            tensor_type = 'param')[0]
        qweight = self.weight
        if self.bias is not None:
          _ = quantize_tensors(
              [self.bias],
              self.node,
              tensor_names = [self.params_name[1]],
              tensor_type = 'param')[0]
          qbias = self.bias
      else:
        qweight = quantize_tensors(
            [self.weight],
            self.node,
            tensor_names = [self.params_name[0]],
            tensor_type = 'param')[0]
        if self.bias is not None:
          qbias = quantize_tensors(
              [self.bias],
              self.node,
              tensor_names = [self.params_name[1]],
              tensor_type = 'param')[0]
      if not NndctOption.nndct_quant_off.value:
        self.param_quantized = True
    else:
      qweight = self.weight
      qbias = self.bias

    # quantize input tensor
    qinput = quantize_tensors([input], self.node, tensor_type='input')[0]
    output = torch.nn.functional.conv1d(qinput,
                                        weight = qweight,
                                        bias = qbias,
                                        stride = self.stride,
                                        padding = self.padding,
                                        dilation = self.dilation,
                                        groups = self.groups)
    output = quantize_tensors([output], self.node)[0]

    # correct weights and bias in calibation
    if NndctOption.nndct_param_corr.value > 0:
      #rate = NndctOption.nndct_param_corr_rate.value
      # statistic of quantization error
      if (self.quant_mode == 1 and not self.stop):
        # Float reference output with the backed-up float parameters.
        res_f = torch.nn.functional.conv1d(input,
                                           self.weight_bak,
                                           bias = self.bias_bak,
                                           stride = self.stride,
                                           padding = self.padding,
                                           dilation = self.dilation,
                                           groups = self.groups)
        error, rate, self.stop, self.efficency, self.deviation = eval_qnoise(
            output,
            res_f,
            self.efficency,
            self.deviation,
            self.rate,
            self.stop)
        if (not self.stop) and (self.bias is not None):
          # Fixed: reduce only batch (0) and length (2) dims so the error
          # stays per output channel, matching the bias shape (C,).
          # Previously dim=[0, 1, 2] collapsed the channel dimension into a
          # single scalar correction (the ConvTranspose2d sibling correctly
          # uses [0, 2, 3] for its (N, C, H, W) output).
          error = error.mean(dim = [0, 2])
          self.bias.data = torch.sub(self.bias.data, error, alpha=rate)
        self.param_quantized = False

    return output

  def bias_corr(self):
    """Return the accumulated bias correction (float backup minus current
    bias) as a nested Python list, or None when no bias/backup exists."""
    if self.bias is not None and self.bias_bak is not None:
      bias_err = torch.sub(self.bias_bak, self.bias.data)
      return bias_err.cpu().numpy().tolist()
    else:
      return None
def Conv1d(*args, **kwargs):
quant_mode, _ = maybe_get_quantizer()
if quant_mode == None:
return torch.nn.Conv1d(*args, **kwargs)
return deephi_Conv1d(*args, **kwargs) | null |
23,545 | import torch
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
import pytorch_nndct.utils as py_utils
class deephi_Sub(torch.nn.Module):
def __init__(self):
def forward(self, input, other, alpha=1):
def Sub(*args, **kwargs):
return deephi_Sub(*args, **kwargs) | null |
23,546 | import torch
import numpy as np
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
from nndct_shared.utils import NndctOption
from nndct_shared.base import GLOBAL_MAP, NNDCT_KEYS
from .tanh_table import *
from .fix_ops import NndctTanhTableLookup, NndctTanhSimulation, NndctTanhTableLookupAIE2
import pytorch_nndct.utils as py_utils
class deephi_Tanh(torch.nn.modules.Tanh):
  r"""DeePhi Tanh operation"""

  def __init__(self):
    super(deephi_Tanh, self).__init__()
    self.quant_mode, self.quantizer = maybe_get_quantizer()
    self.node = None  # graph node, assigned by the quantizer after construction

  def forward(self, input):
    # Float path: quantization disabled for this node.
    if self.quant_mode == 0 or (not self.node.in_quant_part):
      return super().forward(input)
    qinput = quantize_tensors([input], self.node, tensor_type='input')[0]
    # NOTE(review): `NndctOption.nndct_only_int_quant is False` compares the
    # option *object* to False (always False), not its `.value` — so this
    # last term never triggers the branch; confirm whether `.value` was meant.
    if (NndctOption.nndct_quant_off.value or
        self.quantizer is None or
        self.quantizer.exporting or
        NndctOption.nndct_cv_app.value or
        NndctOption.nndct_only_int_quant is False):
      # Method 0: quant input and output (for CV)
      output = super().forward(qinput)
      output = quantize_tensors([output], self.node)[0]
    else:
      output = torch.empty_like(qinput)
      # Walk back to the node whose output carries the quant config of our input.
      input_name = self.node.in_nodes[0]
      input_node = self.quantizer.configer.get_Nndctnode(input_name)
      if not self.quantizer.configer.node_output_quantizable(input_node):
        input_name = input_node.in_nodes[0]
      fragpos = self.quantizer.get_quant_config(input_name, False)[1]
      # Method 1: Simulation AIE with 16 bw (for RNNT)
      if NndctOption.nndct_op_tanh_sigmoid_mode.value == "simulation":
        # NOTE(review): passes `input` rather than `qinput` — presumably relies
        # on in-place input quantization; confirm against the quantizer setup.
        NndctTanhSimulation(input, output, fragpos)
        output = quantize_tensors([output], self.node)[0]
      # Method 2: Table Look up for AIE2 with 16 bw (based on LUT)
      elif NndctOption.nndct_op_tanh_sigmoid_mode.value == "aie2_lut_16bw" or NndctOption.nndct_ip_asr.value:
        NndctTanhTableLookupAIE2(qinput, output, fragpos)
        output = quantize_tensors([output], self.node)[0]
      # Method 3: Table Look up for FPGA with 16 bw
      else:
        quant_device = qinput.device
        Ttable = TANH_TABLE.table.to(qinput.dtype).to(quant_device)
        output = output.to(quant_device)
        # NOTE(review): also uses `input` (not `qinput`) — same assumption as
        # Method 1; confirm.
        NndctTanhTableLookup(input,
                             Ttable,
                             output,
                             fragpos)
        # Table output is Q*.15: force the output fix position to 15.
        bnfp = self.quantizer.get_quant_config(input_name, False)
        bnfp[1] = 15
        self.quantizer.set_quant_config(self.node.name, bnfp)
    return output
import torch
def Tanh(*args, **kwargs):
quant_mode, _ = maybe_get_quantizer()
if quant_mode == None:
return torch.nn.Tanh(*args, **kwargs)
return deephi_Tanh(*args, **kwargs) | null |
23,547 | import math
import torch
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
from nndct_shared.utils import NndctOption
from .fix_ops import NndctExpApprAIE2, NndctLogSoftmaxFastLn, NndctLogSoftmaxSub
import pytorch_nndct.utils as py_utils
class deephi_LogSoftmax(torch.nn.modules.LogSoftmax):
  r"""DeePhi LogSoftmax operation"""

  def __init__(self, dim = None):
    super(deephi_LogSoftmax, self).__init__()
    self.dim = dim
    self.quant_mode, self.quantizer = maybe_get_quantizer()
    self.node = None  # graph node, assigned by the quantizer after construction

  def forward(self, input):
    # Float path: quantization disabled for this node.
    if self.quant_mode == 0 or (not self.node.in_quant_part):
      return super().forward(input)
    qinput = quantize_tensors([input], self.node, tensor_type='input')[0]
    if (NndctOption.nndct_quant_off.value or
        self.quantizer is None or
        self.quantizer.exporting):
      # Method 0: quant input and output
      output = super().forward(qinput)
      output = quantize_tensors([output], self.node)[0]
    else:
      # Method: Table Look up for AIE2 (based on LUT) with 16 bw
      if NndctOption.nndct_op_softmax_mode.value == "aie2_lut_16bw":
        # Walk back to the node whose output carries our input's quant config.
        input_name = self.node.in_nodes[0]
        input_node = self.quantizer.configer.get_Nndctnode(input_name)
        if not self.quantizer.configer.node_output_quantizable(input_node):
          input_name = input_node.in_nodes[0]
        bw = self.quantizer.get_quant_config(input_name, False)[0]       # bit width
        fragpos = self.quantizer.get_quant_config(input_name, False)[1]  # fix position
        # Shift the input down (softmax is shift-invariant) so exp() stays in range.
        if bw == 8 and fragpos < 2:
          # NOTE(review): the outer condition already requires fragpos < 2, so
          # the `else: qinput -= 31.75` below is unreachable dead code; the
          # outer test was possibly meant to be just `bw == 8` — confirm.
          if fragpos < 2:
            qinput_max = torch.max(qinput, dim = self.dim, keepdim = True).values
            qinput -= qinput_max
          else:
            qinput -= 31.75
        else: # bw == 16
          if fragpos < 10:
            qinput_max = torch.max(qinput)
            qinput -= qinput_max
          else:
            qinput -= 32
        # exp via AIE2 approximation, then log-softmax as x - ln(sum(exp)).
        qinput_exp = torch.empty_like(input)
        NndctExpApprAIE2(qinput, qinput_exp, bw)
        exp_sum = torch.sum(qinput_exp, dim = self.dim, keepdim = True)
        ln_sum = torch.empty_like(exp_sum)
        NndctLogSoftmaxFastLn(exp_sum, ln_sum)
        output = torch.empty_like(input)
        NndctLogSoftmaxSub(qinput, output, ln_sum)
        output = quantize_tensors([output], self.node)[0]
      else:
        # Fallback: float log-softmax with quantized input/output.
        output = super().forward(qinput)
        output = quantize_tensors([output], self.node)[0]
    return output
def LogSoftmax(*args, **kwargs):
quant_mode, _ = maybe_get_quantizer()
if quant_mode == None:
return torch.nn.LogSoftmax(*args, **kwargs)
return deephi_LogSoftmax(*args, **kwargs) | null |
23,548 | import math
import torch
import pytorch_nndct.utils as py_utils
from nndct_shared.quantization import maybe_get_quantizer, quantize_tensors
from nndct_shared.utils import NndctOption, NndctScreenLogger
class deephi_AdaptiveAvgPool2d(torch.nn.modules.AdaptiveAvgPool2d):
  r"""DeePhi AdaptiveAvgPool2d operation, support float and double.

  Quantizes input and output; when the pool reduces by an integer factor and
  DPU approximation is enabled, rescales the result to match DPU fixed-point
  averaging.
  """

  def __init__(self, *args, **kwards):
    super(deephi_AdaptiveAvgPool2d, self).__init__(*args, **kwards)
    self.node = None
    self.quant_mode, self.quantizer = maybe_get_quantizer()

  def forward(self, input):
    q_in = quantize_tensors([input], self.node, tensor_type='input')[0]
    output = super().forward(q_in)

    # During slow trace, the dim of shape will convert to tensor value which
    # is not supported in nndct, hence the explicit int() casts.
    in_size = [int(d) for d in input.shape[2:]]
    remainders = [in_size[i] % self.output_size[i] for i in range(len(in_size))]
    if any(r != 0 for r in remainders):
      # Non-integer reduction factor: leave the op unquantized.
      if self.node is not None:
        NndctScreenLogger().warning_once(f"AdaptiveAvgpool2d op({self.node.name}) is not quantized. Because it's output size {self.output_size} are not factor of input size {in_size}.")
      return output

    kernel = [int(in_size[i] / self.output_size[i]) for i in range(len(in_size))]

    # scale to DPU accuracy
    if NndctOption.nndct_avg_pool_approximate.value:
      output = output * self._dpu_scale(kernel)

    return quantize_tensors([output], self.node)[0]

  def _dpu_scale(self, kernel):
    # Fixed-point friendly approximations of 1/(kh*kw): for each kernel the
    # DPU computes sum * multi_factor >> shift_factor, so the float result is
    # rescaled by rec * multi_factor / 2**shift_factor.
    special = {
        (3, 3): 9.0 * 7.0 / 64.0,
        (5, 5): 25.0 * 10.0 / 256.0,
        (6, 6): 36.0 * 7.0 / 256.0,
        (3, 6): 36.0 * 7.0 / 256.0,
        (6, 3): 36.0 * 7.0 / 256.0,
        (7, 7): 49.0 * 21.0 / 1024.0,
        (14, 14): 196.0 * 21.0 / 4096.0,
    }
    key = tuple(kernel)
    if key in special:
      return special[key]
    # Generic kernel: search the (multiplier, shift) pair whose ratio best
    # approximates 1/rec.
    rec = kernel[0] * kernel[1]
    max_factor = math.ceil(math.log(rec * 128, 2))
    best_diff = 1.0
    multi_factor = 0.0
    shift_factor = 0.0
    for shift in range(max_factor):
      cand = round((2 ** shift) / rec)
      cand_diff = abs(cand / (2 ** shift) - 1 / rec)
      if cand_diff < best_diff:
        multi_factor = cand
        best_diff = cand_diff
        shift_factor = shift
    return rec * multi_factor / (2 ** shift_factor)
def AdaptiveAvgPool2d(*args, **kwargs):
quant_mode, _ = maybe_get_quantizer()
if quant_mode is None or NndctOption.nndct_quant_off.value:
return torch.nn.AdaptiveAvgPool2d(*args, **kwargs)
return deephi_AdaptiveAvgPool2d(*args, **kwargs) | null |
23,549 | import os
import re
import torch
from torch.autograd import Variable
import math
import numpy as np
from nndct_shared.utils import NndctOption, NndctScreenLogger, create_work_dir
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
from .quant_noise import eval_qnoise
import pytorch_nndct.utils as py_utils
import torch.nn.functional as F
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
class deephi_GELU(torch.nn.GELU):
  r"""DeePhi GELU operation, support float and double"""

  def __init__(self, approximate=None):
    # torch >= 1.12 accepts the `approximate` argument natively; older
    # versions take no argument, so remember it ourselves.
    if compare_torch_version(CmpFlag.GREATER_EQUAL, "1.12.0"):
      super(deephi_GELU, self).__init__(approximate)
    else:
      super(deephi_GELU, self).__init__()
      self.approximate = approximate
    self.node = None
    self.quant_mode, self.quantizer = maybe_get_quantizer()

  def gelu(self, x):
    # tanh approximation of GELU.
    inner = math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))
    return 0.5 * x * (1 + torch.tanh(inner))

  def forward(self, input):
    # Float path: quantization disabled for this node (or gemm88 mode).
    if self.quant_mode <= 0 or (not self.node.in_quant_part) or NndctOption.nndct_gemm88.value:
      return super().forward(input)

    qinput = quantize_tensors([input], self.node, tensor_type='input')[0]

    if NndctOption.nndct_quant_off.value or self.quantizer.exporting:
      output = super().forward(qinput)
    elif NndctOption.nndct_op_gelu_mode.value == "dynamic_table" or NndctOption.nndct_ip_v70_bert.value:
      # Method 1: Dynamic table look up with 8 bw
      output = self.gelu(qinput)
    elif compare_torch_version(CmpFlag.GREATER_EQUAL, "1.12.0"):
      # Method 0: Quant input and output of GELU
      output = F.gelu(qinput, approximate=self.approximate)
    else:
      output = F.gelu(qinput)

    return quantize_tensors([output], self.node)[0]
def GELU(*args, **kwargs):
quant_mode, _ = maybe_get_quantizer()
if quant_mode == None:
return torch.nn.GELU(*args, **kwargs)
return deephi_GELU(*args, **kwargs) | null |
23,550 | import torch
from torch.autograd import Variable
import torch.nn.functional as F
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
from nndct_shared.utils import NndctOption
import pytorch_nndct.utils as py_utils
from nndct_shared.utils import NNDCT_KEYS, GLOBAL_MAP
class deephi_Hardsigmoid(torch.nn.Module):
def __init__(self, inplace=False, *args, **kwards):
def forward(self, input):
def Hardsigmoid(*args, **kwargs):
return deephi_Hardsigmoid(*args, **kwargs) | null |
23,551 | import torch
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
import pytorch_nndct.utils as py_utils
from nndct_shared.utils import NndctOption
class deephi_LeakyReLU(torch.nn.LeakyReLU):
  r"""DeePhi LeakyReLU operation"""

  def __init__(self, *args, **kwargs):
    # only support the specified slope and inplace operation
    super().__init__(*args, **kwargs)
    # DPU approximation replaces the slope with the fixed-point friendly
    # value 13/128 = 0.1015625 when enabled.
    if NndctOption.nndct_leaky_relu_approximate.value:
      self.negative_slope = 0.1015625
    self.quant_mode, self.quantizer = maybe_get_quantizer()
    self.node = None

  def forward(self, input):
    # quantize input, run the float kernel, then quantize the result
    quantized_in = quantize_tensors([input], self.node, tensor_type='input')[0]
    raw_out = super().forward(quantized_in)
    return quantize_tensors([raw_out], self.node)[0]
def LeakyReLU(*args, **kwargs):
quant_mode, _ = maybe_get_quantizer()
if quant_mode is None or NndctOption.nndct_quant_off.value:
return torch.nn.LeakyReLU(*args, **kwargs)
return deephi_LeakyReLU(*args, **kwargs) | null |
23,552 | import torch
from torch.autograd import Variable
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
import pytorch_nndct.utils as py_utils
class deephi_MaxPool1d(torch.nn.modules.MaxPool1d):
  r"""DeePhi MaxPool1d operation, support float and double"""

  def __init__(self, *args, **kwards):
    super(deephi_MaxPool1d, self).__init__(*args, **kwards)
    self.node = None
    self.quant_mode, self.quantizer = maybe_get_quantizer()

  def forward(self, input):
    # quantize input, run the float kernel, then quantize the result
    quantized_in = quantize_tensors([input], self.node, tensor_type='input')[0]
    pooled = super().forward(quantized_in)
    return quantize_tensors([pooled], self.node)[0]
def MaxPool1d(*args, **kwargs):
quant_mode, _ = maybe_get_quantizer()
if quant_mode == None:
return torch.nn.MaxPool1d(*args, **kwargs)
return deephi_MaxPool1d(*args, **kwargs) | null |
23,553 | import torch
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
import pytorch_nndct.utils as py_utils
class deephi_Interpolate(torch.nn.Module):
  """Quantized wrapper around torch.nn.functional.interpolate."""

  def __init__(self, *args, **kwards):
    super(deephi_Interpolate, self).__init__(*args, **kwards)
    self.node = None
    self.quant_mode, self.quantizer = maybe_get_quantizer()

  def forward(self,
              input,
              size=None,
              scale_factor=None,
              mode='nearest',
              align_corners=None):
    # quantize input, resize, then quantize the result
    quantized_in = quantize_tensors([input], self.node, tensor_type='input')[0]
    resized = torch.nn.functional.interpolate(quantized_in, size, scale_factor,
                                              mode, align_corners)
    return quantize_tensors([resized], self.node)[0]
def Interpolate(*args, **kwargs):
return deephi_Interpolate(*args, **kwargs) | null |
23,554 | import torch
from torch.autograd import Variable
import math
from nndct_shared.utils import NndctOption, NndctScreenLogger, QError, QWarning
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
from .quant_noise import eval_qnoise
import pytorch_nndct.utils as py_utils
import torch.nn.functional as F
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
class deephi_ConvTranspose2d(torch.nn.modules.conv.ConvTranspose2d):
  r"""DeePhi ConvTranspose2d operation, support float and double"""

  def __init__(self, *args, **kwards):
    super(deephi_ConvTranspose2d, self).__init__(*args, **kwards)
    self.params_name = None
    self.node = None
    self.quant_mode, self.quantizer = maybe_get_quantizer()
    self.param_saved = False
    self.param_quantized = False
    # self.weight and self.bias are not quantized float parameters
    self.weight_bak = None  # backup of float bias for bias correction
    self.bias_bak = None  # backup of float bias for bias correction
    self.efficency = 0.0   # calibration statistic, updated by eval_qnoise
    self.deviation = 0.0   # calibration statistic, updated by eval_qnoise
    self.stop = False      # calibration converged; stop correcting
    self.rate = NndctOption.nndct_param_corr_rate.value

  def forward(self, input):
    # backup bias for bias correction feature
    if (not self.param_saved):
      if NndctOption.nndct_param_corr.value > 0:
        # backup orignal float parameters
        if self.quant_mode == 1:
          self.weight_bak = self.weight.detach().clone()
          if self.bias is not None:
            self.bias_bak = self.bias.detach().clone()
        # adjust bias: in test mode (quant_mode == 2), apply the correction
        # recorded during calibration.
        if self.quant_mode == 2 and self.bias is not None:
          if not self.quantizer.has_bias_corr(self.node):
            NndctScreenLogger().error2user(QError.BIAS_CORRECTION, f"Bias correction file in quantization result directory does not match current model.")
            exit(2)
          self.bias.data = torch.sub(self.bias.data, torch.tensor(
              self.quantizer.get_bias_corr(self.node),
              device=self.bias.data.device,
              dtype=self.bias.data.dtype))
      self.param_saved = True

    # quantize parameters
    qweight = None
    qbias = None
    inplace = (NndctOption.nndct_quant_off.value or
        self.quantizer is not None and self.quantizer.inplace)
    if (not self.param_quantized):
      if inplace:
        # In-place mode: quantize_tensors mutates the parameters themselves.
        _ = quantize_tensors(
            [self.weight],
            self.node,
            tensor_names = [self.params_name[0]],
            tensor_type = 'param')[0]
        qweight = self.weight
        if self.bias is not None:
          _ = quantize_tensors(
              [self.bias],
              self.node,
              tensor_names = [self.params_name[1]],
              tensor_type = 'param')[0]
          qbias = self.bias
      else:
        qweight = quantize_tensors(
            [self.weight],
            self.node,
            tensor_names = [self.params_name[0]],
            tensor_type = 'param')[0]
        if self.bias is not None:
          qbias = quantize_tensors(
              [self.bias],
              self.node,
              tensor_names = [self.params_name[1]],
              tensor_type = 'param')[0]
      if not NndctOption.nndct_quant_off.value:
        self.param_quantized = True
    else:
      qweight = self.weight
      qbias = self.bias

    # quantize input tensor
    qinput = quantize_tensors([input], self.node, tensor_type='input')[0]
    # torch changed the _output_padding signature after 1.11
    # (added num_spatial_dims and dilation).
    if compare_torch_version(CmpFlag.LESS_EQUAL, "1.11.0"):
      output_padding = self._output_padding(qinput,
                                            None,
                                            self.stride,
                                            self.padding,
                                            self.kernel_size)
    else:
      num_spatial_dims = 2
      output_padding = self._output_padding(
          qinput, None, self.stride, self.padding, self.kernel_size, num_spatial_dims, self.dilation)
    output = torch.nn.functional.conv_transpose2d(qinput,
                                                  weight = qweight,
                                                  bias = qbias,
                                                  stride = self.stride,
                                                  padding = self.padding,
                                                  output_padding = output_padding,
                                                  dilation = self.dilation,
                                                  groups = self.groups)
    output = quantize_tensors([output], self.node)[0]

    # correct weights and bias in calibation
    if NndctOption.nndct_param_corr.value > 0:
      #rate = NndctOption.nndct_param_corr_rate.value
      # statistic of quantization error
      if (self.quant_mode == 1 and not self.stop):
        # Float reference output with the backed-up float parameters.
        if compare_torch_version(CmpFlag.LESS_EQUAL, "1.11.0"):
          output_padding = self._output_padding(input,
                                                None,
                                                self.stride,
                                                self.padding,
                                                self.kernel_size)
        else:
          num_spatial_dims = 2
          output_padding = self._output_padding(input, None, self.stride, self.padding, self.kernel_size, num_spatial_dims, self.dilation)
        res_f = torch.nn.functional.conv_transpose2d(input,
                                                     self.weight_bak,
                                                     bias = self.bias_bak,
                                                     stride = self.stride,
                                                     padding = self.padding,
                                                     output_padding = output_padding,
                                                     dilation = self.dilation,
                                                     groups = self.groups)
        error, rate, self.stop, self.efficency, self.deviation = eval_qnoise(
            output,
            res_f,
            self.efficency,
            self.deviation,
            self.rate,
            self.stop)
        if (not self.stop) and (self.bias is not None):
          # Per-channel bias error: reduce batch (0) and spatial (2, 3) dims.
          error = error.mean(dim = [0, 2, 3])
          self.bias.data = torch.sub(self.bias.data, error, alpha=rate)
        self.param_quantized = False
    return output

  def bias_corr(self):
    # Accumulated bias correction (float backup minus current bias) as a
    # nested Python list, or None when no bias/backup exists.
    if self.bias is not None and self.bias_bak is not None:
      bias_err = torch.sub(self.bias_bak, self.bias.data)
      return bias_err.cpu().numpy().tolist()
    else:
      return None
def ConvTranspose2d(*args, **kwargs):
quant_mode, _ = maybe_get_quantizer()
if quant_mode == None:
return torch.nn.ConvTranspose2d(*args, **kwargs)
return deephi_ConvTranspose2d(*args, **kwargs) | null |
23,555 | import torch
from torch.autograd import Variable
import math
from nndct_shared.utils import NndctOption, NndctScreenLogger
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
from .quant_noise import eval_qnoise
import pytorch_nndct.utils as py_utils
import torch.nn.functional as F
class deephi_PReLU(torch.nn.PReLU):
def __init__(self, *args, **kwards):
def forward(self, input):
def PReLU(*args, **kwargs):
quant_mode, _ = maybe_get_quantizer()
if quant_mode == None:
return torch.nn.PReLU(*args, **kwargs)
return deephi_PReLU(*args, **kwargs) | null |
23,556 | import torch
from torch.autograd import Variable
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.utils import NndctOption
from nndct_shared.quantization import quantize_tensors
import pytorch_nndct.utils as py_utils
class deephi_MaxPool2d(torch.nn.modules.MaxPool2d):
  r"""DeePhi MaxPool2d operation, support float and double"""

  def __init__(self, *args, **kwards):
    super(deephi_MaxPool2d, self).__init__(*args, **kwards)
    self.node = None
    self.quant_mode, self.quantizer = maybe_get_quantizer()

  def forward(self, input):
    # quantize input, run the float kernel, then quantize the result
    quantized_in = quantize_tensors([input], self.node, tensor_type='input')[0]
    pooled = super().forward(quantized_in)
    return quantize_tensors([pooled], self.node)[0]
def MaxPool2d(*args, **kwargs):
quant_mode, _ = maybe_get_quantizer()
if quant_mode == None:
return torch.nn.MaxPool2d(*args, **kwargs)
return deephi_MaxPool2d(*args, **kwargs) | null |
23,557 | import os
import torch
import torch.nn.functional as F
from nndct_shared.utils import NndctOption, NndctScreenLogger
from nndct_shared.base import NNDCT_CONSTANT
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
from nndct_shared.quantization import quant_reluk_params
from nndct_shared.quantization import quant_channel_scale_params
import pytorch_nndct.utils as py_utils
from typing import Any, Optional, Sequence, Union
from torch.autograd import Variable
class deephi_Int(_PrimModule):
  """Prim op wrapper that casts its input to a Python int."""

  def __init__(self):
    super().__init__()

  def forward(self, input):
    # Plain Python cast; no quantization involved.
    return int(input)
def Int(*args, **kwargs):
return deephi_Int(*args, **kwargs) | null |
23,558 | import os
import torch
import torch.nn.functional as F
from nndct_shared.utils import NndctOption, NndctScreenLogger
from nndct_shared.base import NNDCT_CONSTANT
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
from nndct_shared.quantization import quant_reluk_params
from nndct_shared.quantization import quant_channel_scale_params
import pytorch_nndct.utils as py_utils
from typing import Any, Optional, Sequence, Union
from torch.autograd import Variable
class deephi_QuantInput(_PrimModule):
  """Quantizes a network input tensor, optionally dumping channel statistics."""

  def __init__(self):
    super().__init__()

  def forward(self, input):
    output = quantize_tensors([input], self.node, tensor_type='input')[0]
    # Verbose statistics dump for high debug levels.
    if NndctOption.nndct_stat.value > 2:
      print('Channel number of input data: {}'.format(output.shape[1]))
      print('Input data histogram: {}'.format( output.histc(bins = 10).cpu().detach().numpy() ))
      print('Network input channel-wise statistic [Min, Max, Mean, Std]:')
      channels = output.transpose(0, 1)
      for ch in channels:
        print('[{}, {}, {}, {}]'.format( ch.min(), ch.max(), ch.float().mean(), ch.float().std() ))
        print('histogram: {}'.format( ch.histc(bins = 10).cpu().detach().numpy() ))
    return quantize_tensors([output], self.node)[0]
def quant_input(*args, **kwargs):
return deephi_QuantInput(*args, **kwargs) | null |
23,559 | import os
import torch
import torch.nn.functional as F
from nndct_shared.utils import NndctOption, NndctScreenLogger
from nndct_shared.base import NNDCT_CONSTANT
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
from nndct_shared.quantization import quant_reluk_params
from nndct_shared.quantization import quant_channel_scale_params
import pytorch_nndct.utils as py_utils
from typing import Any, Optional, Sequence, Union
from torch.autograd import Variable
class deephi_DequantOutput(_PrimModule):
def __init__(self):
def forward(self, input):
def dequant_output(*args, **kwargs):
return deephi_DequantOutput(*args, **kwargs) | null |
23,560 | import os
import torch
import torch.nn.functional as F
from nndct_shared.utils import NndctOption, NndctScreenLogger
from nndct_shared.base import NNDCT_CONSTANT
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
from nndct_shared.quantization import quant_reluk_params
from nndct_shared.quantization import quant_channel_scale_params
import pytorch_nndct.utils as py_utils
from typing import Any, Optional, Sequence, Union
from torch.autograd import Variable
# NOTE(review): signature-only stub in this extract; the method bodies are
# not visible here, so only the interface is shown.
class deephi_Input(_PrimModule):
  def __init__(self):
  def forward(self, input):
def Input(*args, **kwargs):
  return deephi_Input(*args, **kwargs)
23,561 | import os
import torch
import torch.nn.functional as F
from nndct_shared.utils import NndctOption, NndctScreenLogger
from nndct_shared.base import NNDCT_CONSTANT
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
from nndct_shared.quantization import quant_reluk_params
from nndct_shared.quantization import quant_channel_scale_params
import pytorch_nndct.utils as py_utils
from typing import Any, Optional, Sequence, Union
from torch.autograd import Variable
class deephi_StridedSlice(_PrimModule):
  """Quantization-aware strided slice.

  The first `dim[0]` axes of `input` are kept whole; the following axes are
  sliced with the explicit `start[i]:end[i]:step[i]` triples.
  """

  def __init__(self):
    super().__init__()

  def forward(self, input, dim, start, end, step):
    # Leading axes (before dim[0]) pass through untouched: 0:size:1.
    slices = [slice(0, int(input.size(i)), 1) for i in range(dim[0])]
    # Remaining axes use the provided start/end/step triples.
    for s, e, st in zip(start, end, step):
      slices.append(slice(int(s), int(e), int(st)))
    # Index with a tuple of slice objects instead of assembling a source
    # string and eval()ing it: identical result, no dynamic code execution.
    output = input[tuple(slices)]
    output = quantize_tensors([output], self.node)[0]
    return output
def strided_slice(*args, **kwargs):
  """Factory: build the quantization-aware strided-slice module."""
  return deephi_StridedSlice(*args, **kwargs)
23,562 | import os
import torch
import torch.nn.functional as F
from nndct_shared.utils import NndctOption, NndctScreenLogger
from nndct_shared.base import NNDCT_CONSTANT
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
from nndct_shared.quantization import quant_reluk_params
from nndct_shared.quantization import quant_channel_scale_params
import pytorch_nndct.utils as py_utils
from typing import Any, Optional, Sequence, Union
from torch.autograd import Variable
class deephi_SliceInplaceCopy(_PrimModule):
  """In-place copy of `source` into `input` at position `index` along `dim`."""

  def __init__(self):
    super().__init__()

  def forward(self, input, source, dim, index):
    idx_tensor = torch.tensor([index]).to(input.device)
    # index_copy_ requires matching dtypes; cast the source when they differ.
    src = source if input.dtype == source.dtype else source.to(input.dtype)
    result = input.index_copy_(dim, idx_tensor, src.unsqueeze(dim))
    return quantize_tensors([result], self.node)[0]
def slice_tensor_inplace_copy(*args, **kwargs):
  """Factory: build the in-place slice-copy module."""
  return deephi_SliceInplaceCopy(*args, **kwargs)
23,563 | import os
import torch
import torch.nn.functional as F
from nndct_shared.utils import NndctOption, NndctScreenLogger
from nndct_shared.base import NNDCT_CONSTANT
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
from nndct_shared.quantization import quant_reluk_params
from nndct_shared.quantization import quant_channel_scale_params
import pytorch_nndct.utils as py_utils
from typing import Any, Optional, Sequence, Union
from torch.autograd import Variable
class deephi_Index(_PrimModule):
  """Advanced indexing: `input[index]`, where a sequence `index` may contain
  `None` entries meaning "take the whole axis"."""

  def __init__(self):
    super().__init__()

  def forward(self, input, index):
    if isinstance(index, (list, tuple)):
      # Translate each None into a full slice and index with a tuple,
      # replacing the original string-building + eval() (no dynamic code).
      selectors = tuple(slice(None) if idx is None else idx for idx in index)
      output = input[selectors]
      output = quantize_tensors([output], self.node)[0]
    else:
      # Scalar/tensor index.  The original code path leaves this result
      # unquantized; that behavior is preserved.
      output = input[index]
    return output
def Index(*args, **kwargs):
  """Factory: build the quantization-aware indexing module."""
  return deephi_Index(*args, **kwargs)
23,564 | import os
import torch
import torch.nn.functional as F
from nndct_shared.utils import NndctOption, NndctScreenLogger
from nndct_shared.base import NNDCT_CONSTANT
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
from nndct_shared.quantization import quant_reluk_params
from nndct_shared.quantization import quant_channel_scale_params
import pytorch_nndct.utils as py_utils
from typing import Any, Optional, Sequence, Union
from torch.autograd import Variable
# NOTE(review): signature-only stub in this extract; the method bodies are
# not visible here, so only the interface is shown.
class deephi_IndexInputInplace(_PrimModule):
  def __init__(self):
  def forward(self, input, indices, values, accumulate):
def index_put_(*args, **kwargs):
  return deephi_IndexInputInplace(*args, **kwargs)
23,565 | import os
import torch
import torch.nn.functional as F
from nndct_shared.utils import NndctOption, NndctScreenLogger
from nndct_shared.base import NNDCT_CONSTANT
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
from nndct_shared.quantization import quant_reluk_params
from nndct_shared.quantization import quant_channel_scale_params
import pytorch_nndct.utils as py_utils
from typing import Any, Optional, Sequence, Union
from torch.autograd import Variable
# NOTE(review): signature-only stub in this extract; the method bodies are
# not visible here, so only the interface is shown.
class deephi_ReLUk(_PrimModule):
  def __init__(self):
  def forward(self, input:torch.Tensor, channel_max:Union[torch.Tensor, Sequence[Any], float]):
def Reluk(*args, **kwargs):
  return deephi_ReLUk(*args, **kwargs)
23,566 | import os
import torch
import torch.nn.functional as F
from nndct_shared.utils import NndctOption, NndctScreenLogger
from nndct_shared.base import NNDCT_CONSTANT
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
from nndct_shared.quantization import quant_reluk_params
from nndct_shared.quantization import quant_channel_scale_params
import pytorch_nndct.utils as py_utils
from typing import Any, Optional, Sequence, Union
from torch.autograd import Variable
class deephi_ChannelScale(_PrimModule):
  """Multiply the quantized input by a per-channel scale factor."""

  def __init__(self):
    super().__init__()

  def forward(self, input: torch.Tensor, channel_scale: Union[torch.Tensor, Sequence[Any], float]):
    qinput = quantize_tensors([input], self.node, tensor_type='input')[0]
    # Normalize the scale to a tensor on the same device as the input.
    if isinstance(channel_scale, (list, tuple)):
      scale = torch.Tensor(channel_scale).to(input.device)
    elif isinstance(channel_scale, float):
      scale = torch.Tensor([channel_scale]).to(input.device)
    else:
      scale = channel_scale
    output = qinput * scale
    if self.node.in_quant_part:
      output = quantize_tensors([output], self.node)[0]
    return output
def Channel_Scale(*args, **kwargs):
  """Factory: build the channel-scale module."""
  return deephi_ChannelScale(*args, **kwargs)
23,567 | import os
import torch
import torch.nn.functional as F
from nndct_shared.utils import NndctOption, NndctScreenLogger
from nndct_shared.base import NNDCT_CONSTANT
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
from nndct_shared.quantization import quant_reluk_params
from nndct_shared.quantization import quant_channel_scale_params
import pytorch_nndct.utils as py_utils
from typing import Any, Optional, Sequence, Union
from torch.autograd import Variable
class deephi_ExpandAs(_PrimModule):
  """Expand the quantized input to the shape of `other` (materialized copy)."""

  def __init__(self):
    super().__init__()

  def forward(self, input, other):
    qinput = quantize_tensors([input], self.node, tensor_type='input')[0]
    # clone() materializes the broadcast view so later in-place ops are safe.
    expanded = qinput.expand_as(other).clone()
    return quantize_tensors([expanded], self.node)[0]
def expand_as(*args, **kwargs):
  """Factory: build the expand-as module."""
  return deephi_ExpandAs(*args, **kwargs)
23,568 | import os
import torch
import torch.nn.functional as F
from nndct_shared.utils import NndctOption, NndctScreenLogger
from nndct_shared.base import NNDCT_CONSTANT
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
from nndct_shared.quantization import quant_reluk_params
from nndct_shared.quantization import quant_channel_scale_params
import pytorch_nndct.utils as py_utils
from typing import Any, Optional, Sequence, Union
from torch.autograd import Variable
class deephi_Expand(_PrimModule):
  """Expand the quantized input to an explicit `size` (materialized copy)."""

  def __init__(self):
    super().__init__()

  def forward(self, input, size):
    qinput = quantize_tensors([input], self.node, tensor_type='input')[0]
    # clone() materializes the broadcast view so later in-place ops are safe.
    expanded = qinput.expand(size).clone()
    return quantize_tensors([expanded], self.node)[0]
def expand(*args, **kwargs):
  """Factory: build the expand module."""
  return deephi_Expand(*args, **kwargs)
23,569 | import os
import torch
import torch.nn.functional as F
from nndct_shared.utils import NndctOption, NndctScreenLogger
from nndct_shared.base import NNDCT_CONSTANT
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
from nndct_shared.quantization import quant_reluk_params
from nndct_shared.quantization import quant_channel_scale_params
import pytorch_nndct.utils as py_utils
from typing import Any, Optional, Sequence, Union
from torch.autograd import Variable
class deephi_Correlation1D_Elemwise(_PrimModule):
  """Element-wise 1-D correlation volume between two feature maps.

  Builds pad_size + 1 shifted element-wise products of input_1 against a
  left-padded input_2 and stacks them on a new axis (dim 2).
  """
  def __init__(self):
    super().__init__()
  def forward(self, input_1:torch.Tensor, input_2:torch.Tensor, pad_size:Union[torch.Tensor, Sequence[Any], int]):
    qinput_1 = quantize_tensors([input_1], self.node, tensor_type='input')[0]
    qinput_2 = quantize_tensors([input_2], self.node, tensor_type='input')[0]
    # NOTE(review): converting pad_size to a Tensor here looks suspect --
    # F.pad and range() below need Python ints, so these branches would fail
    # if ever taken.  Presumably pad_size is always an int in practice; verify.
    if isinstance(pad_size, (list, tuple)):
      pad_size = torch.Tensor(pad_size).to(qinput_1.device)
    elif isinstance(pad_size, float):
      pad_size = torch.Tensor([pad_size]).to(qinput_1.device)
    output_dim = pad_size + 1
    B, C, H, W = qinput_1.size()
    # Zero-pad only the left edge of the width axis.
    qinput_2 = F.pad(qinput_2, pad=(pad_size,0,0,0), mode="constant",value=0)
    cv = []
    for i in range(output_dim - 1, -1, -1):
      # Shifted element-wise product for displacement i.
      cost = qinput_1 * qinput_2[:, :, :, i:(i + W)]
      cost = cost.unsqueeze(2)
      cv.append(cost)
    output = torch.cat(cv, 2)
    if self.node.in_quant_part:
      output = quantize_tensors([output], self.node)[0]
    return output
def Correlation1d_Elemwise(*args, **kwargs):
  """Factory: build the element-wise 1-D correlation module."""
  return deephi_Correlation1D_Elemwise(*args, **kwargs)
23,570 | import os
import torch
import torch.nn.functional as F
from nndct_shared.utils import NndctOption, NndctScreenLogger
from nndct_shared.base import NNDCT_CONSTANT
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
from nndct_shared.quantization import quant_reluk_params
from nndct_shared.quantization import quant_channel_scale_params
import pytorch_nndct.utils as py_utils
from typing import Any, Optional, Sequence, Union
from torch.autograd import Variable
class deephi_Correlation2D_Elemwise(_PrimModule):
  """Element-wise 2-D correlation volume between two feature maps.

  Builds (2 * pad_size + 1)**2 shifted element-wise products of input_1
  against a zero-padded input_2 and stacks them on a new axis (dim 2).
  """
  def __init__(self):
    super().__init__()
  def forward(self, input_1:torch.Tensor, input_2:torch.Tensor, pad_size:Union[torch.Tensor, Sequence[Any], int]):
    qinput_1 = quantize_tensors([input_1], self.node, tensor_type='input')[0]
    qinput_2 = quantize_tensors([input_2], self.node, tensor_type='input')[0]
    # NOTE(review): converting pad_size to a Tensor here looks suspect --
    # F.pad and range() below need Python ints, so these branches would fail
    # if ever taken.  Presumably pad_size is always an int in practice; verify.
    if isinstance(pad_size, (list, tuple)):
      pad_size = torch.Tensor(pad_size).to(qinput_1.device)
    elif isinstance(pad_size, float):
      pad_size = torch.Tensor([pad_size]).to(qinput_1.device)
    output_dim = 2 * pad_size + 1
    B, C, H, W = qinput_1.size()
    # Zero-pad all four spatial borders by pad_size.
    qinput_2 = F.pad(qinput_2, [pad_size] * 4)
    cv = []
    for i in range(output_dim):
      for j in range(output_dim):
        # Shifted element-wise product for displacement (i, j).
        cost = qinput_1 * qinput_2[:, :, i:(i + H), j:(j + W)]
        cost = cost.unsqueeze(2)
        cv.append(cost)
    output = torch.cat(cv, 2)
    if self.node.in_quant_part:
      output = quantize_tensors([output], self.node)[0]
    return output
def Correlation2d_Elemwise(*args, **kwargs):
  """Factory: build the element-wise 2-D correlation module."""
  return deephi_Correlation2D_Elemwise(*args, **kwargs)
23,571 | import os
import torch
import torch.nn.functional as F
from nndct_shared.utils import NndctOption, NndctScreenLogger
from nndct_shared.base import NNDCT_CONSTANT
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
from nndct_shared.quantization import quant_reluk_params
from nndct_shared.quantization import quant_channel_scale_params
import pytorch_nndct.utils as py_utils
from typing import Any, Optional, Sequence, Union
from torch.autograd import Variable
# NOTE(review): signature-only stub in this extract; the method bodies are
# not visible here, so only the interface is shown.
class deephi_CostVolume(_PrimModule):
  def __init__(self):
  def forward(self, input_1:torch.Tensor, input_2:torch.Tensor, maxdisp:Union[torch.Tensor, Sequence[Any], int]):
def CostVolume(*args, **kwargs):
  return deephi_CostVolume(*args, **kwargs)
23,572 | import os
import torch
import torch.nn.functional as F
from nndct_shared.utils import NndctOption, NndctScreenLogger
from nndct_shared.base import NNDCT_CONSTANT
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
from nndct_shared.quantization import quant_reluk_params
from nndct_shared.quantization import quant_channel_scale_params
import pytorch_nndct.utils as py_utils
from typing import Any, Optional, Sequence, Union
from torch.autograd import Variable
# NOTE(review): signature-only stub in this extract; the method bodies are
# not visible here, so only the interface is shown.
class deephi_TupleUnpack(_PrimModule):
  def __init__(self):
  def forward(self, input):
def TupleUnpack(*args, **kwargs):
  return deephi_TupleUnpack(*args, **kwargs)
23,573 | import os
import torch
import torch.nn.functional as F
from nndct_shared.utils import NndctOption, NndctScreenLogger
from nndct_shared.base import NNDCT_CONSTANT
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
from nndct_shared.quantization import quant_reluk_params
from nndct_shared.quantization import quant_channel_scale_params
import pytorch_nndct.utils as py_utils
from typing import Any, Optional, Sequence, Union
from torch.autograd import Variable
class deephi_Sub(_PrimModule):
  """Quantized subtraction: input - alpha * other, negated when reverse=True
  (i.e. torch.rsub semantics: alpha * other - input)."""

  def __init__(self, reverse=False):
    super().__init__()
    self.reverse = reverse

  def forward(self, input, other, alpha=1):
    qinput = quantize_tensors([input], self.node, tensor_type='input')[0]
    diff = qinput - alpha * other
    # -(a - alpha*b) == alpha*b - a, the reversed-subtraction result.
    result = -diff if self.reverse is True else diff
    return quantize_tensors([result], self.node)[0]
def Sub(*args, **kwargs):
  """Factory for the quantized subtraction module.

  NOTE(review): the arguments are discarded; deephi_Sub() only takes
  `reverse`, which defaults to False here (compare Rsub below) -- confirm
  that discarding *args/**kwargs is intended.
  """
  return deephi_Sub()
23,574 | import os
import torch
import torch.nn.functional as F
from nndct_shared.utils import NndctOption, NndctScreenLogger
from nndct_shared.base import NNDCT_CONSTANT
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
from nndct_shared.quantization import quant_reluk_params
from nndct_shared.quantization import quant_channel_scale_params
import pytorch_nndct.utils as py_utils
from typing import Any, Optional, Sequence, Union
from torch.autograd import Variable
class deephi_Sub(_PrimModule):
  """Quantized subtraction: input - alpha * other, negated when reverse=True.

  NOTE(review): this is a duplicate of the deephi_Sub definition earlier in
  the file (this copy backs the Rsub factory below).
  """
  def __init__(self, reverse=False):
    super().__init__()
    self.reverse = reverse
  def forward(self, input, other, alpha=1):
    # Quantize the first operand with the node's input configuration.
    qinput = quantize_tensors([input], self.node, tensor_type='input')[0]
    if self.reverse is True:
      # -(a - alpha*b) == alpha*b - a (torch.rsub semantics)
      output = -(qinput - alpha * other)
    else:
      output = qinput - alpha * other
    output = quantize_tensors([output], self.node)[0]
    return output
def Rsub(*args, **kwargs):
  """Factory: reversed subtraction (torch.rsub semantics)."""
  return deephi_Sub(reverse=True)
23,575 | import torch
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
import pytorch_nndct.utils as py_utils
class deephi_Mul(torch.nn.Module):
  """Quantized element-wise multiplication of two tensors."""

  def __init__(self):
    super(deephi_Mul, self).__init__()
    self.quant_mode, self.quantizer = maybe_get_quantizer()
    self.node = None  # graph node, assigned by the framework after construction

  def forward(self, input, other):
    # Both operands share the node's input quantization configuration.
    qinput, qother = quantize_tensors([input, other], self.node, tensor_type='input')
    product = torch.mul(qinput, qother)
    return quantize_tensors([product], self.node)[0]
def Mul(*args, **kwargs):
  """Factory: build the quantized multiplication module."""
  return deephi_Mul(*args, **kwargs)
23,576 | import torch
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
from nndct_shared.utils import NndctOption
import pytorch_nndct.utils as py_utils
from nndct_shared.utils import calculate_op_scale
class deephi_Mean(torch.nn.Module):
  r"""DeePhi Mean (reduce-mean) operation with quantization-aware scaling."""
  def __init__(self, *args, **kwargs):
    super(deephi_Mean, self).__init__()
    self.quant_mode, self.quantizer = maybe_get_quantizer()
    self.node = None  # graph node, assigned by the framework after construction
  def forward(self, input, dim, keepdim):
    # Fall back to the plain float mean when quantization is disabled.
    if self.quantizer is None or self.quant_mode is None or NndctOption.nndct_quant_off.value:
      return self._fp32_forward(input, dim, keepdim)
    else:
      return self._forward(input, dim, keepdim)
  def _fp32_forward(self, input, dim, keepdim):
    # dim=None reduces over all elements, mirroring torch.mean's overloads.
    if dim is None:
      output = torch.mean(input)
    else:
      output = torch.mean(input, dim, keepdim)
    return output
  def _forward(self, input, dim, keepdim):
    qinput = quantize_tensors([input], self.node, tensor_type='input')[0]
    if dim is None:
      output = torch.mean(qinput)
    else:
      output = torch.mean(qinput, dim, keepdim)
    input_shape = self.node.in_tensors[0].shape
    # DIMS == [None] encodes a full reduction over every axis.
    if self.node.node_attr(self.node.op.AttrName.DIMS) == [None]:
      dim_list = [i for i in range(len(input_shape))]
    else:
      dim_list = self.node.node_attr(self.node.op.AttrName.DIMS)
    # rec accumulates the product of the reduced dimension sizes
    # (the divisor of the mean).
    rec = 1
    for i in dim_list:
      input_shape = input.shape[i].item() if isinstance(input.shape[i], torch.Tensor) else input.shape[i]
      rec = rec * input_shape
    # A divisor that is not a power of two cannot be realized by a pure
    # shift on the target hardware, so compensate with an extra scale.
    if (rec & (rec - 1)) != 0:
      scale = calculate_op_scale(rec, self.node)
      output = output * scale
    output = quantize_tensors([output], self.node)[0]
    return output
def Mean(*args, **kwargs):
  """Factory: build the quantized mean-reduction module."""
  return deephi_Mean(*args, **kwargs)
23,577 | import torch
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
import pytorch_nndct.utils as py_utils
# NOTE(review): signature-only stub in this extract; the method bodies are
# not visible here, so only the interface is shown.
class deephi_Cat(torch.nn.Module):
  def __init__(self, *args, **kwargs):
  def forward(self, tensors, dim):
def Cat(*args, **kwargs):
  return deephi_Cat(*args, **kwargs)
23,578 | import numpy as np
def mysigmoid(x):
  """Logistic sigmoid: 1 / (1 + exp(-x)), element-wise for array input."""
  return 1.0 / (1.0 + np.exp(-x))
23,579 | import numpy as np
def mapping_sigm(x, data, shift):
  """Map a 2-D array through a 2048-entry fixed-point sigmoid lookup table.

  Args:
    x: 2-D iterable of float inputs.
    data: lookup table of 2048 Q15-scaled sigmoid samples (non-negative
      inputs index data[1024:], negative inputs index data[:1024]).
    shift: number of fixed-point fraction bits of the quantized input.

  Returns:
    np.float32 array of the same shape with values in [0, 1).
  """
  # Note: the original code also defined an outer `scale` that was shadowed
  # by the inner one and never used; it has been removed.
  inv_step = 2 ** shift

  def __ele_map(x_ele):
    scale = 2 ** -15  # table entries are Q15 fixed point
    # Saturate outside the table's [-8, 8) input range.
    if x_ele >= 8:
      return 1.0 - scale
    elif x_ele < -8:
      return 0.0
    else:
      x_ele = int(x_ele * inv_step)
      if x_ele >= 0:
        # The table is indexed with 7 fractional bits; realign from `shift`.
        if shift >= 7:
          pos = (x_ele >> (shift - 7)) % 1024
        else:
          pos = (x_ele << (7 - shift)) % 1024
        return data[pos + 1024] * scale
      else:
        if shift >= 7:
          pos = (abs(x_ele) >> (shift - 7)) % 1024
        else:
          pos = (abs(x_ele) << (7 - shift)) % 1024
        # Exactly -8 maps to the saturated low end of the curve.
        if x_ele >> shift == -8 and pos == 0:
          return 0.0
        else:
          return data[1024 - pos] * scale

  return np.array([[__ele_map(c) for c in row] for row in x], dtype=np.float32)
23,580 | import numpy as np
def mapping_tanh(x, data, shift):
  """Map a 2-D array through a 2048-entry fixed-point tanh lookup table.

  `data` holds Q15-scaled tanh samples; `shift` is the number of fixed-point
  fraction bits of the quantized input.  Returns a float32 array.
  """
  scale = 1.0 / 2 ** 15   # table entries are Q15 fixed point
  inv_step = 2 ** shift   # quantization step of the input

  def __ele_map(x_ele):
    # Saturate outside the table's [-4, 4) input range.
    if x_ele >= 4:
      return 1.0 - scale
    if x_ele < -4:
      return -1.0
    q = int(x_ele * inv_step)
    mag = q if q >= 0 else abs(q)
    # The table is indexed with 8 fractional bits; realign from `shift`.
    pos = (mag >> (shift - 8)) % 1024 if shift >= 8 else (mag << (8 - shift)) % 1024
    if q >= 0:
      return data[pos + 1024] * scale
    # Exactly -4 maps to the table's lowest entry.
    if q >> shift == -4 and pos == 0:
      return data[pos] * scale
    return data[1024 - pos] * scale

  return np.array([[__ele_map(c) for c in row] for row in x], dtype=np.float32)
23,581 | import numpy as np
def absolute_shift(x, pos, to='left', bitwidth=16):
  """Bidirectional arithmetic shift with saturation to the signed range.

  A negative `pos` flips the direction (e.g. to='left' with pos=-2 shifts
  right by 2).  Results saturate to [-2**(bitwidth-1), 2**(bitwidth-1)-1].
  """
  if to == 'left':
    res = np.left_shift(x, pos) if pos >= 0 else np.right_shift(x, -pos)
  elif to == 'right':
    res = np.right_shift(x, pos) if pos >= 0 else np.left_shift(x, -pos)
  else:
    raise TypeError("shift to {} is not expected".format(to))
  # Saturate to the representable signed `bitwidth` range.
  upper = 2**(bitwidth - 1) - 1
  lower = -2**(bitwidth - 1)
  res = np.where(res > upper, upper, res)
  res = np.where(res < lower, lower, res)
  return res
23,582 | import numpy as np
def absolute_shift_round(x, pos, to='left', bitwidth=16):
  """Bidirectional power-of-two scaling with rounding and saturation.

  Mirrors `absolute_shift` but rounds (instead of truncating) on the
  down-shift direction: a negative `pos` flips the direction, so to='left'
  with pos < 0 is a rounded right shift, and to='right' with pos < 0 is an
  exact left shift.  Results saturate to the signed `bitwidth` range.

  Bugfix: the negative-`pos` branches previously applied the inverse
  operation (multiplying where the commented-out np.right_shift divided,
  and vice versa), disagreeing with `absolute_shift`.
  """
  if to == 'left':
    if pos >= 0:
      # Exact left shift: multiply by 2**pos.
      res = x * (2**pos)
    else:
      # Left shift by a negative amount == rounded right shift,
      # matching np.right_shift(x, -pos) in absolute_shift.
      res = np.round(x / (2**(-pos))).astype(np.int32)
  elif to == 'right':
    if pos >= 0:
      # Rounded right shift: divide by 2**pos with round-to-nearest.
      res = np.round(x / (2**pos)).astype(np.int32)
    else:
      # Right shift by a negative amount == exact left shift.
      res = x * (2**(-pos))
  else:
    raise TypeError("shift to {} is not expected".format(to))
  # Saturate to the representable signed `bitwidth` range.
  res = np.where(res > (2**(bitwidth - 1) - 1), 2**(bitwidth - 1) - 1, res)
  res = np.where(res < -2**(bitwidth - 1), -2**(bitwidth - 1), res)
  return res
23,583 | from typing import List, Tuple
import torch
from torch import Tensor
from torch.nn.utils.rnn import (PackedSequence, pack_padded_sequence,
pad_packed_sequence)
from .rnn_cell import LSTMCell
from .rnn_layer import LSTMLayer, QuantGruLayer, QuantLstmLayer
def init_stacked_lstm(num_layers, layer, first_layer_args, other_layer_args):
  """Build a ModuleList of `num_layers` layers.

  The first layer is constructed with `first_layer_args`; every remaining
  layer uses `other_layer_args` (e.g. different input size for layer 0).
  """
  stack = [layer(*first_layer_args)]
  stack.extend(layer(*other_layer_args) for _ in range(num_layers - 1))
  return torch.nn.ModuleList(stack)
23,584 | from typing import List, Tuple
import torch
from torch import Tensor
from torch.nn.utils.rnn import (PackedSequence, pack_padded_sequence,
pad_packed_sequence)
from .rnn_cell import LSTMCell
from .rnn_layer import LSTMLayer, QuantGruLayer, QuantLstmLayer
# NOTE(review): the definitions below are signature-only stubs in this
# extract; their bodies are not visible here, so only the interfaces
# (constructor and forward signatures) are shown.
class LSTM(torch.nn.Module):
  def __init__(self, num_layers, layer, first_layer_args, other_layer_args):
  def forward(
      self, input: Tensor, states: Tuple[Tensor, Tensor]
  ) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
def get_normal_lstm_param_name(name):
class LSTMCell(torch.nn.Module):
  def __init__(self, input_size: int, hidden_size: int, bias: bool = True) -> None:
  def init_weight(self):
  def forward(self, input: Tensor, state: Tuple[Tensor, Tensor]) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
class LSTMLayer(torch.nn.Module):
  def __init__(self, cell, input_size: int, hidden_size: int, bias: bool = True):
  def forward(self, input: Tensor, state: Tuple[Tensor, Tensor]) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
def stacked_lstm(lstm_module: torch.nn.LSTM) -> LSTM:
  """Convert a torch.nn.LSTM into the custom stacked LSTM, copying weights.

  Relies on torch's parameter ordering: named_parameters() yields each
  layer's parameters contiguously, so grouping by index // params-per-layer
  reconstructs per-layer weight dictionaries.
  """
  num_layers = lstm_module.num_layers
  input_size = lstm_module.input_size
  hidden_size = lstm_module.hidden_size
  bias = lstm_module.bias
  # Layer 0 consumes input_size; later layers consume hidden_size.
  stacked_lstm = LSTM(
      num_layers,
      LSTMLayer,
      first_layer_args=[LSTMCell, input_size, hidden_size, bias],
      other_layer_args=[LSTMCell, hidden_size, hidden_size, bias])
  # Every layer owns the same number of parameters; use that to split the
  # flat named_parameters() stream into per-layer dicts keyed by the
  # normalized parameter name.
  param_num_per_layer = len(list(lstm_module.parameters())) // num_layers
  param_layer_list = []
  for i, (name, param) in enumerate(lstm_module.named_parameters()):
    if i % param_num_per_layer == 0:
      param_layer_list.append({})
    param_layer_list[-1][get_normal_lstm_param_name(name)] = param
  # Copy the source weights into the rebuilt layers, then let each cell
  # finish its own (re)initialization from the copied weights.
  for layer_idx in range(num_layers):
    for name, param in stacked_lstm.layers[layer_idx].named_parameters():
      normalized_name = get_normal_lstm_param_name(name)
      if normalized_name is not None and normalized_name in param_layer_list[layer_idx]:
        param.data.copy_(param_layer_list[layer_idx][normalized_name].data)
    stacked_lstm.layers[layer_idx].cell.init_weight()
  return stacked_lstm
23,585 | import torch
from torch.autograd import Variable
import torch.nn.functional as F
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
from nndct_shared.utils import NndctOption
import pytorch_nndct.utils as py_utils
from .fix_ops import fake_quantize_per_tensor
from nndct_shared.utils import NNDCT_KEYS, GLOBAL_MAP
# NOTE(review): signature-only stub in this extract; the method bodies are
# not visible here, so only the interface is shown.
class deephi_Hardswish(torch.nn.Module):
  def __init__(self, inplace=False, *args, **kwards):
  def forward(self, input):
def Hardswish(*args, **kwargs):
  return deephi_Hardswish(*args, **kwargs)
23,586 | import torch
from nndct_shared.utils import NndctOption
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
import pytorch_nndct.utils as py_utils
class deephi_Embedding(torch.nn.modules.sparse.Embedding):
  r"""DeePhi embedding lookup with quantized weight table."""
  def __init__(self, *args, **kwargs):
    super(deephi_Embedding, self).__init__(*args, **kwargs)
    self.params_name = None  # parameter names, assigned by the framework
    self.node = None         # graph node, assigned by the framework
    self.quant_mode, self.quantizer = maybe_get_quantizer()
    self.param_quantized = False  # weight is quantized at most once per calibration
  def forward(self, input):
    # Outside quantization: plain float embedding lookup.
    if self.quant_mode <= 0 or (not self.node.in_quant_part):
      return torch.nn.functional.embedding(input, self.weight, padding_idx=self.padding_idx)
    # In-place mode quantizes self.weight directly instead of copying it.
    inplace = (NndctOption.nndct_quant_off.value or
        self.quantizer is not None and self.quantizer.inplace)
    # params = []
    # qparams = []
    # param_names = []
    # for k in self.node.op.params.keys():
    #   pname = self.node.op.params[k].name
    #   p = getattr(self.quantizer.quant_model, k.value)
    #   param_names.append(pname)
    #   params.append(p)
    if not self.param_quantized:
      if inplace:
        _ = quantize_tensors([self.weight],
                             self.node,
                             tensor_names= [self.params_name[0]],
                             tensor_type='param')
        qparams = [self.weight]
      else:
        qparams = quantize_tensors([self.weight],
                                   self.node,
                                   tensor_names= [self.params_name[0]],
                                   tensor_type='param')
      # With quantization globally disabled, keep re-quantizing every call.
      if not NndctOption.nndct_quant_off.value:
        self.param_quantized = True
    else:
      qparams = [self.weight]
    inputs = quantize_tensors([input], self.node, tensor_type='input')
    output = torch.nn.functional.embedding(inputs[0], qparams[0], padding_idx=self.padding_idx)
    output = quantize_tensors([output], self.node)[0]
    return output
def embedding(*args, **kwargs):
  """Factory: plain torch.nn.Embedding outside quantization, else deephi_Embedding."""
  quant_mode, _ = maybe_get_quantizer()
  if quant_mode == None:
    return torch.nn.Embedding(*args, **kwargs)
  return deephi_Embedding(*args, **kwargs)
23,587 | import math
import torch
import numpy as np
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import kernel_need_quant
from nndct_shared.quantization import quantize_tensors
from nndct_shared.utils import NndctOption
from .fix_ops import NndctSoftmaxExpApproximate, NndctSoftmaxLOD, NndctSoftmaxSimulationPart1, NndctSoftmaxSimulationPart2, NndctExpApprAIE2, NndctInverseAIE2
import pytorch_nndct.utils as py_utils
from pytorch_nndct.nn.nonlinear import approx
class deephi_Softmax(torch.nn.modules.Softmax):
  r"""DeePhi Softmax supporting several hardware-approximation modes.

  The mode is selected via NndctOption.nndct_op_softmax_mode:
    - default / exporting: float softmax with input/output quantization
    - "hardware_pl":   Q15 exp/log approximation (8-bit PL implementation)
    - "liyi":          bfloat16 two-part simulation (any bit width)
    - "aie2_lut_16bw": AIE2 lookup-table exp + inverse (8/16-bit)
    - "bert_8bw":      bfloat16 exp plus bit-level float reciprocal
    - "ipu_8bw":       polynomial softmax approximation
  """

  def __init__(self, dim = None):
    super(deephi_Softmax, self).__init__()
    self.dim = dim
    self.quant_mode, self.quantizer = maybe_get_quantizer()
    self.node = None  # graph node, assigned by the framework after construction

  def forward(self, input):
    if (not kernel_need_quant(self.quantizer, self.node) or
        self.quantizer.exporting) or NndctOption.nndct_gemm88.value:
      # Method 0: plain float softmax; quantize only the output.
      output = super().forward(input)
      output = quantize_tensors([output], self.node)[0]
    else:
      # Quantize the input activation first.
      qinput = quantize_tensors([input], self.node, tensor_type='input')[0]
      # Walk back to the producer node that actually carries a quant config.
      input_name = self.node.in_nodes[0]
      input_node = self.quantizer.configer.get_Nndctnode(input_name)
      if not self.quantizer.configer.node_output_quantizable(input_node):
        input_name = input_node.in_nodes[0]
      if NndctOption.nndct_op_softmax_mode.value == "hardware_pl":
        # Method 1: hardware PL softmax with 8-bit width.
        x_max = torch.max(qinput, dim=self.dim, keepdim=True).values
        # exp of the max-shifted input, approximated in Q15 (47274/2**15 ~ ln2 scaling).
        uvi = 47274 / math.pow(2, 15) * (qinput - x_max)
        exp_appr = torch.empty_like(uvi)
        NndctSoftmaxExpApproximate(uvi, exp_appr)
        exp_appr = torch.round(exp_appr * 10**5)
        exp_appr = exp_appr / (10**5)
        # log(sum(exp)) via leading-one detection: exp_sum = m * 2**w.
        # (local renamed from `F` to avoid shadowing torch.nn.functional)
        exp_sum = torch.sum(exp_appr, dim=self.dim, keepdim=True)
        w = torch.empty_like(exp_sum)
        NndctSoftmaxLOD(exp_sum, w)
        m = exp_sum / (2**w)
        lnF = torch.round((22713 / (2**15)) * (m - 1 + w) * 10**5) / 10**5
        uvi = 47274 / (2**15) * (qinput - x_max - lnF)
        exp_appr = torch.empty_like(uvi)
        NndctSoftmaxExpApproximate(uvi, exp_appr)
        exp_appr = torch.round(exp_appr * 10**5) / 10**5
        output = quantize_tensors([exp_appr], self.node)[0]
      elif NndctOption.nndct_op_softmax_mode.value == "liyi":
        # Method 2: Liyi softmax (any bit width), bfloat16 rounding.
        x_max = torch.max(qinput, dim=self.dim, keepdim=True).values
        shifted = qinput - x_max
        exp_appr = torch.empty_like(shifted)
        NndctSoftmaxSimulationPart1(shifted, exp_appr)
        exp_sum = torch.sum(exp_appr, dim=self.dim, keepdim=True)
        sum_inv = torch.empty_like(exp_sum)
        NndctSoftmaxSimulationPart2(exp_sum, sum_inv)
        output = (exp_appr * sum_inv).bfloat16().float()
        output = quantize_tensors([output], self.node)[0]
      elif NndctOption.nndct_op_softmax_mode.value == "aie2_lut_16bw":
        # Method 3: AIE2 lookup-table softmax (8-bit or 16-bit).
        bw = self.quantizer.get_quant_config(self.node.name, False)[0]
        fragpos = self.quantizer.get_quant_config(input_name, False)[1]
        # Subtract the true max or a constant bias depending on the input's
        # fraction width (mirrors the hardware's range handling).
        if bw == 8 and fragpos < 2:
          # NOTE(review): the inner fragpos test is redundant under the
          # outer condition, so the `qinput -= 31.75` branch is currently
          # unreachable -- confirm the intended outer condition.
          if fragpos < 2:
            qinput_max = torch.max(qinput, dim=self.dim, keepdim=True).values
            qinput -= qinput_max
          else:
            qinput -= 31.75
        else:  # bw == 16
          if fragpos < 10:
            qinput_max = torch.max(qinput, dim=self.dim, keepdim=True).values
            qinput -= qinput_max
          else:
            qinput -= 32
        exp_appr = torch.empty_like(qinput)
        NndctExpApprAIE2(qinput, exp_appr, bw)
        exp_sum = torch.sum(exp_appr, dim=self.dim, keepdim=True)
        sum_inv = torch.empty_like(exp_sum)
        NndctInverseAIE2(exp_sum, sum_inv)
        output = (exp_appr * sum_inv).bfloat16().float()
        output = quantize_tensors([output], self.node)[0]
      elif NndctOption.nndct_op_softmax_mode.value == "bert_8bw" or NndctOption.nndct_ip_v70_bert.value:
        # Method 4: BERT 8-bit softmax -- bfloat16 exp plus a bit-level
        # approximate float reciprocal for the normalization.
        def compute_inv(x):
          # Approximate 1/x by manipulating the float32 bit pattern:
          # negate the exponent and apply a rounded 7-bit mantissa inverse.
          exp_mask = 0x7F800000
          mantissa_mask = 0x007FFFFF
          mantissa_Q = 0x00008000
          x_f = x.astype(np.float32)
          B_x = x_f.view(np.uint32)
          exponent = (B_x & exp_mask) >> 23
          mantissa = np.where(((B_x & mantissa_Q) == 0), ((B_x & mantissa_mask) >> 16), ((B_x & mantissa_mask) >> 16) + 1)
          inv_exponent = 253 - exponent
          inv_mantissa = np.round(256 * 128 / (128 + mantissa) - 128)
          inv_x_val = (np.int32(inv_exponent) << 23) + (np.int32(inv_mantissa) << 16)
          inv_x = inv_x_val.view(np.float32)
          return inv_x
        # bw is fetched for parity with the other modes but unused here.
        bw = self.quantizer.get_quant_config(self.node.name, False)[0]
        fragpos = self.quantizer.get_quant_config(input_name, False)[1]
        # bfloat16 exp; inputs below the representable range map to 0.
        exp_appr = torch.exp(qinput).bfloat16().float()
        exp_appr = torch.where(qinput > -128 / (2 ** fragpos), exp_appr, 0)
        if self.dim == -1:
          # Sum in groups of 16 lanes to mimic the AIE vector adder.
          # assumes a 4-D input whose last dim is a multiple of 16 -- TODO confirm.
          sum_v32 = exp_appr.reshape((exp_appr.shape[0], exp_appr.shape[1], exp_appr.shape[2], exp_appr.shape[3] // 16, 16))
          sum_v32 = sum_v32.permute((0, 1, 2, 4, 3))
          sum_v32 = sum_v32.sum(dim=-1)
          sum_v32_shape = sum_v32.shape
          sum_v32 = sum_v32.reshape(-1, 16)
          sum_v32 = py_utils.aie_add_v16(sum_v32)
          sum_v32_shape = [int(x) for x in sum_v32_shape][:-1] + [1]
          sum_v32 = sum_v32.reshape(sum_v32_shape)
        else:
          # BUGFIX: keep the sum as a tensor here.  The previous code
          # converted it with .cpu().numpy() and then crashed calling
          # sum_v32.cpu() again on the ndarray below.
          sum_v32 = torch.sum(exp_appr, dim=self.dim, keepdim=True)
        sum_inv = compute_inv(sum_v32.cpu().numpy())
        sum_inv = torch.from_numpy(sum_inv).bfloat16().float().to(qinput.device)
        output = (exp_appr * sum_inv).bfloat16().float()
        output = quantize_tensors([output], self.node, method=4)[0]
      elif NndctOption.nndct_op_softmax_mode.value == "ipu_8bw":
        # Method 5: polynomial approximation (bfloat16); floor rounding.
        output = approx.softmax_approx_poly(qinput)  # defaults: axis=-1, exp_table_size=1, degree=3
        output = output.float()  # back to float32
        output = quantize_tensors([output], self.node, method=4)[0]
      else:
        # Method 0: quantize input and output of the float softmax.
        output = super().forward(qinput)
        output = quantize_tensors([output], self.node)[0]
    return output
def Softmax(*args, **kwargs):
  """Factory: plain torch.nn.Softmax outside quantization, else deephi_Softmax."""
  quant_mode, _ = maybe_get_quantizer()
  if quant_mode == None:
    return torch.nn.Softmax(*args, **kwargs)
  return deephi_Softmax(*args, **kwargs)
23,588 | import torch
from torch.autograd import Variable
import math
from nndct_shared.utils import NndctOption, NndctScreenLogger
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
from .quant_noise import eval_qnoise
import pytorch_nndct.utils as py_utils
class deephi_clamp(torch.nn.Module):
  r"""DeePhi clamp operation, support float and double"""

  def __init__(self, *args, **kwards):
    super(deephi_clamp, self).__init__(*args, **kwards)
    # Quantization mode and quantizer come from the global quant context.
    self.quant_mode, self.quantizer = maybe_get_quantizer()
    # Graph node this module belongs to; assigned externally after construction.
    self.node = None

  def forward(self, input, min=None, max=None):
    # Quantize the input tensor according to this node's input quant config.
    qinput = quantize_tensors([input], self.node, tensor_type='input')[0]
    qmin, qmax = None, None
    # The clamp bounds are treated as quantized parameters of the node.
    # NOTE(review): self.params_name is never set in __init__ — presumably
    # assigned externally like self.node; confirm against the caller.
    if min is not None:
      min = torch.tensor(min).to(input.device)
      qmin = quantize_tensors([min], self.node, tensor_names=[self.params_name[0]], tensor_type='param')[0]
    if max is not None:
      max = torch.tensor(max).to(input.device)
      qmax = quantize_tensors([max], self.node, tensor_names=[self.params_name[1]], tensor_type='param')[0]
    output = torch.clamp(input=qinput, min=qmin, max=qmax)
    # Quantize the clamp result as the node output.
    output = quantize_tensors([output], self.node)[0]
    return output
# # quantize parameters
# qweight = None
# inplace = (NndctOption.nndct_quant_off.value or
# self.quantizer is not None and self.quantizer.inplace)
# # quantize input tensor
# qinput = quantize_tensors([input], self.node, tensor_type='input')[0]
# # output = torch.clamp(input=qinput, min=min, max=max)
# qmin, qmax = quantize_tensors(torch.tensor([min, max]), self.node, tensor_type='input')
# output = torch.clamp(input=qinput, min=qmin, max=qmax)
# output = quantize_tensors([output], self.node)[0]
# return output
def Clamp(*args, **kwargs):
  """Factory: with no active quantizer, directly apply ``torch.clamp`` to the
  given arguments; otherwise return a quantization-aware ``deephi_clamp``
  module.

  NOTE(review): the two branches return different kinds of values (a clamped
  tensor vs. a module) — kept as-is for caller compatibility.
  """
  quant_mode, _ = maybe_get_quantizer()
  # fixed: identity comparison with None (was `quant_mode == None`)
  if quant_mode is None:
    return torch.clamp(*args, **kwargs)
  return deephi_clamp(*args, **kwargs)
23,589 | import torch
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.utils import NndctOption, NndctScreenLogger, QWarning
from nndct_shared.quantization import kernel_need_quant
from nndct_shared.quantization import quantize_tensors
import numpy as np
import pytorch_nndct.utils as py_utils
from .fix_ops import NndctAIESqrt
class deephi_sqrt(torch.nn.Module):
  r"""DeePhi sqrt operation"""

  def __init__(self, *args, **kwargs):
    super(deephi_sqrt, self).__init__(*args, **kwargs)
    # Quantization mode and quantizer come from the global quant context.
    self.quant_mode, self.quantizer = maybe_get_quantizer()
    # Graph node this module belongs to; assigned externally.
    self.node = None

  def forward(self, input):
    # ensure input>=0: sqrt is undefined for negatives, so negative elements
    # are zeroed out (with a user-facing warning) before anything else.
    if torch.nonzero(input<0, as_tuple=False).numel() > 0:
      NndctScreenLogger().warning2user(QWarning.TENSOR_NEGATIVE, f"Elements in input tensor of node {self.node.name} are negative, which is not permittable for 'sqrt' operation. The negative numbers have been replaced by zero.")
      zero = torch.zeros_like(input)
      input = torch.where(input<0, zero, input)
    if not kernel_need_quant(self.quantizer, self.node): # not quantizable
      # Float path: plain sqrt, then (no-op or pass-through) output quantization.
      output = torch.sqrt(input)
      output = quantize_tensors([output], self.node)[0]
      return output
    # quantize input tensor
    qinput = quantize_tensors([input], self.node, tensor_type='input')[0]
    if NndctOption.nndct_op_sqrt_mode.value == "ipu_8bw":
      # sqrt=x*(1/sqrt(x)): cuda/cpu — dedicated AIE kernel fills `output`.
      output = torch.empty_like(qinput)
      NndctAIESqrt(qinput, output) # float32
      # quantize output (method=4: rounding mode used by the AIE path)
      output = quantize_tensors([output], self.node, method=4)[0]
    else:
      output = torch.sqrt(qinput)
      output = quantize_tensors([output], self.node)[0]
    return output
def sqrt(*args, **kwargs):
  """Factory returning the quantization-aware DeePhi sqrt module.

  Unlike Softmax/Clamp, there is no float fallback here: the deephi module
  is returned unconditionally.
  """
  return deephi_sqrt(*args, **kwargs)
23,590 | import torch
from nndct_shared.quantization.utils import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
import pytorch_nndct.utils as py_utils
# NOTE(review): this fragment is truncated in the dump — the bodies of
# __init__ and forward are missing and the class is not syntactically valid
# as shown; restore them from the original source before use.
class deephi_Matmul(torch.nn.Module):
  def __init__(self):
  def forward(self, input, other):
def Matmul(*args, **kwargs):
  """Factory returning the DeePhi quantization-aware matmul module."""
  return deephi_Matmul(*args, **kwargs)
23,591 | import sys
import torch
from ..load_kernels import *
import copy
import numpy as np
from nndct_shared.utils import NndctOption, NndctScreenLogger
from pytorch_nndct.nn.utils.decorator import pre_and_post_process_f16_tensor
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def support_onnx_export():
  """True when the installed torch is >= 1.7.0, i.e. recent enough for the
  registered ``torch.ops.vai`` custom-op (ONNX-exportable) kernel path."""
  # Return the comparison directly instead of if/else with boolean literals.
  return compare_torch_version(CmpFlag.GREATER_EQUAL, "1.7.0")
import torch
from torch.utils.cpp_extension import load, _import_module_from_library
The provided code snippet includes necessary dependencies for implementing the `NndctFixNeuron` function. Write a Python function `def NndctFixNeuron(Tinput, Toutput, maxamp, method=2)` to solve the following problem:
if Tinput.device == torch.device("cpu"): output = Tinput.cuda() nndct_kernels.FixNeuronV2(output, output, valmax, valamp, method) Tinput.copy_(output.cpu()) return Tinput # cpu fix neuron """ # output = Tinput.cpu().detach().numpy() # output = output * valamp # if method == 2: # output = np.where(output > valmax - 1, (valmax - 1), output) # output = np.where(output < (-valmax), -valmax, output) # output = np.where(np.logical_and(output > 0, np.logical_and(np.floor(output) % 2 == 0, output - np.floor(output) == 0.5)), np.ceil(output), output) # output = np.where(output >= 0, np.round(output), output) # output = np.where(np.logical_and(output < 0, output - np.floor(output) == 0.5), np.ceil(output), output) # output = np.where(output < 0, np.round(output), output) # elif method == 3: # output = np.where(output > valmax - 1, (valmax - 1), output) # output = np.where(output < (-valmax), -valmax, output) # output = np.where(np.logical_and(output > 0, np.logical_and(np.floor(output) % 2 == 0, output - np.floor(output) == 0.5)), np.ceil(output), output) # output = np.where(output >= 0, np.round(output), output) # output = np.where(np.logical_and(output < 0, np.logical_and(np.ceil(output) % 2 == 0, output - np.floor(output) == 0.5)), np.floor(output), output) # output = np.where(output < 0, np.round(output), output) # Tinput.copy_(torch.from_numpy(output)) # Tinput.div_(valamp) # return Tinput """ else: nndct_kernels.FixNeuronV2(Tinput, Toutput, valmax, valamp, method) return Toutput
Here is the function:
def NndctFixNeuron(Tinput, Toutput, maxamp, method=2):
  """Fixed-point fake quantization of ``Tinput`` into ``Toutput``.

  maxamp: pair ``(valmax, valamp)`` — the clamp range becomes
    ``[-valmax, valmax - 1]`` and ``valamp`` is the scaling factor
    (presumably 2**fix_pos — TODO confirm).
  method: rounding-method selector forwarded to the kernel.
  Returns ``Toutput``.
  """
  valmax, valamp = maxamp[0], maxamp[1]
  valmin = -valmax
  valmax = valmax - 1
  # device_id: 1 selects the CPU kernel, 0 the GPU kernel.
  device_id = 1 if Tinput.device == torch.device("cpu") else 0
  if support_onnx_export():
    # torch >= 1.7: use the registered custom op so ONNX export can trace it.
    Toutput = torch.ops.vai.fix_neuron(Tinput, valmin, valmax,
            valamp, 0, method, device_id, 1)
  else:
    nndct_kernels.FixNeuronV2(Tinput, Toutput, valmin, valmax,
                              valamp, 0, method, device_id)
  return Toutput
'''
if Tinput.device == torch.device("cpu"):
output = Tinput.cuda()
nndct_kernels.FixNeuronV2(output, output, valmax,
valamp, method)
Tinput.copy_(output.cpu())
return Tinput
# cpu fix neuron
"""
# output = Tinput.cpu().detach().numpy()
# output = output * valamp
# if method == 2:
# output = np.where(output > valmax - 1, (valmax - 1), output)
# output = np.where(output < (-valmax), -valmax, output)
# output = np.where(np.logical_and(output > 0, np.logical_and(np.floor(output) % 2 == 0, output - np.floor(output) == 0.5)), np.ceil(output), output)
# output = np.where(output >= 0, np.round(output), output)
# output = np.where(np.logical_and(output < 0, output - np.floor(output) == 0.5), np.ceil(output), output)
# output = np.where(output < 0, np.round(output), output)
# elif method == 3:
# output = np.where(output > valmax - 1, (valmax - 1), output)
# output = np.where(output < (-valmax), -valmax, output)
# output = np.where(np.logical_and(output > 0, np.logical_and(np.floor(output) % 2 == 0, output - np.floor(output) == 0.5)), np.ceil(output), output)
# output = np.where(output >= 0, np.round(output), output)
# output = np.where(np.logical_and(output < 0, np.logical_and(np.ceil(output) % 2 == 0, output - np.floor(output) == 0.5)), np.floor(output), output)
# output = np.where(output < 0, np.round(output), output)
# Tinput.copy_(torch.from_numpy(output))
# Tinput.div_(valamp)
# return Tinput
"""
else:
nndct_kernels.FixNeuronV2(Tinput, Toutput, valmax,
valamp, method)
return Toutput
''' | if Tinput.device == torch.device("cpu"): output = Tinput.cuda() nndct_kernels.FixNeuronV2(output, output, valmax, valamp, method) Tinput.copy_(output.cpu()) return Tinput # cpu fix neuron """ # output = Tinput.cpu().detach().numpy() # output = output * valamp # if method == 2: # output = np.where(output > valmax - 1, (valmax - 1), output) # output = np.where(output < (-valmax), -valmax, output) # output = np.where(np.logical_and(output > 0, np.logical_and(np.floor(output) % 2 == 0, output - np.floor(output) == 0.5)), np.ceil(output), output) # output = np.where(output >= 0, np.round(output), output) # output = np.where(np.logical_and(output < 0, output - np.floor(output) == 0.5), np.ceil(output), output) # output = np.where(output < 0, np.round(output), output) # elif method == 3: # output = np.where(output > valmax - 1, (valmax - 1), output) # output = np.where(output < (-valmax), -valmax, output) # output = np.where(np.logical_and(output > 0, np.logical_and(np.floor(output) % 2 == 0, output - np.floor(output) == 0.5)), np.ceil(output), output) # output = np.where(output >= 0, np.round(output), output) # output = np.where(np.logical_and(output < 0, np.logical_and(np.ceil(output) % 2 == 0, output - np.floor(output) == 0.5)), np.floor(output), output) # output = np.where(output < 0, np.round(output), output) # Tinput.copy_(torch.from_numpy(output)) # Tinput.div_(valamp) # return Tinput """ else: nndct_kernels.FixNeuronV2(Tinput, Toutput, valmax, valamp, method) return Toutput |
23,592 | import sys
import torch
from ..load_kernels import *
import copy
import numpy as np
from nndct_shared.utils import NndctOption, NndctScreenLogger
from pytorch_nndct.nn.utils.decorator import pre_and_post_process_f16_tensor
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def support_onnx_export():
  """True when the installed torch is >= 1.7.0, i.e. recent enough for the
  registered ``torch.ops.vai`` custom-op (ONNX-exportable) kernel path."""
  # Return the comparison directly instead of if/else with boolean literals.
  return compare_torch_version(CmpFlag.GREATER_EQUAL, "1.7.0")
def clone_view_tensor(tensor):
  """Return ``tensor`` unchanged unless it is a torch view whose storage is
  larger than its element count; such views are cloned so downstream kernels
  see a self-contained buffer. Non-tensors pass through untouched."""
  is_strided_view = (
      isinstance(tensor, torch.Tensor) and
      hasattr(tensor, "storage") and
      hasattr(tensor, "numel") and
      tensor.storage().size() != tensor.numel())
  return tensor.clone() if is_strided_view else tensor
def diffs_fix_pos(input, bit_width, scope, method):
  """Search the best power-of-two quantization scale (fix position).

  Starting from the scale implied by the tensor's min/max range, tries
  ``scope`` successive scales and keeps the one minimizing the summed
  squared quantization error.
  """
  # get max and min element in the tensor
  abs_max = 1 << (bit_width - 1)
  fix_lb = -abs_max - 0.5;
  fix_ub = abs_max - 0.5;
  x_max = torch.max(input)
  x_min = torch.min(input)
  # calculate step and fix pos based on max and min value
  step = torch.max(x_min / fix_lb, x_max / fix_ub);
  # 18 is the fallback fix position for a (near) all-zero tensor.
  max_scale = torch.floor(torch.log2(1.0/step)) if step > sys.float_info.min else torch.tensor(18)
  # calculate step based on diffs
  final_scale = max_scale
  fixed_diff_min = sys.float_info.max
  # NOTE(review): indentation reconstructed — either nesting of the loop
  # (inside or after this guard) returns the same value for scope <= 1.
  if scope > 1:
    # avoid clone multiple times
    input = clone_view_tensor(input)
  for i in range(0, scope):
    scale = max_scale + i
    qinput = fake_quantize_per_tensor(
        input,
        pow(2.0, scale),
        0,
        -abs_max,
        abs_max-1,
        method,
        0)
    # accumulate the squared error between the original and quantized input
    qinput = torch.sub(input, qinput)
    qinput = torch.pow(qinput, 2.0)
    diff = torch.sum(qinput).item()
    if diff < fixed_diff_min:
      final_scale = scale
      fixed_diff_min = diff
  return final_scale
import torch
from torch.utils.cpp_extension import load, _import_module_from_library
def NndctDiffsFixPos(Tinput, Tbuffer, Tfixpos, bit_width=8, range=5, method=2):
  """Compute the best fix position for ``Tinput`` via the native kernel,
  writing the result into ``Tfixpos``.

  NOTE(review): the parameter name ``range`` shadows the builtin; renaming it
  would break keyword callers, so it is kept — do not call range() here.
  """
  # device_id: 1 selects the CPU kernel, 0 the GPU kernel.
  device_id = 1 if Tinput.device == torch.device("cpu") else 0
  # Views must be materialized before being handed to the C++/CUDA kernels.
  Tinput = clone_view_tensor(Tinput)
  if support_onnx_export():
    torch.ops.vai.diffs_fix_pos(Tinput, Tbuffer, Tfixpos, bit_width, range, method, device_id)
  else:
    nndct_kernels.DiffsFixPos(Tinput, Tbuffer, Tfixpos, bit_width, range, method, device_id)
23,593 | import sys
import torch
from ..load_kernels import *
import copy
import numpy as np
from nndct_shared.utils import NndctOption, NndctScreenLogger
from pytorch_nndct.nn.utils.decorator import pre_and_post_process_f16_tensor
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def support_onnx_export():
  """True when the installed torch is >= 1.7.0, i.e. recent enough for the
  registered ``torch.ops.vai`` custom-op (ONNX-exportable) kernel path."""
  # Return the comparison directly instead of if/else with boolean literals.
  return compare_torch_version(CmpFlag.GREATER_EQUAL, "1.7.0")
def diffs_fix_pos(input, bit_width, scope, method):
  """Search the best power-of-two quantization scale (fix position).

  Starting from the scale implied by the tensor's min/max range, tries
  ``scope`` successive scales and keeps the one minimizing the summed
  squared quantization error.
  """
  # get max and min element in the tensor
  abs_max = 1 << (bit_width - 1)
  fix_lb = -abs_max - 0.5;
  fix_ub = abs_max - 0.5;
  x_max = torch.max(input)
  x_min = torch.min(input)
  # calculate step and fix pos based on max and min value
  step = torch.max(x_min / fix_lb, x_max / fix_ub);
  # 18 is the fallback fix position for a (near) all-zero tensor.
  max_scale = torch.floor(torch.log2(1.0/step)) if step > sys.float_info.min else torch.tensor(18)
  # calculate step based on diffs
  final_scale = max_scale
  fixed_diff_min = sys.float_info.max
  # NOTE(review): indentation reconstructed — either nesting of the loop
  # (inside or after this guard) returns the same value for scope <= 1.
  if scope > 1:
    # avoid clone multiple times
    input = clone_view_tensor(input)
  for i in range(0, scope):
    scale = max_scale + i
    qinput = fake_quantize_per_tensor(
        input,
        pow(2.0, scale),
        0,
        -abs_max,
        abs_max-1,
        method,
        0)
    # accumulate the squared error between the original and quantized input
    qinput = torch.sub(input, qinput)
    qinput = torch.pow(qinput, 2.0)
    diff = torch.sum(qinput).item()
    if diff < fixed_diff_min:
      final_scale = scale
      fixed_diff_min = diff
  return final_scale
import torch
from torch.utils.cpp_extension import load, _import_module_from_library
def NndctDiffsFixPosChannel(Tinput, Tbuffer, Tfixpos, axis, bit_width=8, scope=5, method=2):
  """Per-channel fix-position search: slice input/buffer along ``axis`` and
  run the diffs-fix-pos kernel once per channel, filling ``Tfixpos``."""
  device_id = 1 if Tinput.device == torch.device("cpu") else 0
  input_split = torch.split(Tinput, 1, dim=axis)
  buffer_split = torch.split(Tbuffer, 1, dim=axis)
  # TODO(@kewang): The split is a tensor view operation. Is it neccessary to clone tensor before calib and test ?
  op = (torch.ops.vai.diffs_fix_pos if support_onnx_export()
        else nndct_kernels.DiffsFixPos)
  for ch, (in_slice, buf_slice) in enumerate(zip(input_split, buffer_split)):
    op(in_slice, buf_slice, Tfixpos[ch], bit_width, scope, method, device_id)
23,594 | import sys
import torch
from ..load_kernels import *
import copy
import numpy as np
from nndct_shared.utils import NndctOption, NndctScreenLogger
from pytorch_nndct.nn.utils.decorator import pre_and_post_process_f16_tensor
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def support_onnx_export():
  """True when the installed torch is >= 1.7.0, i.e. recent enough for the
  registered ``torch.ops.vai`` custom-op (ONNX-exportable) kernel path."""
  # Return the comparison directly instead of if/else with boolean literals.
  return compare_torch_version(CmpFlag.GREATER_EQUAL, "1.7.0")
def clone_view_tensor(tensor):
  """Return ``tensor`` unchanged unless it is a torch view whose storage is
  larger than its element count; such views are cloned so downstream kernels
  see a self-contained buffer. Non-tensors pass through untouched."""
  is_strided_view = (
      isinstance(tensor, torch.Tensor) and
      hasattr(tensor, "storage") and
      hasattr(tensor, "numel") and
      tensor.storage().size() != tensor.numel())
  return tensor.clone() if is_strided_view else tensor
import torch
from torch.utils.cpp_extension import load, _import_module_from_library
def NndctSigmoidTableLookup(Tinput, Ttable, Toutput, fragpos):
  """Sigmoid via table lookup: fill ``Toutput`` from ``Tinput`` using the
  precomputed table ``Ttable`` at fix position ``fragpos``."""
  on_cpu = Tinput.device == torch.device("cpu")
  Tinput = clone_view_tensor(Tinput)  # kernels need a self-contained buffer
  kernel = (torch.ops.vai.SigmoidTableLookup if support_onnx_export()
            else nndct_kernels.SigmoidTableLookup)
  kernel(Tinput, Ttable, Toutput, fragpos, 1 if on_cpu else 0)
23,595 | import sys
import torch
from ..load_kernels import *
import copy
import numpy as np
from nndct_shared.utils import NndctOption, NndctScreenLogger
from pytorch_nndct.nn.utils.decorator import pre_and_post_process_f16_tensor
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def support_onnx_export():
  """True when the installed torch is >= 1.7.0, i.e. recent enough for the
  registered ``torch.ops.vai`` custom-op (ONNX-exportable) kernel path."""
  # Return the comparison directly instead of if/else with boolean literals.
  return compare_torch_version(CmpFlag.GREATER_EQUAL, "1.7.0")
def clone_view_tensor(tensor):
  """Return ``tensor`` unchanged unless it is a torch view whose storage is
  larger than its element count; such views are cloned so downstream kernels
  see a self-contained buffer. Non-tensors pass through untouched."""
  is_strided_view = (
      isinstance(tensor, torch.Tensor) and
      hasattr(tensor, "storage") and
      hasattr(tensor, "numel") and
      tensor.storage().size() != tensor.numel())
  return tensor.clone() if is_strided_view else tensor
import torch
from torch.utils.cpp_extension import load, _import_module_from_library
def NndctSigmoidSimulation(Tinput, Toutput, fragpos):
  """Sigmoid simulation kernel (GPU only); prints a notice and does nothing
  on CPU."""
  Tinput = clone_view_tensor(Tinput)
  if Tinput.device == torch.device("cpu"):
    print("Sigmoid simulation does not support CPU")
    return
  device_id = 0  # GPU path only
  if support_onnx_export():
    torch.ops.vai.SigmoidSimulation(Tinput, Toutput, fragpos, device_id)
  else:
    nndct_kernels.SigmoidSimulation(Tinput, Toutput, fragpos, device_id)
23,596 | import sys
import torch
from ..load_kernels import *
import copy
import numpy as np
from nndct_shared.utils import NndctOption, NndctScreenLogger
from pytorch_nndct.nn.utils.decorator import pre_and_post_process_f16_tensor
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def support_onnx_export():
  """True when the installed torch is >= 1.7.0, i.e. recent enough for the
  registered ``torch.ops.vai`` custom-op (ONNX-exportable) kernel path."""
  # Return the comparison directly instead of if/else with boolean literals.
  return compare_torch_version(CmpFlag.GREATER_EQUAL, "1.7.0")
def clone_view_tensor(tensor):
  """Return ``tensor`` unchanged unless it is a torch view whose storage is
  larger than its element count; such views are cloned so downstream kernels
  see a self-contained buffer. Non-tensors pass through untouched."""
  is_strided_view = (
      isinstance(tensor, torch.Tensor) and
      hasattr(tensor, "storage") and
      hasattr(tensor, "numel") and
      tensor.storage().size() != tensor.numel())
  return tensor.clone() if is_strided_view else tensor
import torch
from torch.utils.cpp_extension import load, _import_module_from_library
def NndctTanhTableLookup(Tinput, Ttable, Toutput, fragpos):
  """Tanh via table lookup: fill ``Toutput`` from ``Tinput`` using the
  precomputed table ``Ttable`` at fix position ``fragpos``."""
  on_cpu = Tinput.device == torch.device("cpu")
  Tinput = clone_view_tensor(Tinput)  # kernels need a self-contained buffer
  kernel = (torch.ops.vai.TanhTableLookup if support_onnx_export()
            else nndct_kernels.TanhTableLookup)
  kernel(Tinput, Ttable, Toutput, fragpos, 1 if on_cpu else 0)
23,597 | import sys
import torch
from ..load_kernels import *
import copy
import numpy as np
from nndct_shared.utils import NndctOption, NndctScreenLogger
from pytorch_nndct.nn.utils.decorator import pre_and_post_process_f16_tensor
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def support_onnx_export():
  """True when the installed torch is >= 1.7.0, i.e. recent enough for the
  registered ``torch.ops.vai`` custom-op (ONNX-exportable) kernel path."""
  # Return the comparison directly instead of if/else with boolean literals.
  return compare_torch_version(CmpFlag.GREATER_EQUAL, "1.7.0")
def clone_view_tensor(tensor):
  """Return ``tensor`` unchanged unless it is a torch view whose storage is
  larger than its element count; such views are cloned so downstream kernels
  see a self-contained buffer. Non-tensors pass through untouched."""
  is_strided_view = (
      isinstance(tensor, torch.Tensor) and
      hasattr(tensor, "storage") and
      hasattr(tensor, "numel") and
      tensor.storage().size() != tensor.numel())
  return tensor.clone() if is_strided_view else tensor
import torch
from torch.utils.cpp_extension import load, _import_module_from_library
def NndctTanhSimulation(Tinput, Toutput, fragpos):
  """Tanh simulation kernel (GPU only); prints a notice and does nothing on
  CPU."""
  Tinput = clone_view_tensor(Tinput)
  if Tinput.device == torch.device("cpu"):
    print("Tanh simulation does not support CPU")
    return
  device_id = 0  # GPU path only
  if support_onnx_export():
    torch.ops.vai.TanhSimulation(Tinput, Toutput, fragpos, device_id)
  else:
    nndct_kernels.TanhSimulation(Tinput, Toutput, fragpos, device_id)
23,598 | import sys
import torch
from ..load_kernels import *
import copy
import numpy as np
from nndct_shared.utils import NndctOption, NndctScreenLogger
from pytorch_nndct.nn.utils.decorator import pre_and_post_process_f16_tensor
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def support_onnx_export():
  """True when the installed torch is >= 1.7.0, i.e. recent enough for the
  registered ``torch.ops.vai`` custom-op (ONNX-exportable) kernel path."""
  # Return the comparison directly instead of if/else with boolean literals.
  return compare_torch_version(CmpFlag.GREATER_EQUAL, "1.7.0")
def clone_view_tensor(tensor):
  """Return ``tensor`` unchanged unless it is a torch view whose storage is
  larger than its element count; such views are cloned so downstream kernels
  see a self-contained buffer. Non-tensors pass through untouched."""
  is_strided_view = (
      isinstance(tensor, torch.Tensor) and
      hasattr(tensor, "storage") and
      hasattr(tensor, "numel") and
      tensor.storage().size() != tensor.numel())
  return tensor.clone() if is_strided_view else tensor
import torch
from torch.utils.cpp_extension import load, _import_module_from_library
def NndctSoftmaxExpApproximate(Tinput, Toutput):
  """Softmax exponent approximation kernel (GPU only); prints a notice and
  does nothing on CPU."""
  Tinput = clone_view_tensor(Tinput)
  if Tinput.device == torch.device("cpu"):
    print("Softmax Exponent Approximate does not support CPU")
    return
  device_id = 0  # GPU path only
  if support_onnx_export():
    torch.ops.vai.SoftmaxExpApproximate(Tinput, Toutput, device_id)
  else:
    nndct_kernels.SoftmaxExpApproximate(Tinput, Toutput, device_id)
23,599 | import sys
import torch
from ..load_kernels import *
import copy
import numpy as np
from nndct_shared.utils import NndctOption, NndctScreenLogger
from pytorch_nndct.nn.utils.decorator import pre_and_post_process_f16_tensor
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def support_onnx_export():
  """True when the installed torch is >= 1.7.0, i.e. recent enough for the
  registered ``torch.ops.vai`` custom-op (ONNX-exportable) kernel path."""
  # Return the comparison directly instead of if/else with boolean literals.
  return compare_torch_version(CmpFlag.GREATER_EQUAL, "1.7.0")
def clone_view_tensor(tensor):
  """Return ``tensor`` unchanged unless it is a torch view whose storage is
  larger than its element count; such views are cloned so downstream kernels
  see a self-contained buffer. Non-tensors pass through untouched."""
  is_strided_view = (
      isinstance(tensor, torch.Tensor) and
      hasattr(tensor, "storage") and
      hasattr(tensor, "numel") and
      tensor.storage().size() != tensor.numel())
  return tensor.clone() if is_strided_view else tensor
import torch
from torch.utils.cpp_extension import load, _import_module_from_library
def NndctSoftmaxLOD(Tinput, Toutput):
  """Softmax LOD kernel (GPU only); prints a notice and does nothing on
  CPU."""
  Tinput = clone_view_tensor(Tinput)
  if Tinput.device == torch.device("cpu"):
    print("Softmax LOD does not support CPU")
    return
  device_id = 0  # GPU path only
  if support_onnx_export():
    torch.ops.vai.SoftmaxLOD(Tinput, Toutput, device_id)
  else:
    nndct_kernels.SoftmaxLOD(Tinput, Toutput, device_id)
23,600 | import sys
import torch
from ..load_kernels import *
import copy
import numpy as np
from nndct_shared.utils import NndctOption, NndctScreenLogger
from pytorch_nndct.nn.utils.decorator import pre_and_post_process_f16_tensor
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def support_onnx_export():
  """True when the installed torch is >= 1.7.0, i.e. recent enough for the
  registered ``torch.ops.vai`` custom-op (ONNX-exportable) kernel path."""
  # Return the comparison directly instead of if/else with boolean literals.
  return compare_torch_version(CmpFlag.GREATER_EQUAL, "1.7.0")
def clone_view_tensor(tensor):
  """Return ``tensor`` unchanged unless it is a torch view whose storage is
  larger than its element count; such views are cloned so downstream kernels
  see a self-contained buffer. Non-tensors pass through untouched."""
  is_strided_view = (
      isinstance(tensor, torch.Tensor) and
      hasattr(tensor, "storage") and
      hasattr(tensor, "numel") and
      tensor.storage().size() != tensor.numel())
  return tensor.clone() if is_strided_view else tensor
import torch
from torch.utils.cpp_extension import load, _import_module_from_library
def NndctSoftmaxSimulationPart1(Tinput, Toutput):
  """Softmax simulation part 1 kernel (GPU only); prints a notice and does
  nothing on CPU."""
  Tinput = clone_view_tensor(Tinput)
  if Tinput.device == torch.device("cpu"):
    print("Softmax Simulation Part 1 does not support CPU")
    return
  device_id = 0  # GPU path only
  if support_onnx_export():
    torch.ops.vai.SoftmaxSimulationPart1(Tinput, Toutput, device_id)
  else:
    nndct_kernels.SoftmaxSimulationPart1(Tinput, Toutput, device_id)
23,601 | import sys
import torch
from ..load_kernels import *
import copy
import numpy as np
from nndct_shared.utils import NndctOption, NndctScreenLogger
from pytorch_nndct.nn.utils.decorator import pre_and_post_process_f16_tensor
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def support_onnx_export():
  """True when the installed torch is >= 1.7.0, i.e. recent enough for the
  registered ``torch.ops.vai`` custom-op (ONNX-exportable) kernel path."""
  # Return the comparison directly instead of if/else with boolean literals.
  return compare_torch_version(CmpFlag.GREATER_EQUAL, "1.7.0")
def clone_view_tensor(tensor):
  """Return ``tensor`` unchanged unless it is a torch view whose storage is
  larger than its element count; such views are cloned so downstream kernels
  see a self-contained buffer. Non-tensors pass through untouched."""
  is_strided_view = (
      isinstance(tensor, torch.Tensor) and
      hasattr(tensor, "storage") and
      hasattr(tensor, "numel") and
      tensor.storage().size() != tensor.numel())
  return tensor.clone() if is_strided_view else tensor
import torch
from torch.utils.cpp_extension import load, _import_module_from_library
def NndctSoftmaxSimulationPart2(sum, Toutput):
  """Softmax simulation part 2 kernel (GPU only); the device is decided from
  ``Toutput``. Prints a notice and does nothing on CPU.

  NOTE: the parameter name ``sum`` shadows the builtin but is kept for
  keyword-caller compatibility.
  """
  sum = clone_view_tensor(sum)
  if Toutput.device == torch.device("cpu"):
    print("Softmax Simulation Part 2 does not support CPU")
    return
  device_id = 0  # GPU path only
  if support_onnx_export():
    torch.ops.vai.SoftmaxSimulationPart2(sum, Toutput, device_id)
  else:
    nndct_kernels.SoftmaxSimulationPart2(sum, Toutput, device_id)
23,602 | import sys
import torch
from ..load_kernels import *
import copy
import numpy as np
from nndct_shared.utils import NndctOption, NndctScreenLogger
from pytorch_nndct.nn.utils.decorator import pre_and_post_process_f16_tensor
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def support_onnx_export():
  """True when the installed torch is >= 1.7.0, i.e. recent enough for the
  registered ``torch.ops.vai`` custom-op (ONNX-exportable) kernel path."""
  # Return the comparison directly instead of if/else with boolean literals.
  return compare_torch_version(CmpFlag.GREATER_EQUAL, "1.7.0")
import sys
import torch
from torch.utils.cpp_extension import load, _import_module_from_library
# NOTE(review): truncated fragment — `new_kernel` is only bound on the
# torch >= 1.7.0 path here; the original source presumably also handles the
# else branch. Confirm before relying on `new_kernel` being defined.
if compare_torch_version(CmpFlag.GREATER_EQUAL, "1.7.0"):
  new_kernel = True
# NOTE(review): `Enum` is not imported in this fragment — presumably
# `from enum import Enum` exists at the top of the original file; confirm.
class CmpFlag(Enum):
  """
  Enum for comparison flags
  """
  # Each member selects the relation applied by compare_torch_version.
  # Values are fixed integers; do not renumber (callers may persist them).
  EQUAL = 0
  LESS = 1
  LESS_EQUAL = 2
  GREATER = 3
  GREATER_EQUAL = 4
  NOT_EQUAL = 5
def compare_torch_version(compare_type: CmpFlag, version: str):
  """Evaluate ``torch.__version__ <op> version`` with ``<op>`` selected by
  ``compare_type``; returns None for an unrecognized flag (matching the
  original fall-through behavior)."""
  comparators = {
      CmpFlag.EQUAL: lambda a, b: a == b,
      CmpFlag.LESS: lambda a, b: a < b,
      CmpFlag.LESS_EQUAL: lambda a, b: a <= b,
      CmpFlag.GREATER: lambda a, b: a > b,
      CmpFlag.GREATER_EQUAL: lambda a, b: a >= b,
      CmpFlag.NOT_EQUAL: lambda a, b: a != b,
  }
  comparator = comparators.get(compare_type)
  if comparator is None:
    return None
  return comparator(LooseVersion(torch.__version__), LooseVersion(version))
def fake_quantize_per_channel(input, scale_inv, zero_point, axis, quant_min, quant_max, method, inplace):
  """Per-channel fake quantization dispatcher.

  method == -1 selects torch's native fake_quantize_per_channel_affine
  (scale = 1/scale_inv); any other method routes to the NNDCT fix-neuron
  kernels — either the registered `vai` custom op (torch >= 1.7) or, as a
  fallback, channel-by-channel FixNeuronV2 along ``axis``.
  """
  if method == -1:
    # torch's native op takes int32 zero_points from 0.9.x on, long before.
    if compare_torch_version(CmpFlag.GREATER, "0.9.0"):
      zero_point = zero_point.to(torch.int32)
    else:
      zero_point = zero_point.to(torch.long)
    return torch.fake_quantize_per_channel_affine(input, 1.0 / scale_inv, zero_point, axis, quant_min, quant_max)
  else:
    # device_id: 1 selects the CPU kernel, 0 the GPU kernel.
    device_id = 1 if input.device == torch.device("cpu") else 0
    if support_onnx_export():
      # Guard against division by ~0: map tiny scale_inv to float max.
      scale = torch.where(scale_inv<sys.float_info.min, torch.tensor(sys.float_info.max, dtype=scale_inv.dtype, device=scale_inv.device), 1.0/scale_inv).to(torch.float)
      # api: (tensor(float), int32, int32, tensor(float), tensor(int8), int32, int32, int32, bool)
      output = torch.ops.vai.fix_neuron_per_channel(input, quant_min, quant_max, scale, zero_point.to(torch.int8), axis, method, device_id, inplace)
      return output
    else:
      # Fallback: quantize one channel slice at a time and re-concatenate.
      input_split = torch.split(input, 1, dim=axis)
      output_cat = []
      for i in range(len(input_split)):
        # inplace == 0 keeps the caller's tensor untouched.
        output_split = input_split[i].clone() if inplace == 0 else input_split[i]
        nndct_kernels.FixNeuronV2(input_split[i], output_split, quant_min,
                                  quant_max, scale_inv[i], zero_point[i],
                                  method, device_id)
        output_cat.append(output_split)
      output = torch.cat(output_cat, axis)
      return output
23,603 | import sys
import torch
from ..load_kernels import *
import copy
import numpy as np
from nndct_shared.utils import NndctOption, NndctScreenLogger
from pytorch_nndct.nn.utils.decorator import pre_and_post_process_f16_tensor
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
import torch
from torch.utils.cpp_extension import load, _import_module_from_library
def fake_quantize_per_channel_tensorrt(inputs, amax, min_bound, max_bound, axis=None):
  """TensorRT-style per-channel fake quantization (quantize/clamp/dequantize).

  Args:
    inputs: tensor to fake-quantize; half inputs are processed in fp32.
    amax: per-channel amplitude tensor; must be non-negative.
    min_bound/max_bound: clamp bounds of the quantized integer range.
    axis: channel axis of ``inputs`` matched by ``amax``; None means ``amax``
      already broadcasts against ``inputs``.

  Returns:
    The fake-quantized tensor.

  Raises:
    ValueError: if ``amax`` contains negative values.
  """
  # Computation must be in FP32 to prevent potential over flow.
  if not isinstance(max_bound, torch.Tensor):
    max_bound = torch.tensor(float(max_bound))
  input_dtype = inputs.dtype
  if inputs.dtype == torch.half:
    inputs = inputs.float()
  min_amax = amax.min()
  if min_amax < 0:
    raise ValueError("Negative values in amax")
  scale = max_bound / amax
  # Smallest positive value representable in fp16.
  epsilon = 1. / (1 << 24)
  zero_amax_mask = None
  if min_amax <= epsilon:  # Treat amax smaller than minimum representable of fp16 as 0
    zero_amax_mask = (amax <= epsilon)
    scale[zero_amax_mask] = 0  # Value quantized with amax=0 should all be 0
  if axis is not None:  # fixed: identity comparison with None (was `axis != None`)
    # Reshape scale so it broadcasts along `axis` of `inputs`.
    for x_dim in range(inputs.ndim):
      if x_dim != axis:
        scale = torch.unsqueeze(scale, x_dim)
  outputs = torch.clamp((inputs * scale).round_(), min_bound, max_bound)
  if zero_amax_mask is not None:
    # Return 1 makes more sense for values quantized to 0 with amax=0
    # NOTE(review): when `axis` was given, `scale` has been unsqueezed and
    # this boolean-mask assignment may not address the intended elements in
    # the degenerate amax~0 case — confirm against the original intent.
    scale[zero_amax_mask] = 1.
  if input_dtype == torch.half:
    outputs = outputs.half()
  outputs = outputs / scale
  return outputs
23,604 | import sys
import torch
from ..load_kernels import *
import copy
import numpy as np
from nndct_shared.utils import NndctOption, NndctScreenLogger
from pytorch_nndct.nn.utils.decorator import pre_and_post_process_f16_tensor
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
import torch
from torch.utils.cpp_extension import load, _import_module_from_library
def fake_quantize_per_tensor_tensorrt(inputs, amax, min_bound, max_bound):
  """TensorRT-style per-tensor fake quantization (quantize/clamp/dequantize).

  Half inputs are processed in fp32 to avoid overflow; an amax at or below
  fp16's smallest representable value quantizes everything to zero. Raises
  ValueError for a negative amax.
  """
  if not isinstance(max_bound, torch.Tensor):
    max_bound = torch.tensor(float(max_bound))
  if not isinstance(amax, torch.Tensor):
    amax = torch.tensor(float(amax))
  original_dtype = inputs.dtype
  if original_dtype == torch.half:
    inputs = inputs.float()
  if amax < 0:
    raise ValueError("Negative values in amax")
  epsilon = 1. / (1 << 24)  # smallest positive value representable in fp16
  amax_is_tiny = amax <= epsilon
  # amax ~ 0 quantizes every value to 0.
  scale = 0 if amax_is_tiny else max_bound / amax
  quantized = torch.clamp((inputs * scale).round_(), min_bound, max_bound)
  if amax_is_tiny:
    scale = 1.  # dequantizing by 1 keeps the zeros as zeros
  if original_dtype == torch.half:
    quantized = quantized.half()
  return quantized / scale
23,605 | import sys
import torch
from ..load_kernels import *
import copy
import numpy as np
from nndct_shared.utils import NndctOption, NndctScreenLogger
from pytorch_nndct.nn.utils.decorator import pre_and_post_process_f16_tensor
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def clone_view_tensor(tensor):
  """Return ``tensor`` unchanged unless it is a torch view whose storage is
  larger than its element count; such views are cloned so downstream kernels
  see a self-contained buffer. Non-tensors pass through untouched."""
  is_strided_view = (
      isinstance(tensor, torch.Tensor) and
      hasattr(tensor, "storage") and
      hasattr(tensor, "numel") and
      tensor.storage().size() != tensor.numel())
  return tensor.clone() if is_strided_view else tensor
import torch
from torch.utils.cpp_extension import load, _import_module_from_library
def NndctSigmoidTableLookupAIE2(Tinput, Toutput, fragpos):
  """AIE2 sigmoid table lookup; dispatches on Tinput's device
  (CPU id 1, GPU id 0)."""
  src = clone_view_tensor(Tinput)
  on_cpu = src.device == torch.device("cpu")
  torch.ops.vai.SigmoidTableLookupAIE2(src, Toutput, fragpos, 1 if on_cpu else 0)
23,606 | import sys
import torch
from ..load_kernels import *
import copy
import numpy as np
from nndct_shared.utils import NndctOption, NndctScreenLogger
from pytorch_nndct.nn.utils.decorator import pre_and_post_process_f16_tensor
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
# NOTE(review): truncated in this dump — the body of clone_view_tensor is
# missing here; see the complete definition elsewhere in this file.
def clone_view_tensor(tensor):
import torch
from torch.utils.cpp_extension import load, _import_module_from_library
def NndctTanhTableLookupAIE2(Tinput, Toutput, fragpos):
  """AIE2 tanh table lookup; dispatches on Tinput's device
  (CPU id 1, GPU id 0)."""
  src = clone_view_tensor(Tinput)
  on_cpu = src.device == torch.device("cpu")
  torch.ops.vai.TanhTableLookupAIE2(src, Toutput, fragpos, 1 if on_cpu else 0)
23,607 | import sys
import torch
from ..load_kernels import *
import copy
import numpy as np
from nndct_shared.utils import NndctOption, NndctScreenLogger
from pytorch_nndct.nn.utils.decorator import pre_and_post_process_f16_tensor
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def clone_view_tensor(tensor):
  """Return ``tensor`` unchanged unless it is a torch view whose storage is
  larger than its element count; such views are cloned so downstream kernels
  see a self-contained buffer. Non-tensors pass through untouched."""
  is_strided_view = (
      isinstance(tensor, torch.Tensor) and
      hasattr(tensor, "storage") and
      hasattr(tensor, "numel") and
      tensor.storage().size() != tensor.numel())
  return tensor.clone() if is_strided_view else tensor
import torch
from torch.utils.cpp_extension import load, _import_module_from_library
def NndctExpApprAIE2(Tinput, Toutput, bit_width):
  """AIE2 exponential approximation (GPU only); prints a notice and does
  nothing on CPU."""
  Tinput = clone_view_tensor(Tinput)
  if Tinput.device == torch.device("cpu"):
    print("Exp Approximation does not support CPU")
    return
  torch.ops.vai.ExpApprAIE2(Tinput, Toutput, 0, bit_width)
23,608 | import sys
import torch
from ..load_kernels import *
import copy
import numpy as np
from nndct_shared.utils import NndctOption, NndctScreenLogger
from pytorch_nndct.nn.utils.decorator import pre_and_post_process_f16_tensor
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def clone_view_tensor(tensor):
  """Return ``tensor`` unchanged unless it is a torch view whose storage is
  larger than its element count; such views are cloned so downstream kernels
  see a self-contained buffer. Non-tensors pass through untouched."""
  is_strided_view = (
      isinstance(tensor, torch.Tensor) and
      hasattr(tensor, "storage") and
      hasattr(tensor, "numel") and
      tensor.storage().size() != tensor.numel())
  return tensor.clone() if is_strided_view else tensor
import torch
from torch.utils.cpp_extension import load, _import_module_from_library
def NndctLogSoftmaxFastLn(Tinput, Toutput):
  """LogSoftmax fast-ln kernel (GPU only); prints a notice and does nothing
  on CPU."""
  Tinput = clone_view_tensor(Tinput)
  if Tinput.device == torch.device("cpu"):
    print("LogSoftmax fast ln does not support CPU")
    return
  torch.ops.vai.LogSoftmaxFastLn(Tinput, Toutput, 0)
23,609 | import sys
import torch
from ..load_kernels import *
import copy
import numpy as np
from nndct_shared.utils import NndctOption, NndctScreenLogger
from pytorch_nndct.nn.utils.decorator import pre_and_post_process_f16_tensor
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def clone_view_tensor(tensor):
  """Return ``tensor`` unchanged unless it is a torch view whose storage is
  larger than its element count; such views are cloned so downstream kernels
  see a self-contained buffer. Non-tensors pass through untouched."""
  is_strided_view = (
      isinstance(tensor, torch.Tensor) and
      hasattr(tensor, "storage") and
      hasattr(tensor, "numel") and
      tensor.storage().size() != tensor.numel())
  return tensor.clone() if is_strided_view else tensor
import torch
from torch.utils.cpp_extension import load, _import_module_from_library
def NndctLogSoftmaxSub(Tinput, Toutput, Tsum):
  """Dispatch the LogSoftmax subtraction kernel into Toutput; GPU only."""
  compact_in = clone_view_tensor(Tinput)
  on_cpu = compact_in.device == torch.device("cpu")
  if on_cpu:
    print("LogSoftmax subtraction does not support CPU")
    return
  torch.ops.vai.LogSoftmaxSub(compact_in, Toutput, Tsum, 0)
import sys
import torch
from ..load_kernels import *
import copy
import numpy as np
from nndct_shared.utils import NndctOption, NndctScreenLogger
from pytorch_nndct.nn.utils.decorator import pre_and_post_process_f16_tensor
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def clone_view_tensor(tensor):
  # Return `tensor` unchanged unless it is a torch view whose backing
  # storage holds more elements than the tensor itself; such views are
  # cloned so the custom ops below receive a compact, self-owned buffer.
  cloned_tensor = tensor
  if (isinstance(tensor, torch.Tensor) and
      hasattr(tensor, "storage") and
      hasattr(tensor, "numel") and
      tensor.storage().size() != tensor.numel()):
    cloned_tensor = tensor.clone()
  return cloned_tensor
import torch
from torch.utils.cpp_extension import load, _import_module_from_library
def NndctAIESqrt(Tinput, Toutput):
  """Run the AIE sqrt kernel; device flag 1 marks CPU, 0 the accelerator."""
  compact_in = clone_view_tensor(Tinput)
  dev_flag = 1 if compact_in.device == torch.device("cpu") else 0
  torch.ops.vai.AIESqrt(compact_in, Toutput, dev_flag)
import sys
import torch
from ..load_kernels import *
import copy
import numpy as np
from nndct_shared.utils import NndctOption, NndctScreenLogger
from pytorch_nndct.nn.utils.decorator import pre_and_post_process_f16_tensor
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def clone_view_tensor(tensor):
  # Restored body: it was truncated to a bare signature by extraction;
  # this is byte-for-byte the helper defined identically elsewhere in
  # this file. Clones views whose storage exceeds their element count so
  # custom ops receive a compact, self-owned buffer.
  cloned_tensor = tensor
  if (isinstance(tensor, torch.Tensor) and
      hasattr(tensor, "storage") and
      hasattr(tensor, "numel") and
      tensor.storage().size() != tensor.numel()):
    cloned_tensor = tensor.clone()
  return cloned_tensor
import torch
from torch.utils.cpp_extension import load, _import_module_from_library
def NndctAIEISqrt(Tinput, Toutput):
  """Run the AIE inverse-sqrt kernel; device flag 1 marks CPU, 0 the accelerator."""
  compact_in = clone_view_tensor(Tinput)
  dev_flag = 1 if compact_in.device == torch.device("cpu") else 0
  torch.ops.vai.AIEISqrt(compact_in, Toutput, dev_flag)
import sys
import torch
from ..load_kernels import *
import copy
import numpy as np
from nndct_shared.utils import NndctOption, NndctScreenLogger
from pytorch_nndct.nn.utils.decorator import pre_and_post_process_f16_tensor
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def clone_view_tensor(tensor):
  """Clone `tensor` when it is a view over larger storage; otherwise return it as-is."""
  needs_copy = (isinstance(tensor, torch.Tensor) and
                hasattr(tensor, "storage") and
                hasattr(tensor, "numel") and
                tensor.storage().size() != tensor.numel())
  return tensor.clone() if needs_copy else tensor
import torch
from torch.utils.cpp_extension import load, _import_module_from_library
def NndctISqrt(Tinput, Toutput):
  """Inverse-square-root kernel used by layernorm; runs on CPU (flag 1) or GPU (flag 0)."""
  compact_in = clone_view_tensor(Tinput)
  dev_flag = 1 if compact_in.device == torch.device("cpu") else 0
  torch.ops.vai.LayernormISqrt(compact_in, Toutput, dev_flag)
import sys
import torch
from ..load_kernels import *
import copy
import numpy as np
from nndct_shared.utils import NndctOption, NndctScreenLogger
from pytorch_nndct.nn.utils.decorator import pre_and_post_process_f16_tensor
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def clone_view_tensor(tensor):
  # Return `tensor` unchanged unless it is a torch view whose backing
  # storage holds more elements than the tensor itself; such views are
  # cloned so the custom ops below receive a compact, self-owned buffer.
  cloned_tensor = tensor
  if (isinstance(tensor, torch.Tensor) and
      hasattr(tensor, "storage") and
      hasattr(tensor, "numel") and
      tensor.storage().size() != tensor.numel()):
    cloned_tensor = tensor.clone()
  return cloned_tensor
import torch
from torch.utils.cpp_extension import load, _import_module_from_library
def NndctLayernormInvSqrt(Tinput, Toutput):
  """Dispatch the layernorm inverse-sqrt kernel into Toutput; GPU only."""
  compact_in = clone_view_tensor(Tinput)
  on_cpu = compact_in.device == torch.device("cpu")
  if on_cpu:
    print("Layernorm InvSqrt does not support CPU")
    return
  torch.ops.vai.LayernormInvSqrt(compact_in, Toutput, 0)
import sys
import torch
from ..load_kernels import *
import copy
import numpy as np
from nndct_shared.utils import NndctOption, NndctScreenLogger
from pytorch_nndct.nn.utils.decorator import pre_and_post_process_f16_tensor
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def clone_view_tensor(tensor):
  # Restored body: it was truncated to a bare signature by extraction;
  # this is byte-for-byte the helper defined identically elsewhere in
  # this file. Clones views whose storage exceeds their element count so
  # custom ops receive a compact, self-owned buffer.
  cloned_tensor = tensor
  if (isinstance(tensor, torch.Tensor) and
      hasattr(tensor, "storage") and
      hasattr(tensor, "numel") and
      tensor.storage().size() != tensor.numel()):
    cloned_tensor = tensor.clone()
  return cloned_tensor
import torch
from torch.utils.cpp_extension import load, _import_module_from_library
def NndctInverseAIE2(Tinput, Toutput):
  """Dispatch the AIE2 reciprocal kernel into Toutput; GPU only."""
  compact_in = clone_view_tensor(Tinput)
  on_cpu = compact_in.device == torch.device("cpu")
  if on_cpu:
    print("Inverse AIE2 does not support CPU")
    return
  torch.ops.vai.InverseAIE2(compact_in, Toutput, 0)
import math
import torch
from torch.autograd import Variable
from nndct_shared.quantization import maybe_get_quantizer, quantize_tensors
from nndct_shared.utils import NndctOption
import pytorch_nndct.utils as py_utils
class deephi_AvgPool2d(torch.nn.modules.AvgPool2d):
  r"""DeePhi AvgPool2d operation, support float and double.

  Quantization-aware average pooling: quantizes input and output, and can
  rescale the result to match the DPU's integer averaging
  (sum * multi_factor >> shift_factor instead of an exact divide).
  """
  def __init__(self, *args, **kwards):
    super(deephi_AvgPool2d, self).__init__(*args, **kwards)
    self.quant_mode, self.quantizer = maybe_get_quantizer()
    self.node = None  # graph node, attached later by the quantizer rewriter
  def forward(self, input):
    qinput = quantize_tensors([input], self.node, tensor_type='input')[0]
    output = super().forward(qinput)
    # scale to DPU accuracy
    if NndctOption.nndct_avg_pool_approximate.value:
      # Hard-wired factors for common kernels; each scale equals
      # kernel_area * multi_factor / 2**shift_factor.
      scale = 1.0
      if self.node.node_attr(self.node.op.AttrName.KERNEL) == [3, 3]:
        scale = 9.0 * 7.0 / 64.0
      elif self.node.node_attr(self.node.op.AttrName.KERNEL) == [5, 5]:
        scale = 25.0 * 10.0 / 256.0
      elif self.node.node_attr(self.node.op.AttrName.KERNEL) in [[6, 6], [3, 6], [6, 3]]:
        scale = 36.0 * 7.0 / 256.0
      elif self.node.node_attr(self.node.op.AttrName.KERNEL) == [7, 7]:
        scale = 49.0 * 21.0 / 1024.0
      elif self.node.node_attr(self.node.op.AttrName.KERNEL) == [14, 14]:
        scale = 196.0 * 21.0 / 4096.0
      else:
        # Generic kernel: search the (multi_factor, shift_factor) pair that
        # best approximates 1/kernel_area with multi_factor / 2**shift.
        rec = self.node.node_attr(self.node.op.AttrName.KERNEL)[0] * self.node.node_attr(self.node.op.AttrName.KERNEL)[1]
        max_factor = math.ceil(math.log(rec * 128,2))
        diff = 1.0
        multi_factor = 0.0
        shift_factor = 0.0
        for shift_factor_ in range(max_factor):
          factor = round((2 ** shift_factor_)/rec)
          diff_ = abs(factor / (2 ** shift_factor_) - 1/rec)
          if diff_ < diff:
            multi_factor = factor
            diff = diff_
            shift_factor = shift_factor_
        scale = rec * multi_factor / (2 ** shift_factor)
      output = output * scale
    output = quantize_tensors([output], self.node)[0]
    return output
def AvgPool2d(*args, **kwargs):
  """Factory: plain torch.nn.AvgPool2d when quantization is off, else deephi_AvgPool2d."""
  mode, _ = maybe_get_quantizer()
  use_float = mode is None or NndctOption.nndct_quant_off.value
  cls = torch.nn.AvgPool2d if use_float else deephi_AvgPool2d
  return cls(*args, **kwargs)
import torch
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.utils import NndctOption
from nndct_shared.quantization import quantize_tensors
import pytorch_nndct.utils as py_utils
class deephi_ReLU(torch.nn.ReLU):
  r"""Quantization-aware ReLU: fix-quantizes both its input and its output."""

  def __init__(self, *args, **kwargs):
    super(deephi_ReLU, self).__init__(*args, **kwargs)
    self.quant_mode, self.quantizer = maybe_get_quantizer()
    self.node = None  # graph node, bound later by the rewriter

  def forward(self, input):
    quantized_in = quantize_tensors([input], self.node, tensor_type='input')[0]
    activated = super().forward(quantized_in)
    return quantize_tensors([activated], self.node)[0]
def ReLU(*args, **kwargs):
  """Factory returning the quantization-aware ReLU module.

  Unlike most sibling factories, there is no float fallback here.
  """
  return deephi_ReLU(*args, **kwargs)
import torch
from nndct_shared.quantization.utils import maybe_get_quantizer, quantize_tensors
import pytorch_nndct.utils as py_utils
class deephi_Add(torch.nn.Module):
  """Quantization-aware elementwise add (input + alpha * other)."""

  def __init__(self):
    super(deephi_Add, self).__init__()
    self.quant_mode, self.quantizer = maybe_get_quantizer()
    self.node = None  # graph node, bound later by the rewriter

  def forward(self, input, other, alpha=1):
    qin, qother = quantize_tensors([input, other], self.node, tensor_type='input')
    summed = torch.add(input=qin, other=qother, alpha=alpha)
    return quantize_tensors([summed], self.node)[0]
def Add(*args, **kwargs):
  # Factory kept for symmetry with the other op wrappers; always quant-aware
  # (no float fallback, unlike e.g. AvgPool2d).
  return deephi_Add(*args, **kwargs)
import torch
import numpy as np
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
from nndct_shared.utils import NndctOption
from nndct_shared.base import GLOBAL_MAP, NNDCT_KEYS
from .sigmoid_table import *
from .fix_ops import NndctSigmoidTableLookup, NndctSigmoidSimulation, NndctSigmoidTableLookupAIE2
import pytorch_nndct.utils as py_utils
class deephi_Sigmoid(torch.nn.modules.Sigmoid):
  r"""DeePhi Sigmoid operation.

  Quantization-aware sigmoid. Depending on options it either quantizes
  input/output around the float op (Method 0), or evaluates sigmoid with a
  fixed-point simulation / lookup-table kernel (Methods 1-3).
  """
  def __init__(self):
    super(deephi_Sigmoid, self).__init__()
    self.quant_mode, self.quantizer = maybe_get_quantizer()
    self.node = None  # graph node, attached later by the rewriter
  def forward(self, input):
    # Pure float path when this node is outside the quantized subgraph.
    if self.quant_mode == 0 or (not self.node.in_quant_part):
      return super().forward(input)
    qinput = quantize_tensors([input], self.node, tensor_type='input')[0]
    if (NndctOption.nndct_quant_off.value or
        self.quantizer is None or
        self.quantizer.exporting or
        NndctOption.nndct_cv_app.value or
        # FIX: compare the option VALUE, not the option object (which is
        # never `False`, so this branch condition could never trigger);
        # matches the `.value is False` usage in deephi_Linear.
        NndctOption.nndct_only_int_quant.value is False):
      # Method 0: quant input and output (for CV)
      output = super().forward(qinput)
      output = quantize_tensors([output], self.node)[0]
    else:
      output = torch.empty_like(qinput)
      input_name = self.node.in_nodes[0]
      input_node = self.quantizer.configer.get_Nndctnode(input_name)
      if not self.quantizer.configer.node_output_quantizable(input_node):
        input_name = input_node.in_nodes[0]
      elif self.quantizer.configer.will_merge_with_table(input_node,
          (not NndctOption.nndct_cv_app.value)):
        # Producer will be merged with this table op: run float sigmoid and
        # record a 15-bit fragpos output config for this node.
        output = super().forward(qinput)
        bnfp = self.quantizer.get_quant_config(input_name, False)
        bnfp[1] = 15
        self.quantizer.set_quant_config(self.node.name, bnfp)
        return output
      # NOTE(review): `bw` is unused; the call is kept in case
      # get_quant_config validates that a config exists for this node.
      bw = self.quantizer.get_quant_config(self.node.name, False)[0]
      fragpos = self.quantizer.get_quant_config(input_name, False)[1]
      # Method 1: Simulation AIE with 16 bw (for RNNT)
      if NndctOption.nndct_op_tanh_sigmoid_mode.value == "simulation":
        NndctSigmoidSimulation(qinput, output, fragpos)
        output = quantize_tensors([output], self.node)[0]
      # Method 2: Table Look up for AIE2 with 16 bw (based on LUT)
      elif NndctOption.nndct_op_tanh_sigmoid_mode.value == "aie2_lut_16bw" or NndctOption.nndct_ip_asr.value:
        NndctSigmoidTableLookupAIE2(qinput, output, fragpos)
        output = quantize_tensors([output], self.node)[0]
      # Method 3: Table Look up for FPGA with 16 bw
      else:
        quant_device = qinput.device
        Ttable = SIGMOID_TABLE.table.to(qinput.dtype).to(quant_device)
        output = output.to(quant_device)
        # FIX: look up the QUANTIZED input (was the raw `input`), matching
        # Methods 1 and 2 above.
        NndctSigmoidTableLookup(qinput,
                                Ttable,
                                output,
                                fragpos)
        bnfp = self.quantizer.get_quant_config(input_name, False)
        bnfp[1] = 15
        self.quantizer.set_quant_config(self.node.name, bnfp)
    return output
import torch
def Sigmoid(*args, **kwargs):
  """Factory: float Sigmoid when no quantizer is active, else deephi_Sigmoid."""
  mode, _ = maybe_get_quantizer()
  if mode is None:
    return torch.nn.Sigmoid(*args, **kwargs)
  return deephi_Sigmoid(*args, **kwargs)
import torch
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.utils import NndctOption
from nndct_shared.quantization import kernel_need_quant
from nndct_shared.quantization import quantize_tensors
import pytorch_nndct.utils as py_utils
import numpy as np
from pytorch_nndct.utils import Const
from .fix_ops import NndctISqrt, NndctAIEISqrt
class deephi_LayerNorm(torch.nn.LayerNorm):
def __init__(self, *args, **kwargs):
def forward(self, input):
def _simulatedLayerNorm1(self, inp, normalized_shape, elementwise_affine, weight, bias, ifp, wfp, bfp):
def _simulatedLayerNorm2(self, inp, normalized_shape, elementwise_affine, weight, bias, ifp, wfp, bfp):
def LayerNorm(*args, **kwargs):
  """Factory: float LayerNorm when quantization is inactive, else deephi_LayerNorm."""
  mode, _ = maybe_get_quantizer()
  if mode is None:
    return torch.nn.LayerNorm(*args, **kwargs)
  return deephi_LayerNorm(*args, **kwargs)
import torch
from torch.autograd import Variable
import math
from nndct_shared.utils import NndctOption, NndctScreenLogger, QError
from nndct_shared.quantization import kernel_need_quant
from nndct_shared.quantization import quantize_tensors
from nndct_shared.quantization import maybe_get_quantizer
import pytorch_nndct.utils as py_utils
import torch.nn.functional as F
from pytorch_nndct.utils import Const
from .fix_ops import NndctISqrt
class deephi_InstanceNorm(torch.nn.modules.instancenorm._InstanceNorm):
def __init__(self, *args, **kwards):
def forward(self, input):
def simulateInstanceNorm(self, inp, params):
def instancenorm_process(inp, ifp, weight, wfp, bias, bfp):
def simulateInstanceNorm2(inp, affine, weight, bias, ifp, wfp, bfp):
def InstanceNorm(*args, **kwargs):
  # Always returns the quant-aware wrapper; no float fallback here
  # (unlike the AvgPool2d / LayerNorm factories).
  return deephi_InstanceNorm(*args, **kwargs)
import torch.nn.functional as F
import torch.nn as nn
import torch
from torch.nn.parameter import Parameter
def get_same_padding(kernel_size):
    """Return the 'same' padding for an odd kernel size.

    Accepts an int or a 2-element tuple/list; tuples yield a per-dimension
    padding tuple. Raises AssertionError on even or non-int kernel sizes.
    """
    if isinstance(kernel_size, (tuple, list)):
        assert len(kernel_size) == 2, 'invalid kernel size: %s' % kernel_size
        return get_same_padding(kernel_size[0]), get_same_padding(kernel_size[1])
    assert isinstance(
        kernel_size,
        int), 'kernel size should be either `int` or `tuple` or `list`'
    assert kernel_size % 2 > 0, 'kernel size should be odd number'
    return kernel_size // 2
import torch.nn.functional as F
import torch.nn as nn
import torch
from torch.nn.parameter import Parameter
def sub_filter_start_end(kernel_size, sub_kernel_size):
    """Return (start, end) indices that center a sub-kernel inside a kernel.

    Both sizes are expected to be odd; the assert trips for even
    sub-kernels (end - start would not equal sub_kernel_size).
    """
    center, half = kernel_size // 2, sub_kernel_size // 2
    start = center - half
    end = center + half + 1
    assert end - start == sub_kernel_size
    return start, end
import torch
from torch.autograd import Variable
import math
from nndct_shared.utils import NndctOption, NndctScreenLogger
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
from .quant_noise import eval_qnoise
import pytorch_nndct.utils as py_utils
import torch.nn.functional as F
class deephi_Mish(torch.nn.Mish):
def __init__(self, *args, **kwards):
def forward(self, input):
def Mish(*args, **kwargs):
  """Factory: float Mish when quantization is inactive, else deephi_Mish."""
  mode, _ = maybe_get_quantizer()
  if mode is None:
    return torch.nn.Mish(*args, **kwargs)
  return deephi_Mish(*args, **kwargs)
import torch
import math
def eval_qnoise(output, res_f, efficency, deviation, rate, stop):
  # Update running quantization-noise statistics used for bias correction.
  #   output              - quantized layer result
  #   res_f               - float reference result of the same layer
  #   efficency/deviation - running EMA statistics carried between calls
  #   rate                - current bias-correction rate (may be halved here)
  #   stop                - flag set True once correction should cease
  # Returns (error, rate, stop, efficency, deviation), where `error` is the
  # elementwise difference output - res_f.
  error = torch.add(output, res_f, alpha=-1).data
  noise = error.pow(2).mean()
  if noise > 0:
    # log10 signal-to-noise "efficiency" score.
    eff = 1.25 * res_f.pow(2).mean().div(noise).log10().detach().cpu().numpy()
    dev = math.fabs(eff - efficency)
    if dev > 0:
      # Exponential moving average with weights 0.8 / 0.2.
      efficency = (efficency * 4 + eff) * 0.2
      deviation = (deviation * 4 + dev) * 0.2
      #print(node.name, efficency, deviation)
      if efficency > 4.0:
        rate = rate * 0.5
        # Stop when efficiency is very high or the deviation has stabilized.
        if (efficency > 4.3 or
            (deviation / efficency) < 0.05 or
            math.fabs(dev - deviation / dev) < 0.05):
          stop = True
      else:
        stop = True
    else:
      stop = True
  return error, rate, stop, efficency, deviation
import torch
from torch.autograd import Variable
import math
from nndct_shared.utils import NndctOption
from nndct_shared.quantization import quantize_tensors
from nndct_shared.quantization import maybe_get_quantizer
import pytorch_nndct.utils as py_utils
import torch.nn.functional as F
class deephi_BatchNorm(torch.nn.modules.batchnorm._BatchNorm):
  r"""DeePhi batchnorm operation, support float and double.

  Quantization-aware _BatchNorm: weight/bias and the input/output tensors
  pass through fake-quantization around the float batch_norm kernel. The
  running-stats handling mirrors torch's _BatchNorm.forward.
  """
  def __init__(self, *args, **kwards):
    super(deephi_BatchNorm, self).__init__(*args, **kwards)
    self.params_name = None       # quantizer-side names of (weight, bias)
    self.node = None              # graph node, bound later by the rewriter
    self.quant_mode, self.quantizer = maybe_get_quantizer()
    self.param_saved = False
    self.param_quantized = False  # set once params were fake-quantized
  def forward(self, input):
    params = [self.weight, self.bias]
    param_names = self.params_name[:2]
    qinput = quantize_tensors([input], self.node, tensor_type='input')[0]
    if (not self.param_quantized):
      # inplace: quantizer overwrites the float parameters directly.
      inplace = (NndctOption.nndct_quant_off.value or self.quantizer is not None and self.quantizer.inplace)
      # quantize weights and bias
      if inplace:
        _ = quantize_tensors(
            params,
            self.node,
            tensor_names=param_names,
            tensor_type='param')
        qparams = [p for p in params]
      else:
        qparams = quantize_tensors(
            params,
            self.node,
            tensor_names=param_names,
            tensor_type='param')
      if not NndctOption.nndct_quant_off.value:
        self.param_quantized = True
    else:
      qparams = [p for p in params]
    # --- below mirrors torch.nn.modules.batchnorm._BatchNorm.forward ---
    if self.momentum is None:
      exponential_average_factor = 0.0
    else:
      exponential_average_factor = self.momentum
    if self.training and self.track_running_stats:
      # TODO: if statement only here to tell the jit to skip emitting this when it is None
      if self.num_batches_tracked is not None: # type: ignore[has-type]
        self.num_batches_tracked = self.num_batches_tracked + 1 # type: ignore[has-type]
        if self.momentum is None: # use cumulative moving average
          exponential_average_factor = 1.0 / float(self.num_batches_tracked)
        else: # use exponential moving average
          exponential_average_factor = self.momentum
    if self.training:
      bn_training = True
    else:
      bn_training = (self.running_mean is None) and (self.running_var is None)
    output = torch.nn.functional.batch_norm(
        qinput,
        # If buffers are not to be tracked, ensure that they won't be updated
        self.running_mean if not self.training or self.track_running_stats else None,
        self.running_var if not self.training or self.track_running_stats else None,
        qparams[0],
        qparams[1],
        bn_training,
        exponential_average_factor,
        self.eps,
    )
    # quantize output
    output = quantize_tensors([output], self.node)[0]
    return output
  def _check_input_dim(self, input):
    # Accept any input rank (the base class would reject mismatched dims).
    pass
def BatchNorm(*args, **kwargs):
  """Factory: a rank-agnostic float _BatchNorm when quantization is inactive,
  otherwise the quantization-aware deephi_BatchNorm."""
  mode, _ = maybe_get_quantizer()
  if mode is None:
    import types
    def _check_input_dim(self, input):
      pass
    bn = torch.nn.modules.batchnorm._BatchNorm(*args, **kwargs)
    # Disable the dimensionality check so the float module also accepts any rank.
    bn._check_input_dim = types.MethodType(_check_input_dim, bn)
    return bn
  return deephi_BatchNorm(*args, **kwargs)
import torch
import math
from nndct_shared.utils import NndctOption, NndctScreenLogger, QError, QWarning
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
from .quant_noise import eval_qnoise
import pytorch_nndct.utils as py_utils
from .add import Add
from .multiply import Mul
class deephi_Linear(torch.nn.modules.linear.Linear):
  r"""DeePhi Linear operation, support float and double.

  Quantization-aware Linear with optional bias correction: during
  calibration (quant_mode == 1) it tracks quantization noise and nudges the
  bias; during deployment (quant_mode == 2) it applies the recorded
  correction before quantizing the parameters.
  """
  def __init__(self, *args, **kwards):
    super(deephi_Linear, self).__init__(*args, **kwards)
    self.params_name = None
    self.node = None
    self.quant_mode, self.quantizer = maybe_get_quantizer()
    self.param_saved = False
    self.param_quantized = False
    # self.weight and self.bias are not quantized float parameters
    self.weight_bak = None # backup of float weight for bias correction
    self.bias_bak = None # backup of float bias for bias correction
    self.stop = False  # bias-correction convergence flag (see eval_qnoise)
    self.rate = NndctOption.nndct_param_corr_rate.value
    self.efficency = 0.0   # running SNR statistic carried across batches
    self.deviation = 0.0   # running deviation statistic carried across batches
  def forward(self, input):
    # Full-float path when there is no quantizer or quantization is off.
    if self.quantizer is None or NndctOption.nndct_quant_off.value is True:
      return self.fp32_forward(input)
    else:
      return self.fake_quantize_forward(input)
  def fp32_forward(self, input):
    return super().forward(input)
  def fake_quantize_forward(self, input):
    # backup bias for bias correction feature
    if (not self.param_saved):
      if NndctOption.nndct_param_corr.value > 0:
        # backup orignal float parameters
        if self.quant_mode == 1:
          self.weight_bak = self.weight.detach().clone()
          if self.bias is not None:
            self.bias_bak = self.bias.detach().clone()
        # adjust bias
        if self.quant_mode == 2 and self.bias is not None:
          if not self.quantizer.has_bias_corr(self.node):
            NndctScreenLogger().error2user(QError.BIAS_CORRECTION, f"Bias correction file in quantization result directory does not match current model.")
            exit(2)
          self.bias.data = torch.sub(self.bias.data, torch.tensor(
              self.quantizer.get_bias_corr(self.node),
              device=self.bias.data.device,
              dtype=self.bias.data.dtype))
      self.param_saved = True
    # quantize parameters
    qweight = None
    qbias = None
    # inplace: the quantizer overwrites the float params directly.
    inplace = (NndctOption.nndct_quant_off.value or
        self.quantizer is not None and self.quantizer.inplace)
    if (not self.param_quantized):
      if inplace:
        _ = quantize_tensors(
            [self.weight],
            self.node,
            tensor_names = [self.params_name[0]],
            tensor_type = 'param')[0]
        qweight = self.weight
        if self.bias is not None:
          _ = quantize_tensors(
              [self.bias],
              self.node,
              tensor_names = [self.params_name[1]],
              tensor_type = 'param')[0]
          qbias = self.bias
      else:
        qweight = quantize_tensors(
            [self.weight],
            self.node,
            tensor_names = [self.params_name[0]],
            tensor_type = 'param')[0]
        if self.bias is not None:
          qbias = quantize_tensors(
              [self.bias],
              self.node,
              tensor_names = [self.params_name[1]],
              tensor_type = 'param')[0]
      if not NndctOption.nndct_quant_off.value:
        self.param_quantized = True
    else:
      qweight = self.weight
      qbias = self.bias
    # quantize input tensor
    qinput = quantize_tensors([input], self.node, tensor_type='input')[0]
    # split linear to mul and add operations
    if (self.quant_mode == 2 and self.quantizer.is_lstm):
      # i * w
      output = torch.matmul(qinput, torch.transpose(qweight, 0, 1))
      datatype = 'int'
      if NndctOption.nndct_only_int_quant.value is False:
        datatype = self.quantizer.get_quant_dtype(self.node.name, tensor_type='output')
      output = self.quantizer.quantize(output, self.node.name, self.node, tensor_type='output', datatype=datatype)
      # i*w + bias
      if self.bias is not None:
        output = torch.add(output, qbias)
    else:
      output = torch.nn.functional.linear(qinput, qweight, qbias)
      output = quantize_tensors([output], self.node)[0]
    if NndctOption.nndct_param_corr.value > 0:
      #rate = NndctOption.nndct_param_corr_rate.value
      # statistic of quantization error
      if (self.quant_mode == 1 and not self.stop):
        # Float reference result from the backed-up parameters.
        res_f = torch.matmul(input, torch.transpose(self.weight_bak, 0, 1))
        if self.bias is not None:
          res_f = torch.add(res_f, self.bias_bak)
        error, rate, self.stop, self.efficency, self.deviation = eval_qnoise(
            output,
            res_f,
            self.efficency,
            self.deviation,
            self.rate,
            self.stop)
        if (not self.stop) and (self.bias is not None):
          # Average the error over all but the last dim and fold a fraction
          # of it into the bias; force re-quantization next call.
          error = error.mean(dim = [k for k in range(error.dim()-1)])
          self.bias.data = torch.sub(self.bias.data, error, alpha=rate)
        self.param_quantized = False
    return output
  def bias_corr(self):
    # Export the accumulated bias correction (float backup minus corrected
    # bias) as a plain list, or None when no bias / no backup exists.
    if self.bias is not None and self.bias_bak is not None:
      bias_err = torch.sub(self.bias_bak, self.bias.data)
      return bias_err.cpu().numpy().tolist()
    else:
      return None
def Linear(*args, **kwargs):
  """Factory: plain torch.nn.Linear without an active quantizer, else deephi_Linear."""
  mode, _ = maybe_get_quantizer()
  if mode is None:
    return torch.nn.Linear(*args, **kwargs)
  return deephi_Linear(*args, **kwargs)
class ApproxModes(object):
  """String constants naming the supported approximation modes."""
  NO_APPROX = 'no_approx'
  EXP_POLY = 'exp_poly'
  EXP_LUT = 'exp_lut'
  QIO = 'quant_input_output'
def is_no_approx(mode):
  """Return True when *mode* requests no approximation."""
  return mode == ApproxModes.NO_APPROX
class ApproxModes(object):
  """String constants naming the supported approximation modes."""
  NO_APPROX = 'no_approx'
  EXP_POLY = 'exp_poly'
  EXP_LUT = 'exp_lut'
  QIO = 'quant_input_output'
def is_exp_poly(mode):
  """Return True when *mode* selects the polynomial exp approximation."""
  return mode == ApproxModes.EXP_POLY
class ApproxModes(object):
  """String constants naming the supported approximation modes."""
  NO_APPROX = 'no_approx'
  EXP_POLY = 'exp_poly'
  EXP_LUT = 'exp_lut'
  QIO = 'quant_input_output'
def is_exp_lut(mode):
  """Return True when *mode* selects the lookup-table exp approximation."""
  return mode == ApproxModes.EXP_LUT
class ApproxModes(object):
  """String constants naming the supported approximation modes.

  Attributes restored from the identical definitions elsewhere in this file
  (this copy had been truncated by extraction).
  """
  NO_APPROX = 'no_approx'
  EXP_POLY = 'exp_poly'
  EXP_LUT = 'exp_lut'
  QIO = 'quant_input_output'
def is_quant_input_output(mode):
  """Return True when *mode* selects quantize-input-output behavior."""
  return mode == ApproxModes.QIO
class ApproxModes(object):
  """String constants naming the supported approximation modes."""
  NO_APPROX = 'no_approx'
  EXP_POLY = 'exp_poly'
  EXP_LUT = 'exp_lut'
  QIO = 'quant_input_output'
def available_modes():
  """Return the list of selectable approximation mode names.

  NOTE(review): QIO is deliberately(?) not listed here — confirm.
  """
  return [ApproxModes.NO_APPROX, ApproxModes.EXP_POLY, ApproxModes.EXP_LUT]
import torch
import numpy as np
from .coefficient import get_sigmoid_positive_ploy_coeffcients, get_exp_poly_coeffcients, get_gelu_tanh_poly_coeffcients, get_tanh_positive_poly_coeffcients
from pytorch_nndct.utils.hw_dtype import is_subnormal, is_normal
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def mult_add(a_bf16, b_bf16, c_fp32):
  """Emulated fused multiply-add: bf16 operands are widened to fp32,
  multiplied, then added to the fp32 accumulator."""
  assert a_bf16.dtype == torch.bfloat16 and b_bf16.dtype == torch.bfloat16
  product = a_bf16.to(torch.float32) * b_bf16.to(torch.float32)
  return product + c_fp32
import torch
import numpy as np
from .coefficient import get_sigmoid_positive_ploy_coeffcients, get_exp_poly_coeffcients, get_gelu_tanh_poly_coeffcients, get_tanh_positive_poly_coeffcients
from pytorch_nndct.utils.hw_dtype import is_subnormal, is_normal
def ploy_HORNER_SCHEME(r, cs, degree):
  """Evaluate the degree-`degree` polynomial `cs` at `r` via Horner's rule,
  using bf16 multiply / fp32 accumulate steps; result cast back to r.dtype."""
  acc = mult_add(r, cs[degree].to(torch.bfloat16), cs[degree - 1])
  for idx in range(degree - 2, -1, -1):
    acc = mult_add(r, acc.to(torch.bfloat16), cs[idx])
  return acc.to(r.dtype)
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def ploy_HORNER_SCHEME(r, cs, degree):
  # Horner evaluation of polynomial `cs` at `r` with bf16 multiply /
  # fp32 accumulate steps. NOTE(review): duplicate of the definition a few
  # lines above (extraction artifact) — the later binding wins at import.
  out = mult_add(r, cs[degree].to(torch.bfloat16), cs[degree - 1])
  for i in reversed(range(degree - 1)):
    out = mult_add(r, out.to(torch.bfloat16), cs[i])
  out = out.to(r.dtype)
  return out
def get_sigmoid_positive_ploy_coeffcients(degree=4):
  """Fetch float32 coefficients for the positive-range sigmoid polynomial."""
  supported = list(SIGMOID_POSITIVE_PLOY_COEFFCIENTS.keys())
  if degree not in supported:
    raise NotImplementedError(f'degree must in: {supported}')
  return np.array(SIGMOID_POSITIVE_PLOY_COEFFCIENTS[degree], dtype=np.float32)
def sigmoid_positive_approx(z, degree=4):
  # Polynomial sigmoid approximation: evaluate on |z| clamped to [0, 7],
  # then reflect for negative inputs (sigmoid(-x) = 1 - sigmoid(x)) and
  # saturate to 0 / 1 outside [-7, 7].
  rge = [-7.0, 7.0]
  zero_mask = (z < rge[0]).to(torch.bfloat16)   # saturate to 0
  one_mask = (z > rge[1]).to(torch.bfloat16)    # saturate to 1
  x_sgn, x = torch.sign(z), torch.clamp(torch.abs(z), 0, rge[1])
  m_zero_mask = (x_sgn < 0).to(torch.bfloat16)  # negative-input reflection
  cs = get_sigmoid_positive_ploy_coeffcients(degree)
  coeffs = torch.from_numpy(cs).to(z.device)
  # out = coeffs[0] + z * (coeffs[1] + z * (coeffs[2] + z * coeffs[3]))
  out = ploy_HORNER_SCHEME(x, coeffs, degree)
  # 1 - out where z < 0, out elsewhere.
  out = m_zero_mask - out * m_zero_mask + out * (1 - m_zero_mask)
  # Apply the saturation masks.
  out = out * (1.0 - zero_mask - one_mask) + one_mask
  return out
import torch
import numpy as np
from .coefficient import get_sigmoid_positive_ploy_coeffcients, get_exp_poly_coeffcients, get_gelu_tanh_poly_coeffcients, get_tanh_positive_poly_coeffcients
from pytorch_nndct.utils.hw_dtype import is_subnormal, is_normal
def ploy_HORNER_SCHEME(r, cs, degree):
  # Horner evaluation of polynomial `cs` at `r` with bf16 multiply /
  # fp32 accumulate steps; result is cast back to r.dtype.
  out = mult_add(r, cs[degree].to(torch.bfloat16), cs[degree - 1])
  for i in reversed(range(degree - 1)):
    out = mult_add(r, out.to(torch.bfloat16), cs[i])
  out = out.to(r.dtype)
  return out
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def ploy_HORNER_SCHEME(r, cs, degree):
  # NOTE(review): duplicate of the definition a few lines above
  # (extraction artifact) — the later binding wins at import.
  out = mult_add(r, cs[degree].to(torch.bfloat16), cs[degree - 1])
  for i in reversed(range(degree - 1)):
    out = mult_add(r, out.to(torch.bfloat16), cs[i])
  out = out.to(r.dtype)
  return out
def get_tanh_positive_poly_coeffcients(degree=4):
  """Float32 coefficients of the positive-range tanh polynomial.

  Raises KeyError for a degree without a coefficient table.
  """
  return np.array(TANH_POSITIVE_POLY_COEFFCIENTS[degree], dtype=np.float32)
def tanh_positive_approx(z, degree=4):
  # Polynomial tanh approximation: evaluate on |z| clamped to [0, 4],
  # restore the sign (tanh is odd) and saturate to -1 / +1 outside [-4, 4].
  assert z.dtype == torch.bfloat16
  rge = [-4.0, 4.0]
  m_one_mask = (z < rge[0]).to(torch.bfloat16)  # saturate to -1
  one_mask = (z > rge[1]).to(torch.bfloat16)    # saturate to +1
  x_sgn, x = torch.sign(z), torch.clamp(torch.abs(z), 0, rge[1])
  cs = get_tanh_positive_poly_coeffcients(degree=degree)
  coeffs = torch.from_numpy(cs).to(z.device)
  out = ploy_HORNER_SCHEME(x, coeffs, degree)
  out = out * x_sgn
  # Apply the saturation masks.
  out = out * (1.0 - m_one_mask - one_mask) + one_mask + m_one_mask * (-1)
  return out
import torch
import numpy as np
from .coefficient import get_sigmoid_positive_ploy_coeffcients, get_exp_poly_coeffcients, get_gelu_tanh_poly_coeffcients, get_tanh_positive_poly_coeffcients
from pytorch_nndct.utils.hw_dtype import is_subnormal, is_normal
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def exp_approx_poly(x, exp_table_size=1, degree=3, method='HORNER'):
  # Approximate exp(x) via range reduction: x = n*ln2/T + r with
  # n = T*m + j, so exp(x) = 2**m * 2**(j/T) * exp(r); exp(r) is a
  # degree-`degree` polynomial evaluated with Horner's scheme in bf16.
  # Output is bf16, clamped to the bf16 finite range.
  assert method == 'HORNER'
  assert x.dtype == torch.float32
  EXP_TABLE_SIZE = exp_table_size
  cs = get_exp_poly_coeffcients(exp_table_size, degree)
  cs = torch.from_numpy(cs).to(x.device)
  if EXP_TABLE_SIZE > 1:
    scaling_factor = EXP_TABLE_SIZE / np.log(2)
    n = _AMD_ROUND(x.to(torch.float32) * scaling_factor)
    m = _AMD_FLOOR((n / EXP_TABLE_SIZE).to(torch.float32)) #.to(torch.bfloat16)
  else:
    # Table size 1: do the scaling in bf16 to mimic the hardware path.
    scaling_factor = x.to(torch.bfloat16) * (
        torch.scalar_tensor(EXP_TABLE_SIZE / LN2).to(torch.bfloat16))
    n = _AMD_ROUND(scaling_factor.to(torch.float32)).to(torch.bfloat16)
    m = _AMD_FLOOR((n / EXP_TABLE_SIZE).to(torch.float32)).to(torch.bfloat16)
  j = n - m * EXP_TABLE_SIZE #n % EXP_TABLE_SIZE
  # exp = 2**( (EXP_TABLE_SIZE * m + j + f) / EXP_TABLE_SIZE)
  # (j/EXP_TABLE_SIZE) < 1.0: Look up table for 2**(j/EXP_TABLE_SIZE).
  # r = np.exp(f*(LN2/EXP_TABLE_SIZE)) < 1.0: Polynomial
  r = x - n.to(torch.float32) * (LN2 / EXP_TABLE_SIZE)
  # exp = 2**m * (2**(j/EXP_TABLE_SIZE)) * np.exp(r)
  if method == 'HORNER':
    # exp_x = ( 2**m * (2**(j/EXP_TABLE_SIZE)) * ploy_HORNER_SCHEME(r.to(torch.bfloat16), cs, degree) ).to(torch.bfloat16
    exp_x = (2**(j / EXP_TABLE_SIZE).to(torch.bfloat16)) * ploy_HORNER_SCHEME(
        r.to(torch.bfloat16), cs, degree)
  if EXP_TABLE_SIZE > 1:
    assert m.dtype == torch.float32 and exp_x.dtype == torch.bfloat16
  else:
    assert m.dtype == torch.bfloat16 and exp_x.dtype == torch.bfloat16
  exp_x = 2**m.to(torch.float32) * exp_x.to(torch.float32)
  exp_x = torch.clamp(exp_x, _MIN_BFLOAT16, _MAX_BFLOAT16).to(torch.bfloat16)
  assert exp_x.dtype == torch.bfloat16
  return exp_x
def reciprocal_approx_moroz(z, output_subnormals=False):
  """Approximate 1/z with the Moroz et al. magic-constant + Newton-Raphson scheme.

  Runs on the CPU via numpy bit tricks and returns a tensor on z's
  device/dtype. Inputs whose reciprocal would be infinite (subnormals,
  zeros) map to signed inf; non-finite or overflowing inputs map to
  signed zero.
  """
  assert z.dtype == torch.float32
  # @Ephrem (ephremw@xilinx.com)
  def okay_to_process(x):
    r"""Indicates which elements in the tensor are ready for reciprocal
    approximation.
    The FP32 exponent ranges from -126 to 127. The largest number whose reciprocal
    does not become subnormal is :math:`2^{126}` because :math:`2^{-126}`
    is the smallest normal number.
    This function returns `True` if `x` is normal and :math:`0 < |x| \le 2^{126}`.
    """
    okay_idx = np.logical_and(is_normal(x), x != 0)
    if output_subnormals:
      return okay_idx
    return np.logical_and(
        okay_idx,
        x <= np.exp2(
            min(
                # maxexp=128, smallest for overflow
                # pylint: disable=no-member
                np.abs(np.finfo(np.float32).minexp),
                np.abs(np.finfo(np.float32).maxexp - 2))))
  def mult_add_func(a, b, c):
    return a * b + c
  magic_n = np.int32(0x7eb53567)
  k1 = np.float32(1.9395974)
  k2 = np.float32(1.436142)
  magic = [magic_n, k1, k2]
  x = z.detach().cpu().numpy()
  frac, exponent = np.frexp(x)
  reduced_x = np.ldexp(frac, 1, dtype=np.float32)  # fold |x| into [1, 2)
  y_as_int = np.int32(magic[0]) - reduced_x.view(np.int32)
  y_as_fp32 = y_as_int.view(np.float32)
  y_as_fp32 = np.where(
      okay_to_process(x),
      # 1st (modified) Newton-Raphson
      mult_add_func(
          mult_add_func(magic[1], y_as_fp32, 0.0),
          mult_add_func(-reduced_x, y_as_fp32, np.float32(magic[2])), 0.0),
      y_as_fp32)
  # r = 1 - xy, y = ry + y # 2nd (classic) Newton-Raphson
  y_as_fp32 = np.where(
      okay_to_process(x),
      mult_add_func(y_as_fp32,
                    mult_add_func(y_as_fp32, -reduced_x, np.float32(1.0)),
                    y_as_fp32), y_as_fp32)
  out = np.where(
      # FIX: the original wrote `np.isfinite(x) is False`, an identity test
      # of an ARRAY against False that is always scalar False, so NaN/inf
      # inputs were never mapped to signed zero. Use elementwise negation.
      np.logical_or(np.logical_not(np.isfinite(x)), exponent > 126 + 1),
      np.copysign(np.float32(0), x),
      np.where(
          np.logical_or(is_subnormal(x), x == 0),
          np.copysign(np.float32(np.inf), x),
          np.ldexp(y_as_fp32, -exponent + 1).astype(np.float32)))
  out_tensor = torch.from_numpy(out).to(z.device).to(z.dtype)
  return out_tensor
def tanh_with_exp_approx(z,
                         degree=3,
                         exp_table_size=1,
                         output_subnormals=False,
                         dtype=torch.bfloat16):
  # tanh via the identity tanh(x) = 2/(1 + exp(-2|x|)) - 1 restored with
  # sign(x); exp uses the polynomial approximation, the reciprocal uses the
  # Moroz bit-trick approximation. Input and output dtype must match `dtype`.
  assert z.dtype == dtype
  sign = torch.sign(z).to(dtype)
  x = torch.abs(z)
  out = -2.0 * x
  out = 1.0 + exp_approx_poly(
      out.to(torch.float32), exp_table_size=exp_table_size,
      degree=degree).to(dtype)
  out = reciprocal_approx_moroz(
      out.to(torch.float32), output_subnormals=output_subnormals).to(dtype)
  out = 2.0 * out - 1.0
  out = out * sign
  assert out.dtype == dtype
  return out
import torch
import numpy as np
from .coefficient import get_sigmoid_positive_ploy_coeffcients, get_exp_poly_coeffcients, get_gelu_tanh_poly_coeffcients, get_tanh_positive_poly_coeffcients
from pytorch_nndct.utils.hw_dtype import is_subnormal, is_normal
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def exp_approx_lut(x, lg_table_depth=8):
def reciprocal_approx_moroz(z, output_subnormals=False):
def tanh_with_exp_approx_lut(z, output_subnormals=False, dtype=torch.bfloat16):
  # Same tanh identity as tanh_with_exp_approx, but exp comes from the
  # lookup-table approximation instead of the polynomial one.
  assert z.dtype == dtype
  sign = torch.sign(z).to(dtype)
  x = torch.abs(z)
  out = -2.0 * x
  out = 1.0 + exp_approx_lut(out.to(torch.bfloat16)).to(dtype)
  out = reciprocal_approx_moroz(
      out.to(torch.float32), output_subnormals=output_subnormals).to(dtype)
  out = 2.0 * out - 1.0
  out = out * sign
  assert out.dtype == dtype
  return out
import torch
import numpy as np
from .coefficient import get_sigmoid_positive_ploy_coeffcients, get_exp_poly_coeffcients, get_gelu_tanh_poly_coeffcients, get_tanh_positive_poly_coeffcients
from pytorch_nndct.utils.hw_dtype import is_subnormal, is_normal
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def exp_approx_poly(x, exp_table_size=1, degree=3, method='HORNER'):
def reciprocal_approx_moroz(z, output_subnormals=False):
def sigmoid_with_exp_approx(z,
                            degree=3,
                            exp_table_size=1,
                            output_subnormals=False,
                            dtype=torch.bfloat16):
  """Approximate sigmoid(z) = 1/(1+exp(-z)).

  exp(-z) comes from the polynomial approximation and the division from the
  Moroz reciprocal approximation. Input and output dtype are both ``dtype``.
  """
  assert z.dtype == dtype
  z32 = z.to(torch.float32)
  denom = 1.0 + exp_approx_poly(
      -z32, exp_table_size=exp_table_size, degree=degree).to(dtype)
  result = reciprocal_approx_moroz(
      denom.to(torch.float32), output_subnormals=output_subnormals)
  result = result.to(dtype)
  assert result.dtype == dtype
  return result
23,638 | import torch
import numpy as np
from .coefficient import get_sigmoid_positive_ploy_coeffcients, get_exp_poly_coeffcients, get_gelu_tanh_poly_coeffcients, get_tanh_positive_poly_coeffcients
from pytorch_nndct.utils.hw_dtype import is_subnormal, is_normal
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def exp_approx_lut(x, lg_table_depth=8):
  """Approximate exp(x) for bfloat16 input via power-of-two range reduction.

  With T = 2**lg_table_depth, computes s = round(x * T / ln2), splits it as
  s = q*T + r, and evaluates exp(x) = 2**q * 2**(r/T); the fractional power
  of two is taken in bfloat16 (hardware LUT emulation). Result is clamped
  to the finite bfloat16 range.

  NOTE(review): rounding semantics of `_AMD_ROUND`/`_AMD_FLOOR` are defined
  elsewhere in this module — presumably round-to-nearest / floor; confirm.
  """
  assert x.dtype == torch.bfloat16
  table_size = np.exp2(lg_table_depth).astype(int)
  scale = table_size / np.log(2)
  scaled = _AMD_ROUND(x.to(torch.float32) * scale)
  assert scaled.dtype == torch.float32
  quotient = _AMD_FLOOR(scaled / table_size)
  remainder = scaled - quotient * table_size  # equivalent to scaled % table_size
  result = (2**quotient) * (2**(remainder / table_size).to(torch.bfloat16))
  result = torch.clamp(result, _MIN_BFLOAT16, _MAX_BFLOAT16).to(torch.bfloat16)
  assert result.dtype == torch.bfloat16
  return result
def reciprocal_approx_moroz(z, output_subnormals=False):
  """Approximate the elementwise reciprocal 1/z of an fp32 tensor.

  Builds an initial guess with an integer "magic number" bit trick and
  refines it with two Newton-Raphson steps (Moroz et al. method).

  Args:
    z: torch tensor of dtype torch.float32.
    output_subnormals: when True, inputs whose reciprocal would be
      subnormal are processed instead of being flushed.

  Returns:
    A tensor on ``z``'s device with ``z``'s dtype. Special cases:
    non-finite inputs and inputs whose exponent would overflow map to
    signed zero; subnormal or zero inputs map to signed infinity.
  """
  assert z.dtype == torch.float32

  # @Ephrem (ephremw@xilinx.com)
  def okay_to_process(x):
    r"""Indicates which elements in the tensor are ready for reciprocal
    approximation.

    The FP32 exponent ranges from -126 to 127. The largest number whose
    reciprocal does not become subnormal is :math:`2^{126}` because
    :math:`2^{-126}` is the smallest normal number.

    This function returns `True` if `x` is normal and
    :math:`0 < |x| \le 2^{126}`.
    """
    okay_idx = np.logical_and(is_normal(x), x != 0)
    if output_subnormals:
      return okay_idx
    return np.logical_and(
        okay_idx,
        x <= np.exp2(
            min(
                # maxexp=128, smallest for overflow
                # pylint: disable=no-member
                np.abs(np.finfo(np.float32).minexp),
                np.abs(np.finfo(np.float32).maxexp - 2))))

  def mult_add_func(a, b, c):
    # Multiply-add helper used by the refinement steps.
    return a * b + c

  magic_n = np.int32(0x7eb53567)
  k1 = np.float32(1.9395974)
  k2 = np.float32(1.436142)
  magic = [magic_n, k1, k2]
  x = z.detach().cpu().numpy()
  # Range reduction: x = frac * 2^exponent with frac in [0.5, 1), so
  # reduced_x = 2*frac lies in [1, 2); approximate 1/reduced_x below.
  frac, exponent = np.frexp(x)
  reduced_x = np.ldexp(frac, 1, dtype=np.float32)
  y_as_int = np.int32(magic[0]) - reduced_x.view(np.int32)
  y_as_fp32 = y_as_int.view(np.float32)
  y_as_fp32 = np.where(
      okay_to_process(x),
      # 1st (modified) Newton-Raphson
      mult_add_func(
          mult_add_func(magic[1], y_as_fp32, 0.0),
          mult_add_func(-reduced_x, y_as_fp32, np.float32(magic[2])), 0.0),
      y_as_fp32)
  # r = 1 - xy, y = ry + y # 2nd (classic) Newton-Raphson
  y_as_fp32 = np.where(
      okay_to_process(x),
      mult_add_func(y_as_fp32,
                    mult_add_func(y_as_fp32, -reduced_x, np.float32(1.0)),
                    y_as_fp32), y_as_fp32)
  out = np.where(
      # BUGFIX: the original wrote `np.isfinite(x) is False`, an identity
      # comparison between an ndarray and the Python `False` singleton that
      # always evaluates to False, so NaN/inf inputs were never flushed to
      # signed zero. Use elementwise logical negation instead.
      np.logical_or(np.logical_not(np.isfinite(x)), exponent > 126 + 1),
      np.copysign(np.float32(0), x),
      np.where(
          np.logical_or(is_subnormal(x), x == 0),
          np.copysign(np.float32(np.inf), x),
          # Undo the range reduction: 1/x = (1/reduced_x) * 2^(1-exponent).
          np.ldexp(y_as_fp32, -exponent + 1).astype(np.float32)))
  out_tensor = torch.from_numpy(out).to(z.device).to(z.dtype)
  return out_tensor
def sigmoid_with_exp_approx_lut(z,
                                output_subnormals=False,
                                dtype=torch.bfloat16):
  """Approximate sigmoid(z) = 1/(1+exp(-z)) with the LUT exponential.

  Same contract as ``sigmoid_with_exp_approx`` but exp(-z) is produced by
  ``exp_approx_lut`` after a bfloat16 cast.
  """
  assert z.dtype == dtype
  zb = z.to(torch.bfloat16)
  denom = 1.0 + exp_approx_lut(-zb).to(dtype)
  result = reciprocal_approx_moroz(
      denom.to(torch.float32), output_subnormals=output_subnormals)
  result = result.to(dtype)
  assert result.dtype == dtype
  return result
23,639 | import torch
import numpy as np
from .coefficient import get_sigmoid_positive_ploy_coeffcients, get_exp_poly_coeffcients, get_gelu_tanh_poly_coeffcients, get_tanh_positive_poly_coeffcients
from pytorch_nndct.utils.hw_dtype import is_subnormal, is_normal
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def exp_approx_poly(x, exp_table_size=1, degree=3, method='HORNER'):
  """Approximate exp(x) using range reduction plus a Horner polynomial.

  Splits x = n * (LN2 / EXP_TABLE_SIZE) + r with integer-valued n, and
  n = m * EXP_TABLE_SIZE + j, so that
  exp(x) = 2**m * 2**(j / EXP_TABLE_SIZE) * exp(r),
  where exp(r) is evaluated by a degree-`degree` polynomial in bfloat16.

  Args:
    x: torch.float32 input tensor.
    exp_table_size: range-reduction table size; a value of 1 selects the
      bfloat16 scaling path below.
    degree: polynomial degree for the exp(r) approximation.
    method: only 'HORNER' is supported.

  Returns:
    bfloat16 tensor of exp(x), clamped to the finite bfloat16 range.
  """
  assert method == 'HORNER'
  assert x.dtype == torch.float32
  EXP_TABLE_SIZE = exp_table_size
  cs = get_exp_poly_coeffcients(exp_table_size, degree)
  cs = torch.from_numpy(cs).to(x.device)
  if EXP_TABLE_SIZE > 1:
    # fp32 path: n = round(x * T / ln2), m = floor(n / T).
    scaling_factor = EXP_TABLE_SIZE / np.log(2)
    n = _AMD_ROUND(x.to(torch.float32) * scaling_factor)
    m = _AMD_FLOOR((n / EXP_TABLE_SIZE).to(torch.float32)) #.to(torch.bfloat16)
  else:
    # bf16 path: scaling is done in bfloat16, presumably to mimic hardware.
    # NOTE(review): here `scaling_factor` actually holds the *scaled input*,
    # not the scale itself — name kept as-is.
    scaling_factor = x.to(torch.bfloat16) * (
        torch.scalar_tensor(EXP_TABLE_SIZE / LN2).to(torch.bfloat16))
    n = _AMD_ROUND(scaling_factor.to(torch.float32)).to(torch.bfloat16)
    m = _AMD_FLOOR((n / EXP_TABLE_SIZE).to(torch.float32)).to(torch.bfloat16)
  j = n - m * EXP_TABLE_SIZE #n % EXP_TABLE_SIZE
  # exp = 2**( (EXP_TABLE_SIZE * m + j + f) / EXP_TABLE_SIZE)
  # (j/EXP_TABLE_SIZE) < 1.0: Look up table for 2**(j/EXP_TABLE_SIZE).
  # r = np.exp(f*(LN2/EXP_TABLE_SIZE)) < 1.0: Polynomial
  r = x - n.to(torch.float32) * (LN2 / EXP_TABLE_SIZE)
  # exp = 2**m * (2**(j/EXP_TABLE_SIZE)) * np.exp(r)
  if method == 'HORNER':
    # exp_x = ( 2**m * (2**(j/EXP_TABLE_SIZE)) * ploy_HORNER_SCHEME(r.to(torch.bfloat16), cs, degree) ).to(torch.bfloat16
    exp_x = (2**(j / EXP_TABLE_SIZE).to(torch.bfloat16)) * ploy_HORNER_SCHEME(
        r.to(torch.bfloat16), cs, degree)
    # dtype invariants differ per path: m stays fp32 for T>1, bf16 for T==1.
    if EXP_TABLE_SIZE > 1:
      assert m.dtype == torch.float32 and exp_x.dtype == torch.bfloat16
    else:
      assert m.dtype == torch.bfloat16 and exp_x.dtype == torch.bfloat16
    exp_x = 2**m.to(torch.float32) * exp_x.to(torch.float32)
  # Clamp so the final bf16 cast cannot overflow to inf.
  exp_x = torch.clamp(exp_x, _MIN_BFLOAT16, _MAX_BFLOAT16).to(torch.bfloat16)
  assert exp_x.dtype == torch.bfloat16
  return exp_x
def reciprocal_approx_moroz(z, output_subnormals=False):
  """Approximate the elementwise reciprocal 1/z of an fp32 tensor.

  Builds an initial guess with an integer "magic number" bit trick and
  refines it with two Newton-Raphson steps (Moroz et al. method).

  Args:
    z: torch tensor of dtype torch.float32.
    output_subnormals: when True, inputs whose reciprocal would be
      subnormal are processed instead of being flushed.

  Returns:
    A tensor on ``z``'s device with ``z``'s dtype. Special cases:
    non-finite inputs and inputs whose exponent would overflow map to
    signed zero; subnormal or zero inputs map to signed infinity.
  """
  assert z.dtype == torch.float32

  # @Ephrem (ephremw@xilinx.com)
  def okay_to_process(x):
    r"""Indicates which elements in the tensor are ready for reciprocal
    approximation.

    The FP32 exponent ranges from -126 to 127. The largest number whose
    reciprocal does not become subnormal is :math:`2^{126}` because
    :math:`2^{-126}` is the smallest normal number.

    This function returns `True` if `x` is normal and
    :math:`0 < |x| \le 2^{126}`.
    """
    okay_idx = np.logical_and(is_normal(x), x != 0)
    if output_subnormals:
      return okay_idx
    return np.logical_and(
        okay_idx,
        x <= np.exp2(
            min(
                # maxexp=128, smallest for overflow
                # pylint: disable=no-member
                np.abs(np.finfo(np.float32).minexp),
                np.abs(np.finfo(np.float32).maxexp - 2))))

  def mult_add_func(a, b, c):
    # Multiply-add helper used by the refinement steps.
    return a * b + c

  magic_n = np.int32(0x7eb53567)
  k1 = np.float32(1.9395974)
  k2 = np.float32(1.436142)
  magic = [magic_n, k1, k2]
  x = z.detach().cpu().numpy()
  # Range reduction: x = frac * 2^exponent with frac in [0.5, 1), so
  # reduced_x = 2*frac lies in [1, 2); approximate 1/reduced_x below.
  frac, exponent = np.frexp(x)
  reduced_x = np.ldexp(frac, 1, dtype=np.float32)
  y_as_int = np.int32(magic[0]) - reduced_x.view(np.int32)
  y_as_fp32 = y_as_int.view(np.float32)
  y_as_fp32 = np.where(
      okay_to_process(x),
      # 1st (modified) Newton-Raphson
      mult_add_func(
          mult_add_func(magic[1], y_as_fp32, 0.0),
          mult_add_func(-reduced_x, y_as_fp32, np.float32(magic[2])), 0.0),
      y_as_fp32)
  # r = 1 - xy, y = ry + y # 2nd (classic) Newton-Raphson
  y_as_fp32 = np.where(
      okay_to_process(x),
      mult_add_func(y_as_fp32,
                    mult_add_func(y_as_fp32, -reduced_x, np.float32(1.0)),
                    y_as_fp32), y_as_fp32)
  out = np.where(
      # BUGFIX: the original wrote `np.isfinite(x) is False`, an identity
      # comparison between an ndarray and the Python `False` singleton that
      # always evaluates to False, so NaN/inf inputs were never flushed to
      # signed zero. Use elementwise logical negation instead.
      np.logical_or(np.logical_not(np.isfinite(x)), exponent > 126 + 1),
      np.copysign(np.float32(0), x),
      np.where(
          np.logical_or(is_subnormal(x), x == 0),
          np.copysign(np.float32(np.inf), x),
          # Undo the range reduction: 1/x = (1/reduced_x) * 2^(1-exponent).
          np.ldexp(y_as_fp32, -exponent + 1).astype(np.float32)))
  out_tensor = torch.from_numpy(out).to(z.device).to(z.dtype)
  return out_tensor
def softmax_approx_poly(x, axis=-1, exp_table_size=1, degree=3):
  """Softmax along ``axis`` using polynomial exp and approximate reciprocal.

  The input is max-shifted for numerical stability, exponentiated with
  ``exp_approx_poly``, and normalized by multiplying with the approximate
  reciprocal of the sum (instead of a true division). Returns bfloat16.
  """
  shifted = x - torch.max(x.to(torch.float32), axis,
                          keepdims=True)[0].to(x.dtype)
  numer = exp_approx_poly(shifted, exp_table_size=exp_table_size,
                          degree=degree)
  # NOTE: approximates numer / sum(numer) via a reciprocal approximation.
  denom = torch.sum(numer.to(torch.float32), axis, keepdim=True)
  inv_denom = reciprocal_approx_moroz(denom)
  return (numer.to(torch.float32) * inv_denom).to(torch.bfloat16)
23,640 | import torch
import numpy as np
from .coefficient import get_sigmoid_positive_ploy_coeffcients, get_exp_poly_coeffcients, get_gelu_tanh_poly_coeffcients, get_tanh_positive_poly_coeffcients
from pytorch_nndct.utils.hw_dtype import is_subnormal, is_normal
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def exp_approx_lut(x, lg_table_depth=8):
  """Approximate exp(x) for bfloat16 input via power-of-two range reduction.

  With T = 2**lg_table_depth, computes s = round(x * T / ln2), splits it as
  s = q*T + r, and evaluates exp(x) = 2**q * 2**(r/T); the fractional power
  of two is taken in bfloat16 (hardware LUT emulation). Result is clamped
  to the finite bfloat16 range.

  NOTE(review): rounding semantics of `_AMD_ROUND`/`_AMD_FLOOR` are defined
  elsewhere in this module — presumably round-to-nearest / floor; confirm.
  """
  assert x.dtype == torch.bfloat16
  table_size = np.exp2(lg_table_depth).astype(int)
  scale = table_size / np.log(2)
  scaled = _AMD_ROUND(x.to(torch.float32) * scale)
  assert scaled.dtype == torch.float32
  quotient = _AMD_FLOOR(scaled / table_size)
  remainder = scaled - quotient * table_size  # equivalent to scaled % table_size
  result = (2**quotient) * (2**(remainder / table_size).to(torch.bfloat16))
  result = torch.clamp(result, _MIN_BFLOAT16, _MAX_BFLOAT16).to(torch.bfloat16)
  assert result.dtype == torch.bfloat16
  return result
def reciprocal_approx_moroz(z, output_subnormals=False):
  """Approximate the elementwise reciprocal 1/z of an fp32 tensor.

  Builds an initial guess with an integer "magic number" bit trick and
  refines it with two Newton-Raphson steps (Moroz et al. method).

  Args:
    z: torch tensor of dtype torch.float32.
    output_subnormals: when True, inputs whose reciprocal would be
      subnormal are processed instead of being flushed.

  Returns:
    A tensor on ``z``'s device with ``z``'s dtype. Special cases:
    non-finite inputs and inputs whose exponent would overflow map to
    signed zero; subnormal or zero inputs map to signed infinity.
  """
  assert z.dtype == torch.float32

  # @Ephrem (ephremw@xilinx.com)
  def okay_to_process(x):
    r"""Indicates which elements in the tensor are ready for reciprocal
    approximation.

    The FP32 exponent ranges from -126 to 127. The largest number whose
    reciprocal does not become subnormal is :math:`2^{126}` because
    :math:`2^{-126}` is the smallest normal number.

    This function returns `True` if `x` is normal and
    :math:`0 < |x| \le 2^{126}`.
    """
    okay_idx = np.logical_and(is_normal(x), x != 0)
    if output_subnormals:
      return okay_idx
    return np.logical_and(
        okay_idx,
        x <= np.exp2(
            min(
                # maxexp=128, smallest for overflow
                # pylint: disable=no-member
                np.abs(np.finfo(np.float32).minexp),
                np.abs(np.finfo(np.float32).maxexp - 2))))

  def mult_add_func(a, b, c):
    # Multiply-add helper used by the refinement steps.
    return a * b + c

  magic_n = np.int32(0x7eb53567)
  k1 = np.float32(1.9395974)
  k2 = np.float32(1.436142)
  magic = [magic_n, k1, k2]
  x = z.detach().cpu().numpy()
  # Range reduction: x = frac * 2^exponent with frac in [0.5, 1), so
  # reduced_x = 2*frac lies in [1, 2); approximate 1/reduced_x below.
  frac, exponent = np.frexp(x)
  reduced_x = np.ldexp(frac, 1, dtype=np.float32)
  y_as_int = np.int32(magic[0]) - reduced_x.view(np.int32)
  y_as_fp32 = y_as_int.view(np.float32)
  y_as_fp32 = np.where(
      okay_to_process(x),
      # 1st (modified) Newton-Raphson
      mult_add_func(
          mult_add_func(magic[1], y_as_fp32, 0.0),
          mult_add_func(-reduced_x, y_as_fp32, np.float32(magic[2])), 0.0),
      y_as_fp32)
  # r = 1 - xy, y = ry + y # 2nd (classic) Newton-Raphson
  y_as_fp32 = np.where(
      okay_to_process(x),
      mult_add_func(y_as_fp32,
                    mult_add_func(y_as_fp32, -reduced_x, np.float32(1.0)),
                    y_as_fp32), y_as_fp32)
  out = np.where(
      # BUGFIX: the original wrote `np.isfinite(x) is False`, an identity
      # comparison between an ndarray and the Python `False` singleton that
      # always evaluates to False, so NaN/inf inputs were never flushed to
      # signed zero. Use elementwise logical negation instead.
      np.logical_or(np.logical_not(np.isfinite(x)), exponent > 126 + 1),
      np.copysign(np.float32(0), x),
      np.where(
          np.logical_or(is_subnormal(x), x == 0),
          np.copysign(np.float32(np.inf), x),
          # Undo the range reduction: 1/x = (1/reduced_x) * 2^(1-exponent).
          np.ldexp(y_as_fp32, -exponent + 1).astype(np.float32)))
  out_tensor = torch.from_numpy(out).to(z.device).to(z.dtype)
  return out_tensor
def softmax_approx_lut(x, axis=-1, lg_table_depth=8):
  """Softmax along ``axis`` using the LUT exp and approximate reciprocal.

  The input is max-shifted for numerical stability, exponentiated with
  ``exp_approx_lut``, and normalized by multiplying with the approximate
  reciprocal of the sum (instead of a true division). Returns bfloat16.
  """
  shifted = x - torch.max(x.to(torch.float32), axis,
                          keepdims=True)[0].to(x.dtype)
  numer = exp_approx_lut(shifted, lg_table_depth=lg_table_depth)
  # NOTE: approximates numer / sum(numer) via a reciprocal approximation.
  denom = torch.sum(numer.to(torch.float32), axis, keepdim=True)
  inv_denom = reciprocal_approx_moroz(denom)
  return (numer.to(torch.float32) * inv_denom).to(torch.bfloat16)
23,641 | import torch
import numpy as np
from .coefficient import get_sigmoid_positive_ploy_coeffcients, get_exp_poly_coeffcients, get_gelu_tanh_poly_coeffcients, get_tanh_positive_poly_coeffcients
from pytorch_nndct.utils.hw_dtype import is_subnormal, is_normal
def ploy_HORNER_SCHEME(r, cs, degree):
  """Evaluate the polynomial sum_i cs[i] * r**i by Horner's rule.

  Accumulation is carried out in bfloat16 via ``mult_add``; the result is
  cast back to ``r``'s dtype.
  """
  acc = mult_add(r, cs[degree].to(torch.bfloat16), cs[degree - 1])
  for idx in range(degree - 2, -1, -1):
    acc = mult_add(r, acc.to(torch.bfloat16), cs[idx])
  return acc.to(r.dtype)
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def ploy_HORNER_SCHEME(r, cs, degree):
  """Evaluate the polynomial sum_i cs[i] * r**i by Horner's rule.

  Accumulation is carried out in bfloat16 via ``mult_add``; the result is
  cast back to ``r``'s dtype.
  """
  acc = mult_add(r, cs[degree].to(torch.bfloat16), cs[degree - 1])
  for idx in range(degree - 2, -1, -1):
    acc = mult_add(r, acc.to(torch.bfloat16), cs[idx])
  return acc.to(r.dtype)
def get_gelu_tanh_poly_coeffcients(degree=2):
  """Return the fp32 coefficient array for the degree-``degree`` GELU
  tanh polynomial."""
  return np.array(GLUE_TANH_POSITIVE_POLY_COEFFCIENTS[degree],
                  dtype=np.float32)
def gelu_approx(x, degree=2):
  """Polynomial GELU approximation for bfloat16 tensors.

  Inputs below -2.5 are forced to 0 and inputs above 2.5 pass through as x;
  in between, a Horner-evaluated polynomial on |x| (sign restored)
  approximates the tanh-based GELU formula 0.5*x*(1+tanh(...)).
  """
  assert x.dtype == torch.bfloat16
  lo, hi = -2.5, 2.5
  below_mask = (x < lo).to(x.device).to(torch.bfloat16)
  above_mask = (x > hi).to(x.device).to(torch.bfloat16)
  q_sign = torch.sign(x)
  q = torch.clamp(torch.abs(x), 0, hi)
  coeffs = torch.from_numpy(
      get_gelu_tanh_poly_coeffcients(degree=degree)).to(x.device)
  poly = ploy_HORNER_SCHEME(q, coeffs, degree)
  result = x * 0.5 * (1 + poly * q_sign)
  # Blend in the saturation regions: zero below, identity above.
  result = result * (1.0 - below_mask - above_mask) + x * above_mask
  assert result.dtype == torch.bfloat16
  return result
23,642 | import torch
import numpy as np
from .coefficient import get_sigmoid_positive_ploy_coeffcients, get_exp_poly_coeffcients, get_gelu_tanh_poly_coeffcients, get_tanh_positive_poly_coeffcients
from pytorch_nndct.utils.hw_dtype import is_subnormal, is_normal
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def isqrt_approx_quake(x, mantissa_bit=23, exponent_bit=8):
  """Fast inverse square root (Quake III bit trick) with two Newton steps.

  The magic constant is derived as
  round(1.5 * 2**mantissa_bit * (2**(exponent_bit-1) - 1 - gamma)),
  where gamma = 0.0450466 is the first-order Taylor error term. See also
  the alternative derivation by Walczyk, Moroz & Cieslinski
  (https://www.mdpi.com/1099-4300/23/1/86/pdf, CC-BY 4.0).

  Args:
    x: torch tensor of positive values.
    mantissa_bit: mantissa width of the target float format (fp32: 23).
    exponent_bit: exponent width of the target float format (fp32: 8).

  Returns:
    Tensor of approximate 1/sqrt(x) on ``x``'s device with ``x``'s dtype.
  """
  gamma = 0.0450466
  magic = np.array(
      np.round(3.0 / 2.0 * 2.0**mantissa_bit *
               (2.0**(exponent_bit - 1) - 1.0 - gamma)),
      dtype=np.int32)
  vals = x.cpu().numpy()
  half = vals * 0.5
  y = np.float32(vals)
  # Reinterpret the float bits as int, shift/subtract, reinterpret back.
  bits = y.view(np.int32)
  bits = magic - np.int32(bits >> 1)
  y = bits.view(np.float32)
  # Two Newton-Raphson refinements: y <- y * (1.5 - 0.5*x*y*y).
  for _ in range(2):
    y = y * (1.5 - (half * y * y))
  return torch.from_numpy(y).to(x.device).to(x.dtype)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.