id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
23,743 | import importlib.util
import os
import sys
from typing import Any, NoReturn, Optional, Sequence, Tuple, Union, List
import itertools
import torch
import torch.nn as nn
import torch.nn.functional as F
import nndct_shared.utils as nndct_utils
import pytorch_nndct.utils.jit_utils as jit_utils
import pytorch_nndct.utils.module_util as module_util
from nndct_shared.base import FrameworkType
from nndct_shared.compile import DevGraphOptimizer
from nndct_shared.nndct_graph import Graph, convert_block_node_to_graph
from nndct_shared.optimization import QuantOptimizer
from nndct_shared.utils import (GLOBAL_MAP, NNDCT_KEYS, NNDCT_OP,
NndctDebugLogger, NndctOption,
NndctScreenLogger, permute_data, permute_axes,
QError, QWarning, QNote)
from nndct_shared.utils.dpu_utils import get_avgpool_dpu_coeff
from pytorch_nndct.export import get_script_writer
from pytorch_nndct.nn import stacked_lstm
from pytorch_nndct.nn.modules import channel_scale, reluk
from pytorch_nndct.parse import TorchParser
from pytorch_nndct.utils import TorchSymbol, TorchGraphSymbol
from pytorch_nndct.utils.module_util import to_device, get_module_name
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
from .ModuleHooker import ModuleHooker
class ModuleHooker(object):
def name_modules(module):
def add_outputs(module, record_once):
def add_output_intime(module):
def add_input_dump_time(module):
def apply_to_children(module, func):
def turn_on_record_outputs(module):
def turn_off_record_outputs(module):
def clear_record_outputs(module):
def turn_on_output_intime(module):
def turn_off_output_intime(module):
def clear_op_called_times(module):
def turn_on_input_dump(module):
def turn_off_input_dump(module):
def clear_input_called_times(module):
def register_state_dict_hook(cls, module):
def _resume_state_dict_key(op, destination, prefix, local_meta_data):
def _state_dict_hooker(op, hooker):
def register_output_hook(cls, module, record_once=True):
def _record_outputs(op, inputs, outputs):
def get_outputs_value(outputs):
def _output_hooker(op, record_func):
def register_input_dump_hook(cls, module):
def dump_input_data(module, inputs):
def inputs_flatten(tensors, tensors_flatten):
def register_output_intime_hook(cls, module):
def _dump_intime(op, inputs, outputs):
def get_outputs_value(outputs):
def dump_output_data(op, output_data, index=None):
def dump_output(op, output_data):
def _output_hooker(op, output_func):
def detach_node_from_module(cls, module):
def _del_node(op):
def hook_module_with_node(cls, module, graph):
def _add_node_on_module(op):
def update_parameters(cls, module, graph, graph2module):
def safe_torch_nn_Parameter(torch_tensor, requires_grad):
def _graph2module(op):
def _module2graph(op):
def _get_output_data(outptus, outptus_name):
def _get_output_shape(outputs, outputs_name):
def update_blobs_once(cls, module, graph=None, time_step=None, update_shape_only=False):
def _updata_tensor_data(node, nndct_tensor, torch_tensor):
def _updata_tensor_shape(node, nndct_tensor, torch_tensor):
def _update_node_outputs(op):
def clone_quant_module(cls, quant_module, quant_graph):
def hook_module_with_quantizer(cls, module, quantizer):
def _add_quantizer_on_module(op):
def hook_module_with_input_device_checker(cls, module, module_gen_from_script):
def _check_input_args(op, input):
def register_tensor_dtype_and_shape_hook(cls, module):
def _record_dtype_and_shape(op, inputs, outputs):
def _get_output_dtype_and_shape(output):
def _hooker(op, record_func):
def update_nndct_parameters(module: torch.nn.Module, graph: Graph) -> None:
  """Sync parameters between the torch module and the nndct graph.

  Delegates to ModuleHooker.update_parameters with graph2module=False —
  presumably copying values from the module into the graph (confirm direction
  against ModuleHooker.update_parameters).

  NOTE: the original annotation was `NoReturn`, which in typing means "never
  returns normally"; this function does return, so `None` is the correct type.
  """
  ModuleHooker.update_parameters(module, graph, graph2module=False)
23,744 | import importlib.util
import os
import sys
from typing import Any, NoReturn, Optional, Sequence, Tuple, Union, List
import itertools
import torch
import torch.nn as nn
import torch.nn.functional as F
import nndct_shared.utils as nndct_utils
import pytorch_nndct.utils.jit_utils as jit_utils
import pytorch_nndct.utils.module_util as module_util
from nndct_shared.base import FrameworkType
from nndct_shared.compile import DevGraphOptimizer
from nndct_shared.nndct_graph import Graph, convert_block_node_to_graph
from nndct_shared.optimization import QuantOptimizer
from nndct_shared.utils import (GLOBAL_MAP, NNDCT_KEYS, NNDCT_OP,
NndctDebugLogger, NndctOption,
NndctScreenLogger, permute_data, permute_axes,
QError, QWarning, QNote)
from nndct_shared.utils.dpu_utils import get_avgpool_dpu_coeff
from pytorch_nndct.export import get_script_writer
from pytorch_nndct.nn import stacked_lstm
from pytorch_nndct.nn.modules import channel_scale, reluk
from pytorch_nndct.parse import TorchParser
from pytorch_nndct.utils import TorchSymbol, TorchGraphSymbol
from pytorch_nndct.utils.module_util import to_device, get_module_name
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
from .ModuleHooker import ModuleHooker
def insert_scale_after_conv2d(module: torch.nn.Module):
  """Append a ChannelScale(1.0) op after Conv2d/ConvTranspose2d layers.

  Within each parent's direct children: when a BatchNorm2d immediately follows
  a conv, the scale is attached to that BatchNorm2d (so it lands after the
  conv+bn pair); a conv with no immediately-following BatchNorm2d gets the
  scale attached to the last remembered child once the scan finishes.
  No-op (and no log message) when the model contains no conv layers.
  """
  def _insert_func(op):
    insert_name = None        # name of the child to wrap with ChannelScale
    children_since_conv = 0   # children seen since the last conv was found
    find_conv2d = False
    for op_name, c_op in op.named_children():
      if find_conv2d:
        children_since_conv = children_since_conv + 1
      if isinstance(c_op, (torch.nn.Conv2d, torch.nn.ConvTranspose2d)):
        find_conv2d = True
        insert_name = op_name
      elif isinstance(c_op, torch.nn.BatchNorm2d) and find_conv2d:
        insert_name = op_name
        # Only wrap (and reset the scan) when the BN directly follows the conv.
        if children_since_conv == 1:
          op._modules[insert_name] = torch.nn.Sequential(
              op._modules[insert_name], channel_scale.ChannelScale(channel_scale=1.0))
          find_conv2d = False
          children_since_conv = 0
    # A conv still pending at the end of the scan gets the scale directly.
    if find_conv2d:
      op._modules[insert_name] = torch.nn.Sequential(
          op._modules[insert_name], channel_scale.ChannelScale(channel_scale=1.0))
  if any(isinstance(submodule, (torch.nn.Conv2d, torch.nn.ConvTranspose2d))
         for submodule in module.modules()):
    module.apply(_insert_func)
    NndctScreenLogger().warning("ChannelScale has been inserted after Conv2d.")
23,745 | import importlib.util
import os
import sys
from typing import Any, NoReturn, Optional, Sequence, Tuple, Union, List
import itertools
import torch
import torch.nn as nn
import torch.nn.functional as F
import nndct_shared.utils as nndct_utils
import pytorch_nndct.utils.jit_utils as jit_utils
import pytorch_nndct.utils.module_util as module_util
from nndct_shared.base import FrameworkType
from nndct_shared.compile import DevGraphOptimizer
from nndct_shared.nndct_graph import Graph, convert_block_node_to_graph
from nndct_shared.optimization import QuantOptimizer
from nndct_shared.utils import (GLOBAL_MAP, NNDCT_KEYS, NNDCT_OP,
NndctDebugLogger, NndctOption,
NndctScreenLogger, permute_data, permute_axes,
QError, QWarning, QNote)
from nndct_shared.utils.dpu_utils import get_avgpool_dpu_coeff
from pytorch_nndct.export import get_script_writer
from pytorch_nndct.nn import stacked_lstm
from pytorch_nndct.nn.modules import channel_scale, reluk
from pytorch_nndct.parse import TorchParser
from pytorch_nndct.utils import TorchSymbol, TorchGraphSymbol
from pytorch_nndct.utils.module_util import to_device, get_module_name
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
from .ModuleHooker import ModuleHooker
def insert_scale_after_batchnorm2d(module: torch.nn.Module):
  """Append a ChannelScale(1.0) op after every BatchNorm2d child in `module`.

  Each BatchNorm2d child is replaced in place by
  Sequential(bn, ChannelScale(1.0)). No-op (and no log message) when the
  model contains no BatchNorm2d.
  """
  def _insert_func(op):
    for op_name, c_op in op.named_children():
      if isinstance(c_op, torch.nn.BatchNorm2d):
        op._modules[op_name] = torch.nn.Sequential(
            op._modules[op_name], channel_scale.ChannelScale(channel_scale=1.0))
  if any(isinstance(submodule, torch.nn.BatchNorm2d) for submodule in module.modules()):
    module.apply(_insert_func)
    NndctScreenLogger().warning("ChannelScale has been inserted after batchnorm2d.")
23,746 | import importlib.util
import os
import sys
from typing import Any, NoReturn, Optional, Sequence, Tuple, Union, List
import itertools
import torch
import torch.nn as nn
import torch.nn.functional as F
import nndct_shared.utils as nndct_utils
import pytorch_nndct.utils.jit_utils as jit_utils
import pytorch_nndct.utils.module_util as module_util
from nndct_shared.base import FrameworkType
from nndct_shared.compile import DevGraphOptimizer
from nndct_shared.nndct_graph import Graph, convert_block_node_to_graph
from nndct_shared.optimization import QuantOptimizer
from nndct_shared.utils import (GLOBAL_MAP, NNDCT_KEYS, NNDCT_OP,
NndctDebugLogger, NndctOption,
NndctScreenLogger, permute_data, permute_axes,
QError, QWarning, QNote)
from nndct_shared.utils.dpu_utils import get_avgpool_dpu_coeff
from pytorch_nndct.export import get_script_writer
from pytorch_nndct.nn import stacked_lstm
from pytorch_nndct.nn.modules import channel_scale, reluk
from pytorch_nndct.parse import TorchParser
from pytorch_nndct.utils import TorchSymbol, TorchGraphSymbol
from pytorch_nndct.utils.module_util import to_device, get_module_name
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
from .ModuleHooker import ModuleHooker
The provided code snippet includes necessary dependencies for implementing the `convert_lstm` function. Write a Python function `def convert_lstm(ori_module: torch.nn.Module, device)` to solve the following problem:
replace_torch_lstm_with_stacked_lstm
Here is the function:
def convert_lstm(ori_module: torch.nn.Module, device):
  """replace_torch_lstm_with_stacked_lstm

  Recursively replaces every torch.nn.LSTM in `ori_module` with the project's
  stacked LSTM implementation, moved to `device` and put in eval mode
  (optionally jit-scripted when the nndct_jit_script option is set).
  If `ori_module` itself is an LSTM, the converted module is returned directly;
  otherwise `ori_module` is mutated in place and returned.
  """
  def _to_stacked(lstm_mod):
    # Build the stacked replacement on the target device in eval mode;
    # jit-script it when requested by the option.
    converted = stacked_lstm(lstm_mod).to(device).eval()
    if NndctOption.nndct_jit_script.value:
      converted = torch.jit.script(converted)
    return converted
  if isinstance(ori_module, torch.nn.LSTM):
    return _to_stacked(ori_module)
  for name, child in ori_module.named_children():
    if isinstance(child, torch.nn.LSTM):
      setattr(ori_module, name, _to_stacked(child))
    else:
      convert_lstm(child, device)
  return ori_module
23,747 | import importlib.util
import os
import sys
from typing import Any, NoReturn, Optional, Sequence, Tuple, Union, List
import itertools
import torch
import torch.nn as nn
import torch.nn.functional as F
import nndct_shared.utils as nndct_utils
import pytorch_nndct.utils.jit_utils as jit_utils
import pytorch_nndct.utils.module_util as module_util
from nndct_shared.base import FrameworkType
from nndct_shared.compile import DevGraphOptimizer
from nndct_shared.nndct_graph import Graph, convert_block_node_to_graph
from nndct_shared.optimization import QuantOptimizer
from nndct_shared.utils import (GLOBAL_MAP, NNDCT_KEYS, NNDCT_OP,
NndctDebugLogger, NndctOption,
NndctScreenLogger, permute_data, permute_axes,
QError, QWarning, QNote)
from nndct_shared.utils.dpu_utils import get_avgpool_dpu_coeff
from pytorch_nndct.export import get_script_writer
from pytorch_nndct.nn import stacked_lstm
from pytorch_nndct.nn.modules import channel_scale, reluk
from pytorch_nndct.parse import TorchParser
from pytorch_nndct.utils import TorchSymbol, TorchGraphSymbol
from pytorch_nndct.utils.module_util import to_device, get_module_name
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
from .ModuleHooker import ModuleHooker
def has_lstm(module):
  """Return True when `module` (or any of its submodules) is an nn.LSTM."""
  return any(isinstance(sub, torch.nn.LSTM) for sub in module.modules())
23,748 | import importlib.util
import os
import sys
from typing import Any, NoReturn, Optional, Sequence, Tuple, Union, List
import itertools
import torch
import torch.nn as nn
import torch.nn.functional as F
import nndct_shared.utils as nndct_utils
import pytorch_nndct.utils.jit_utils as jit_utils
import pytorch_nndct.utils.module_util as module_util
from nndct_shared.base import FrameworkType
from nndct_shared.compile import DevGraphOptimizer
from nndct_shared.nndct_graph import Graph, convert_block_node_to_graph
from nndct_shared.optimization import QuantOptimizer
from nndct_shared.utils import (GLOBAL_MAP, NNDCT_KEYS, NNDCT_OP,
NndctDebugLogger, NndctOption,
NndctScreenLogger, permute_data, permute_axes,
QError, QWarning, QNote)
from nndct_shared.utils.dpu_utils import get_avgpool_dpu_coeff
from pytorch_nndct.export import get_script_writer
from pytorch_nndct.nn import stacked_lstm
from pytorch_nndct.nn.modules import channel_scale, reluk
from pytorch_nndct.parse import TorchParser
from pytorch_nndct.utils import TorchSymbol, TorchGraphSymbol
from pytorch_nndct.utils.module_util import to_device, get_module_name
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
from .ModuleHooker import ModuleHooker
def _get_node_scope(node):
def _valid_bnfp(bnfp):
def get_module_name(module):
def get_module_name(module):
def insert_fix_neuron_in_script_model(script_model, quantizer):
  """Insert `vai::fix_neuron` quantization nodes into a TorchScript graph.

  Walks the quantizer's nndct graph and, for entries scoped to this script
  module, inserts fix_neuron ops in three passes over `quantizer.quant_config`:
    1. "param"  -- after the constant node holding each parameter tensor;
    2. "output" -- after each producing node (graph INPUT nodes are handled
       via `insert_after_input` on the graph's input value instead);
    3. "input"  -- before each consuming node.

  Returns the mutated `script_model`.
  """
  def _activation_method():
    # Rounding-method id for activation quantization: lstm defaults to 4,
    # otherwise 2; the torch-quantizer option overrides everything with -1,
    # and ip_asr tweaks the lstm default to 3.
    method = 4 if quantizer.lstm else 2
    if NndctOption.nndct_use_torch_quantizer.value is True:
      method = -1
    elif quantizer.lstm and NndctOption.nndct_ip_asr.value is True:
      method = 3
    return method
  quant_config = quantizer.quant_config
  script_graph = script_model.graph
  model_scope = get_module_name(script_model)
  # device = GLOBAL_MAP.get_ele(NNDCT_KEYS.QUANT_DEVICE)
  # device_id = 1 if device == torch.device("cpu") else 0
  # Pass 1: parameters attached to constant nodes.
  for node in quantizer.Nndctgraph.all_nodes():
    if _get_node_scope(node) != model_scope:
      continue
    state_dict = module_util.state_dict_from_node(node)
    if not state_dict:
      continue
    for tensor_name, tensor in state_dict.items():
      torch_value_name = jit_utils.nndct_name_2_jit_name(tensor_name)
      const_node = jit_utils.find_fw_node_by_name(script_graph, torch_value_name, recursive=True)
      # Convention used by create_fix_node: device_id 1 -> cpu, 0 -> cuda.
      device_id = 1 if quantizer.graph.param_tensor(tensor_name).device.device_type == 'cpu' else 0
      device = torch.device('cpu') if device_id == 1 else torch.device('cuda')
      jit_utils.set_attr_value(const_node, "value", tensor.to(device))
      if tensor_name in quant_config["param"]:
        bnfp = quantizer.get_quant_config(tensor_name, True, "param")
        if not _valid_bnfp(bnfp):
          continue
        method = -1 if NndctOption.nndct_use_torch_quantizer.value is True else 3
        fix_node = jit_utils.create_fix_node(script_graph,
                                             jit_utils.get_node_output_at(const_node, 0),
                                             quant_min=-bnfp[0],
                                             quant_max=bnfp[0]-1,
                                             scale_inv=bnfp[1],
                                             zero_point=0,
                                             method=method,
                                             device_id=device_id,
                                             inplace=quantizer.inplace,
                                             name=tensor_name,
                                             tensor_type='param')
        if fix_node is not None:
          jit_utils.insert_after_node(script_graph, const_node, fix_node)
  # Pass 2: node outputs.
  for nndct_node_name in quant_config["output"]:
    if _get_node_scope(nndct_node_name) != model_scope:
      continue
    quant_len = quantizer.get_quant_len(nndct_node_name, "output")
    for idx in range(quant_len):
      # `index` is only passed when a node has several quantized outputs.
      index = None if quant_len == 1 else idx
      bnfp = quantizer.get_quant_config(nndct_node_name, True, "output", idx)
      if not _valid_bnfp(bnfp):
        continue
      if quantizer.Nndctgraph.node(nndct_node_name).op.type == NNDCT_OP.INPUT:
        # Graph inputs have no producing node; hook the input value directly.
        torch_value_name = jit_utils.nndct_name_2_jit_name(nndct_node_name)
        input_value = jit_utils.find_input_value_by_name(script_graph, torch_value_name)
        input_node = jit_utils.get_fw_graph_input_node(script_graph)
        device_id = 1 if quantizer.graph.node(nndct_node_name).out_tensors[idx].device.device_type == 'cpu' else 0
        method = _activation_method()
        fix_node = jit_utils.create_fix_node(script_graph, input_value,
                                             quant_min=-bnfp[0],
                                             quant_max=bnfp[0]-1,
                                             scale_inv=bnfp[1],
                                             zero_point=0,
                                             method=method,
                                             device_id=device_id,
                                             inplace=quantizer.inplace,
                                             name=nndct_node_name,
                                             tensor_type='output',
                                             index=index)
        if fix_node is not None:
          jit_utils.insert_after_input(script_graph, input_node, fix_node, torch_value_name)
      else:
        torch_value_name = jit_utils.nndct_name_2_jit_name(nndct_node_name)
        fw_node = jit_utils.find_fw_node_by_name(script_graph, torch_value_name, recursive=True)
        device_id = 1 if quantizer.graph.node(nndct_node_name).out_tensors[idx].device.device_type == 'cpu' else 0
        method = _activation_method()
        # Layer norm always uses method 4 regardless of the options above.
        if quantizer.Nndctgraph.node(nndct_node_name).op.type == NNDCT_OP.LAYER_NORM:
          method = 4
        fix_node = jit_utils.create_fix_node(script_graph,
                                             jit_utils.get_node_output_at(fw_node, idx),
                                             quant_min=-bnfp[0],
                                             quant_max=bnfp[0]-1,
                                             scale_inv=bnfp[1],
                                             zero_point=0,
                                             method=method,
                                             device_id=device_id,
                                             inplace=quantizer.inplace,
                                             name=nndct_node_name,
                                             tensor_type='output',
                                             index=index)
        if fix_node is not None:
          jit_utils.insert_after_node(script_graph, fw_node, fix_node, idx)
  # Pass 3: node inputs.
  for nndct_node_name in quant_config["input"]:
    if _get_node_scope(nndct_node_name) != model_scope:
      continue
    quant_len = quantizer.get_quant_len(nndct_node_name, "input")
    for idx in range(quant_len):
      # BUGFIX: the original computed `index` from the *output* quant length
      # here (copy-paste from pass 2); use the input length being iterated.
      index = None if quant_len == 1 else idx
      bnfp = quantizer.get_quant_config(nndct_node_name, True, "input", idx)
      if not _valid_bnfp(bnfp):
        continue
      torch_value_name = jit_utils.nndct_name_2_jit_name(nndct_node_name)
      fw_node = jit_utils.find_fw_node_by_name(script_graph, torch_value_name, recursive=True)
      input_node = jit_utils.get_in_node_at(fw_node, idx)
      device_id = 1 if quantizer.graph.node(nndct_node_name).in_tensors[idx].device.device_type == 'cpu' else 0
      method = _activation_method()
      fix_node = jit_utils.create_fix_node(script_graph,
                                           jit_utils.get_node_output_at(input_node, 0),
                                           quant_min=-bnfp[0],
                                           quant_max=bnfp[0]-1,
                                           scale_inv=bnfp[1],
                                           zero_point=0,
                                           method=method,
                                           device_id=device_id,
                                           inplace=quantizer.inplace,
                                           name=nndct_node_name,
                                           tensor_type='input',
                                           index=index)
      if fix_node is not None:
        jit_utils.insert_before_node(script_graph, fw_node, fix_node, idx)
  return script_model
23,749 | import importlib.util
import os
import sys
from typing import Any, NoReturn, Optional, Sequence, Tuple, Union, List
import itertools
import torch
import torch.nn as nn
import torch.nn.functional as F
import nndct_shared.utils as nndct_utils
import pytorch_nndct.utils.jit_utils as jit_utils
import pytorch_nndct.utils.module_util as module_util
from nndct_shared.base import FrameworkType
from nndct_shared.compile import DevGraphOptimizer
from nndct_shared.nndct_graph import Graph, convert_block_node_to_graph
from nndct_shared.optimization import QuantOptimizer
from nndct_shared.utils import (GLOBAL_MAP, NNDCT_KEYS, NNDCT_OP,
NndctDebugLogger, NndctOption,
NndctScreenLogger, permute_data, permute_axes,
QError, QWarning, QNote)
from nndct_shared.utils.dpu_utils import get_avgpool_dpu_coeff
from pytorch_nndct.export import get_script_writer
from pytorch_nndct.nn import stacked_lstm
from pytorch_nndct.nn.modules import channel_scale, reluk
from pytorch_nndct.parse import TorchParser
from pytorch_nndct.utils import TorchSymbol, TorchGraphSymbol
from pytorch_nndct.utils.module_util import to_device, get_module_name
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
from .ModuleHooker import ModuleHooker
def opt_script_model_for_quant(script_model):
  """Run the graph clean-up passes needed before quantization, in place.

  Strips fused bn / fused ln-sigmoid residue, dropout, and dead code from the
  scripted model's graph, then returns the same model.
  """
  graph = script_model.graph
  for cleanup_pass in (jit_utils.remove_fused_bn,
                       jit_utils.remove_fused_ln_sigmoid,
                       jit_utils.remove_dropout,
                       jit_utils.remove_dce_node):
    cleanup_pass(graph)
  return script_model
23,750 | import importlib.util
import os
import sys
from typing import Any, NoReturn, Optional, Sequence, Tuple, Union, List
import itertools
import torch
import torch.nn as nn
import torch.nn.functional as F
import nndct_shared.utils as nndct_utils
import pytorch_nndct.utils.jit_utils as jit_utils
import pytorch_nndct.utils.module_util as module_util
from nndct_shared.base import FrameworkType
from nndct_shared.compile import DevGraphOptimizer
from nndct_shared.nndct_graph import Graph, convert_block_node_to_graph
from nndct_shared.optimization import QuantOptimizer
from nndct_shared.utils import (GLOBAL_MAP, NNDCT_KEYS, NNDCT_OP,
NndctDebugLogger, NndctOption,
NndctScreenLogger, permute_data, permute_axes,
QError, QWarning, QNote)
from nndct_shared.utils.dpu_utils import get_avgpool_dpu_coeff
from pytorch_nndct.export import get_script_writer
from pytorch_nndct.nn import stacked_lstm
from pytorch_nndct.nn.modules import channel_scale, reluk
from pytorch_nndct.parse import TorchParser
from pytorch_nndct.utils import TorchSymbol, TorchGraphSymbol
from pytorch_nndct.utils.module_util import to_device, get_module_name
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
from .ModuleHooker import ModuleHooker
def _get_node_scope(node):
if isinstance(node, str):
return node.split("::")[0]
else:
return node.name.split("::")[0]
def get_avgpool_dpu_coeff(kernel):
  """Return the DPU compensation coefficient for an average-pool kernel.

  The DPU replaces the exact averaging factor 1/(kh*kw) with a fixed-point
  (multiplier / 2**shift) pair. For several common kernel sizes the pair is
  hard-coded; for any other kernel the closest power-of-two approximation of
  1/(kh*kw) is searched. The returned scale is the ratio between the DPU
  approximation and the exact mean, i.e. the factor to multiply the exact
  average by so the result matches DPU arithmetic (1.0 = no compensation).

  Args:
    kernel: two-element sequence [kh, kw]; compared against list literals,
      so callers pass lists.

  Returns:
    float compensation scale.
  """
  import math  # BUGFIX: `math` is used below but was never imported here.
  scale = 1.0
  if kernel == [3, 3]:
    scale = 9.0 * 7.0 / 64.0
  elif kernel == [5, 5]:
    scale = 25.0 * 10.0 / 256.0
  elif kernel in [[6, 6], [3, 6], [6, 3]]:
    scale = 36.0 * 7.0 / 256.0
  elif kernel == [7, 7]:
    scale = 49.0 * 21.0 / 1024.0
  elif kernel == [14, 14]:
    scale = 196.0 * 21.0 / 4096.0
  else:
    rec = kernel[0] * kernel[1]
    # Search shift/multiplier pairs for the best approximation of 1/rec.
    max_factor = math.ceil(math.log(rec * 128, 2))
    diff = 1.0
    multi_factor = 0.0
    shift_factor = 0.0
    for shift_factor_ in range(max_factor):
      factor = round((2 ** shift_factor_) / rec)
      diff_ = abs(factor / (2 ** shift_factor_) - 1 / rec)
      if diff_ < diff:
        multi_factor = factor
        diff = diff_
        shift_factor = shift_factor_
    scale = rec * multi_factor / (2 ** shift_factor)
  return scale
def get_module_name(module):
  # Return the original (pre-scripting) class name for a ScriptModule.
  # NOTE(review): implicitly returns None for any non-ScriptModule input —
  # confirm whether callers rely on that or whether the fuller overload of
  # this name is the one actually in effect at runtime.
  if isinstance(module, torch.jit.ScriptModule):
    return module.original_name
def get_module_name(module):
  """Return a human-meaningful name for `module`.

  ScriptModules report their `original_name`; plain modules their class name.
  When the module is the trace-time FlattenInOutModelForTrace wrapper, try to
  recover the wrapped model's original name instead.
  """
  if isinstance(module, torch.jit.ScriptModule):
    module_name = module.original_name
  else:
    module_name = module._get_name()
  from pytorch_nndct.parse.rich_in_out_helper import FlattenInOutModelForTrace
  if module_name != FlattenInOutModelForTrace.getModelName():
    return module_name
  if isinstance(module, torch.jit.ScriptModule):
    # Recover the wrapped model's name from the scripted graph's text dump.
    origin_name = FlattenInOutModelForTrace.getOriginModelNameFormString(str(module.graph))
    return origin_name if origin_name is not None else module_name
  # Eager wrapper: the name is stashed as an attribute prefixed 'nndct_st_';
  # fall back to the wrapper's own name when no such attribute exists.
  stashed = [attr for attr in dir(module) if 'nndct_st_' in attr]
  if stashed:
    return stashed[0]
  return module_name
def insert_mul_after_avgpool(script_model, quantizer):
  """Insert a constant multiply after average-pool nodes in the script graph.

  The multiplier comes from get_avgpool_dpu_coeff and compensates for the
  fixed-point averaging factor used by the DPU. AVG_POOL nodes use their
  kernel attribute directly; ADAPTIVEAVGPOOL2D nodes are only handled when
  tensor shapes are known and the pooling divides evenly, in which case the
  equivalent fixed kernel is derived from the in/out spatial sizes.
  Returns the mutated `script_model`.
  """
  def _insert_mul_node(nndct_node_name, kernel):
    # Append `output * scale` right after the node's first output value.
    scale = get_avgpool_dpu_coeff(kernel)
    torch_value_name = jit_utils.nndct_name_2_jit_name(nndct_node_name)
    fw_node = jit_utils.find_fw_node_by_name(script_graph, torch_value_name)
    mul_node = jit_utils.create_mul_node(script_graph, jit_utils.get_node_output_at(fw_node, 0), scale)
    if mul_node is not None:
      jit_utils.insert_after_node(script_graph, fw_node, mul_node)
  script_graph = script_model.graph
  nndct_graph = quantizer.Nndctgraph
  for nndct_node in nndct_graph.all_nodes():
    # Only patch nodes scoped to this script module.
    if _get_node_scope(nndct_node.name) != get_module_name(script_model):
      continue
    if nndct_node.op.type == NNDCT_OP.AVG_POOL:
      kernel = nndct_node.node_attr(nndct_node.op.AttrName.KERNEL)
      _insert_mul_node(nndct_node.name, kernel)
    elif nndct_node.op.type == NNDCT_OP.ADAPTIVEAVGPOOL2D:
      # Shapes are needed to derive the equivalent kernel; skip if unknown.
      if not nndct_node.in_tensors[0].shape or (not nndct_node.out_tensors[0].shape):
        continue
      input_size = nndct_node.in_tensors[0].shape[2:]
      output_size = nndct_node.out_tensors[0].shape[2:]
      # Only the evenly-dividing case maps onto a fixed kernel/stride.
      mod = [input_size[i] % output_size[i] for i in range(0, len(input_size))]
      if mod != [0] * len(mod):
        continue
      stride_h = int(input_size[0] / output_size[0])
      stride_w = int(input_size[1] / output_size[1])
      kernel_h = input_size[0] - (output_size[0] - 1) * stride_h
      kernel_w = input_size[1] - (output_size[1] - 1) * stride_w
      # NOTE(review): kernel is passed as [kernel_w, kernel_h]; the AVG_POOL
      # branch passes the attribute unmodified. get_avgpool_dpu_coeff is
      # symmetric for square kernels, so this only matters for non-square
      # ones — confirm the intended ordering.
      _insert_mul_node(nndct_node.name, [kernel_w, kernel_h])
  return script_model
23,751 | import importlib.util
import os
import sys
from typing import Any, NoReturn, Optional, Sequence, Tuple, Union, List
import itertools
import torch
import torch.nn as nn
import torch.nn.functional as F
import nndct_shared.utils as nndct_utils
import pytorch_nndct.utils.jit_utils as jit_utils
import pytorch_nndct.utils.module_util as module_util
from nndct_shared.base import FrameworkType
from nndct_shared.compile import DevGraphOptimizer
from nndct_shared.nndct_graph import Graph, convert_block_node_to_graph
from nndct_shared.optimization import QuantOptimizer
from nndct_shared.utils import (GLOBAL_MAP, NNDCT_KEYS, NNDCT_OP,
NndctDebugLogger, NndctOption,
NndctScreenLogger, permute_data, permute_axes,
QError, QWarning, QNote)
from nndct_shared.utils.dpu_utils import get_avgpool_dpu_coeff
from pytorch_nndct.export import get_script_writer
from pytorch_nndct.nn import stacked_lstm
from pytorch_nndct.nn.modules import channel_scale, reluk
from pytorch_nndct.parse import TorchParser
from pytorch_nndct.utils import TorchSymbol, TorchGraphSymbol
from pytorch_nndct.utils.module_util import to_device, get_module_name
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
from .ModuleHooker import ModuleHooker
def remove_quant_dequant_stub(script_model):
  """Strip quant/dequant stub ops from the scripted model's graph, in place."""
  jit_utils.remove_quant_dequant_stub(script_model.graph)
  return script_model
23,752 | import importlib.util
import os
import sys
from typing import Any, NoReturn, Optional, Sequence, Tuple, Union, List
import itertools
import torch
import torch.nn as nn
import torch.nn.functional as F
import nndct_shared.utils as nndct_utils
import pytorch_nndct.utils.jit_utils as jit_utils
import pytorch_nndct.utils.module_util as module_util
from nndct_shared.base import FrameworkType
from nndct_shared.compile import DevGraphOptimizer
from nndct_shared.nndct_graph import Graph, convert_block_node_to_graph
from nndct_shared.optimization import QuantOptimizer
from nndct_shared.utils import (GLOBAL_MAP, NNDCT_KEYS, NNDCT_OP,
NndctDebugLogger, NndctOption,
NndctScreenLogger, permute_data, permute_axes,
QError, QWarning, QNote)
from nndct_shared.utils.dpu_utils import get_avgpool_dpu_coeff
from pytorch_nndct.export import get_script_writer
from pytorch_nndct.nn import stacked_lstm
from pytorch_nndct.nn.modules import channel_scale, reluk
from pytorch_nndct.parse import TorchParser
from pytorch_nndct.utils import TorchSymbol, TorchGraphSymbol
from pytorch_nndct.utils.module_util import to_device, get_module_name
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
from .ModuleHooker import ModuleHooker
def quant_model_inferenced(quant_model: Union[torch.nn.Module, List[torch.nn.Module]]) -> bool:
  """Whether the quantized model — or every model in a list of them — has run inference."""
  if not isinstance(quant_model, list):
    return quant_model.is_inferenced
  return all(model.is_inferenced for model in quant_model)
23,753 | import copy
import os
import torch
from nndct_shared.base import GLOBAL_MAP, NNDCT_KEYS, NNDCT_OP
from nndct_shared.utils import (AddXopError, NndctOption, NndctScreenLogger,
option_util, QError, QWarning, set_option_value)
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
from pytorch_nndct.utils.onnx_utils import get_opset_version
from pytorch_nndct.utils.module_util import visualize_tensors, to_device, get_module_name
from .utils import update_nndct_blob_data
import numpy as np
GLOBAL_MAP = GlobalMap()
def get_opset_version():
  """Return the newest ONNX opset supported by this torch build.

  Probes, in order, the locations the constant has lived in across torch
  releases, and raises when none is present.
  """
  helper_ns = torch.onnx.symbolic_helper.__dict__
  if "_onnx_stable_opsets" in helper_ns:
    return helper_ns["_onnx_stable_opsets"][-1]
  constants_ns = torch.onnx._constants.__dict__
  if "onnx_stable_opsets" in constants_ns:
    return constants_ns["onnx_stable_opsets"][-1]
  if "ONNX_MAX_OPSET" in constants_ns:
    return constants_ns["ONNX_MAX_OPSET"]
  raise RuntimeError("Onnx stable opset version is not found. Please check pytorch version (1.4.0 ~ 1.13.0)")
def get_module_name(module):
  # Return the original (pre-scripting) class name for a ScriptModule.
  # NOTE(review): implicitly returns None for any non-ScriptModule input —
  # confirm whether callers rely on that or whether the fuller overload of
  # this name is the one actually in effect at runtime.
  if isinstance(module, torch.jit.ScriptModule):
    return module.original_name
def to_device(module, input_args, device):
  """Move `module` and (nested) tensor arguments onto `device`.

  `input_args` may be None, a single tensor, or an arbitrarily nested
  tuple/list of them; the container kinds are preserved. Either argument may
  be None. Returns the (module, input_args) pair.
  """
  if input_args is not None:
    if isinstance(input_args, torch.Tensor):
      input_args = input_args.to(device)
    else:
      was_tuple = isinstance(input_args, tuple)
      moved = [to_device(None, item, device)[1] for item in input_args]
      input_args = tuple(moved) if was_tuple else moved
  if module is not None:
    module = module.to(device)
  return module, input_args
def get_module_name(module):
  """Return a human-meaningful name for `module`.

  ScriptModules report their `original_name`; plain modules their class name.
  When the module is the trace-time FlattenInOutModelForTrace wrapper, try to
  recover the wrapped model's original name instead.
  """
  if isinstance(module, torch.jit.ScriptModule):
    module_name = module.original_name
  else:
    module_name = module._get_name()
  from pytorch_nndct.parse.rich_in_out_helper import FlattenInOutModelForTrace
  if module_name == FlattenInOutModelForTrace.getModelName():
    if isinstance(module, torch.jit.ScriptModule):
      # Recover the wrapped model's name from the scripted graph's text dump;
      # fall back to the wrapper's own name when none is found.
      origin_name = FlattenInOutModelForTrace.getOriginModelNameFormString(str(module.graph))
      if origin_name is not None:
        return origin_name
      else:
        return module_name
    else:
      # Eager wrapper: the name appears to be stashed as an attribute
      # prefixed 'nndct_st_'; falls through to the final return when absent.
      names = [k for k in dir(module) if 'nndct_st_' in k]
      if len(names) > 0:
        return names[0]
  return module_name
def export_onnx_model_for_lstm(quantizer, example_inputs, input_tensors_name, return_tensors_name, convert_script_to_qscript, output_dir,
                               verbose=False, dynamic_batch=False, opset_version=None, native_onnx=True, dump_layers=False, check_model=False, opt_graph=False):
  """Export the quantized (LSTM) model to ONNX with vai::fix_neuron ops.

  Registers a symbolic for the custom `vai::fix_neuron` op, resolves the
  opset version (option value, or the torch build's max when -1), then either
  exports `quantizer.quant_model` directly (no scripts recorded) or converts
  and exports each recorded script model. Output files are written to
  `output_dir` as `<model_name>_int.onnx`.

  NOTE(review): `dynamic_batch`, `dump_layers`, `check_model` and `opt_graph`
  are accepted but unused in this body — confirm whether they are consumed
  elsewhere or dead.
  """
  from torch.onnx import register_custom_op_symbolic
  from torch.onnx.symbolic_helper import parse_args
  @parse_args("v", "v", "v", "v", "v", "v", "v", "v")
  def symbolic_fix_neuron(g, input, valmin, valmax, valamp, zero_point, method, device_id, inplace):
    # Map the fix_neuron call onto the custom vai domain op, keeping the
    # input's type on the output.
    return g.op("vai::fix_neuron", input, valmax, valamp, method, device_id, inplace).setType(input.type())
  register_custom_op_symbolic("vai::fix_neuron", symbolic_fix_neuron, 9)
  opset_version = NndctOption.nndct_onnx_opset_version.value if opset_version is None else opset_version
  opset_version = get_opset_version() if opset_version==-1 else opset_version
  device = GLOBAL_MAP.get_ele(NNDCT_KEYS.QUANT_DEVICE)
  script_models = quantizer.scripts if len(quantizer.scripts) > 0 else None
  quantizer.reset_status_for_exporting()
  if script_models is None:
    output_file = os.path.join(output_dir, f"{quantizer.quant_model._get_name()}_int.onnx")
    # NOTE(review): `model` is unused afterwards — export reads
    # quantizer.quant_model directly; only `input_args` is consumed.
    model, input_args = to_device(quantizer.quant_model, example_inputs, device)
    try:
      torch.onnx.export(quantizer.quant_model, input_args, output_file,
                        verbose=verbose,
                        input_names=input_tensors_name,
                        opset_version=opset_version,
                        custom_opsets={'vai' : 2})
    except Exception as e:
      NndctScreenLogger().error2user(QError.EXPORT_ONNX, f"The {get_module_name(quantizer.quant_model)} can not be exported for onnx. The PyTorch internal failed reason is:\n{str(e)}.")
    else:
      # Only report success when the export did not raise.
      NndctScreenLogger().info(f"{get_module_name(quantizer.quant_model)}_int.onnx is generated.({output_file})")
  else:
    # Normalize single-script inputs into parallel lists.
    if len(script_models) == 1:
      inputs = [example_inputs]
      inputs_name = [input_tensors_name]
      outputs_name = [return_tensors_name]
    else:
      inputs = example_inputs
      inputs_name = input_tensors_name
      outputs_name = return_tensors_name
    # NOTE(review): the loop variable `inputs` shadows the list above — safe
    # because zip() captures the list before iteration, but fragile.
    for script, inputs, input_name, output_name in zip(script_models, inputs, inputs_name, outputs_name):
      q_model = convert_script_to_qscript(script, verbose=verbose)
      _, inputs = to_device(None, inputs, device)
      output_file = os.path.join(output_dir, f"{get_module_name(q_model)}_int.onnx")
      try:
        torch.onnx.export(q_model, inputs, output_file,
                          verbose=verbose,
                          input_names=input_name,
                          opset_version=opset_version,
                          custom_opsets={'vai' : 2})
      except Exception as e:
        NndctScreenLogger().error2user(QError.EXPORT_ONNX, f"The {get_module_name(q_model)} can not be exported for onnx. The PyTorch internal failed reason is:\n{str(e)}.")
      # NOTE(review): unlike the branch above, success is logged even when the
      # export failed (no `else`) — confirm whether that is intentional.
      NndctScreenLogger().info(f"{get_module_name(q_model)}_int.onnx is generated.({output_file})")
23,754 | import copy
import os
import torch
from nndct_shared.base import GLOBAL_MAP, NNDCT_KEYS, NNDCT_OP
from nndct_shared.utils import (AddXopError, NndctOption, NndctScreenLogger,
option_util, QError, QWarning, set_option_value)
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
from pytorch_nndct.utils.onnx_utils import get_opset_version
from pytorch_nndct.utils.module_util import visualize_tensors, to_device, get_module_name
from .utils import update_nndct_blob_data
import numpy as np
def optimize_onnx_graph(onnx_path):
  """Simplify the ONNX model at `onnx_path` in place with onnx-simplifier.

  Loads the model, runs `onnxsim.simplify`, and overwrites the original file
  only when the simplified model passes onnxsim's validation check; otherwise
  the file is left untouched and a failure message is logged.
  """
  import onnx
  from onnxsim import simplify
  model_org = onnx.load(onnx_path)
  # simplify onnx graph
  model_simp, check = simplify(model_org)
  if check:
    # Fix: dropped the extraneous f-prefixes on these placeholder-free strings.
    NndctScreenLogger().info("Optimize onnx graph successfully.")
    onnx.save(model_simp, onnx_path)
  else:
    NndctScreenLogger().info("Optimize onnx graph failed.")
def dump_onnx_layers(quantizer, output_dir, onnx_model, native_onnx, check_model):
  """Run the exported ONNX model and dump every layer's output to disk.

  Every intermediate tensor is promoted to a graph output of a temporary
  model, which is then executed once so each layer's result can be written
  under ``<output_dir>/onnx_layer_output/``.

  Args:
    quantizer: active quantizer; supplies the input blobs and quant model.
    output_dir: directory receiving the per-layer dump folder.
    onnx_model: path of the exported ONNX model file.
    native_onnx: True -> run with plain onnxruntime; False -> use
      onnxruntime_extensions with the registered vai custom ops.
    check_model: additionally compare ORT outputs against the torch model
      (non-native mode only).
  """
  import onnx
  import os
  # Check that the model is well formed
  onnx.checker.check_model(onnx_model)
  # input data
  inputs = get_blob_input(quantizer)  # numpy.ndarray
  if inputs is None:
    NndctScreenLogger().warning(f"ONNX model layers are not dumped since input data is None!")
    return
  # get model inputs and ouput names of each layer
  ep_list = ['CUDAExecutionProvider', 'CPUExecutionProvider']
  ort_inputs = {}
  if native_onnx:
    import onnxruntime as ort
    ort_session = ort.InferenceSession(onnx_model, providers=ep_list)
    # Bug fix: get_outputs() yields NodeArg objects; the membership test below
    # compares against tensor-name strings, so collect the names here.
    org_outputs = [x.name for x in ort_session.get_outputs()]
    for i in range(len(inputs)):
      ort_inputs[ort_session.get_inputs()[i].name] = inputs[i]
  else:
    from onnxruntime_extensions import PyOrtFunction
    from pytorch_nndct.apis import load_vai_ops
    ort_session = PyOrtFunction.from_model(onnx_model)
    org_outputs = [x for x in ort_session.output_names]
    for i in range(len(inputs)):
      ort_inputs[ort_session.input_names[i]] = inputs[i]
  model = onnx.load(onnx_model)
  # Expose every intermediate tensor as a graph output so it can be fetched.
  for node in model.graph.node:
    for output in node.output:
      if output not in org_outputs:
        model.graph.output.extend([onnx.ValueInfoProto(name=output)])
  output_names = [x.name for x in model.graph.output]  # all layer names
  # save tmp model for dumping layers
  # Bug fix: the temp file name was string-concatenated to output_dir without
  # a path separator, so it was written beside (not inside) the directory.
  onnx_model_tmp = os.path.join(output_dir, f"{quantizer.quant_model._get_name()}_int_tmp.onnx")
  onnx.save(model, onnx_model_tmp)
  # get output of each layer
  if native_onnx:
    run_ort = ort.InferenceSession(onnx_model_tmp, providers=ep_list)
    ort_outs = run_ort.run(output_names, ort_inputs)
  else:  # vai QuantizeLinear/DequantizeLinear
    # setup vai::QuantizeLinear and vai::DequantizeLinear
    load_vai_ops()
    # load tmp model: onnxruntime_extensions PyOrtFunction
    run_ort = PyOrtFunction.from_model(onnx_model_tmp)
    # onnxruntime_extensions outputs
    run_ort._ensure_ort_session()
    ort_outs = run_ort.ort_session.run(output_names, ort_inputs)
  # dump layers
  layer_dir = output_dir + "/onnx_layer_output/"
  if not os.path.exists(layer_dir):
    os.mkdir(layer_dir)
  debug = False
  for i in range(len(output_names)):
    layer_name = output_names[i]
    layer_data = ort_outs[i].astype("float32")  # numpy
    if debug:  # permute, format txt
      # NOTE(review): `permute` is not defined in this module -- confirm the
      # intended helper before enabling the debug path.
      layer_data = permute(layer_data)
      dump_path = layer_dir + layer_name + '.txt'
      if not os.path.exists(os.path.dirname(dump_path)):
        os.makedirs(os.path.dirname(dump_path))
      layer_data.flatten().tofile(dump_path, sep='\n', format="%.6f")
    else:
      dump_path = layer_dir + layer_name + '.bin'
      if not os.path.exists(os.path.dirname(dump_path)):
        os.makedirs(os.path.dirname(dump_path))
      layer_data.flatten().tofile(dump_path)
  os.remove(onnx_model_tmp)  # remove tmp model
  NndctScreenLogger().info(f"ONNX model layers are dumped successfully.({layer_dir})")
  # check ort accuracy
  if not native_onnx and check_model:
    check_dump_onnx(quantizer, ort_outs, inputs)
GLOBAL_MAP = GlobalMap()
class CmpFlag(Enum):
  """Comparison operators accepted by ``compare_torch_version``."""
  EQUAL = 0          # ==
  LESS = 1           # <
  LESS_EQUAL = 2     # <=
  GREATER = 3        # >
  GREATER_EQUAL = 4  # >=
  NOT_EQUAL = 5      # !=
def compare_torch_version(compare_type:CmpFlag, version:str):
  """Compare the installed torch version against *version*.

  Returns the boolean result of the comparison selected by *compare_type*,
  or None for an unrecognized flag (matching the original fall-through).
  """
  installed = LooseVersion(torch.__version__)
  target = LooseVersion(version)
  dispatch = {
      CmpFlag.EQUAL: lambda: installed == target,
      CmpFlag.LESS: lambda: installed < target,
      CmpFlag.LESS_EQUAL: lambda: installed <= target,
      CmpFlag.GREATER: lambda: installed > target,
      CmpFlag.GREATER_EQUAL: lambda: installed >= target,
      CmpFlag.NOT_EQUAL: lambda: installed != target,
  }
  check = dispatch.get(compare_type)
  return check() if check is not None else None
def get_opset_version():
  """Return the newest stable ONNX opset known to the installed torch.

  Probes the private locations used by torch 1.4 through 1.13, in order.

  Raises:
    RuntimeError: when none of the known locations exists.
  """
  helper_ns = torch.onnx.symbolic_helper.__dict__
  if "_onnx_stable_opsets" in helper_ns:
    return helper_ns["_onnx_stable_opsets"][-1]
  const_ns = torch.onnx._constants.__dict__
  if "onnx_stable_opsets" in const_ns:
    return const_ns["onnx_stable_opsets"][-1]
  if "ONNX_MAX_OPSET" in const_ns:
    return const_ns["ONNX_MAX_OPSET"]
  raise RuntimeError("Onnx stable opset version is not found. Please check pytorch version (1.4.0 ~ 1.13.0)")
def get_module_name(module):
  """Return a printable class name for *module*.

  ScriptModules expose it as ``original_name``; plain ``nn.Module`` objects
  via the private ``_get_name`` helper.

  Bug fix: the original implicitly returned None for non-script modules;
  the sibling definition of this helper handles both cases, so mirror it.
  """
  if isinstance(module, torch.jit.ScriptModule):
    return module.original_name
  return module._get_name()
def to_device(module, input_args, device):
  """Move *module* and every tensor nested inside *input_args* to *device*.

  Tensors are moved directly; other non-None arguments are walked element by
  element (tuples are rebuilt as tuples, everything else becomes a new list).
  Returns the (possibly moved) module and the converted arguments.
  """
  if input_args is not None:
    if isinstance(input_args, torch.Tensor):
      input_args = input_args.to(device)
    else:
      keep_tuple = isinstance(input_args, tuple)
      converted = list(input_args)
      for idx, element in enumerate(converted):
        converted[idx] = to_device(None, element, device)[1]
      input_args = tuple(converted) if keep_tuple else converted
  if module is not None:
    module = module.to(device)
  return module, input_args
def get_module_name(module):
  """Resolve a human-readable model name for *module*.

  When the module is the tracing wrapper (FlattenInOutModelForTrace), the
  original model name is recovered either from the script graph text or from
  a marker attribute stamped onto the wrapped eager module.
  """
  if isinstance(module, torch.jit.ScriptModule):
    module_name = module.original_name
  else:
    module_name = module._get_name()
  from pytorch_nndct.parse.rich_in_out_helper import FlattenInOutModelForTrace
  if module_name != FlattenInOutModelForTrace.getModelName():
    return module_name
  if isinstance(module, torch.jit.ScriptModule):
    origin_name = FlattenInOutModelForTrace.getOriginModelNameFormString(str(module.graph))
    return origin_name if origin_name is not None else module_name
  markers = [attr for attr in dir(module) if 'nndct_st_' in attr]
  if markers:
    return markers[0]
  return module_name
def export_onnx_runable_model(quantizer, example_inputs, input_tensors_name, return_tensors_name, convert_script_to_qscript, output_dir,
                              verbose=False, dynamic_batch=False, opset_version=None, native_onnx=True, dump_layers=False, check_model=False, opt_graph=False):
  """Export the quantized model as a runnable ONNX file.

  Registers ONNX symbolics for the custom ``vai::fix_neuron`` ops (per-tensor
  and per-channel) and then runs ``torch.onnx.export`` either on the eager
  quant model or, when traced script models exist, on each converted script.

  Args:
    quantizer: active quantizer holding the quant model / script models.
    example_inputs: example inputs driving the export trace.
    input_tensors_name / return_tensors_name: ONNX input/output names
      (lists of lists when multiple script models are exported).
    convert_script_to_qscript: callable turning a script model into its
      quantized script form (script-model path only).
    output_dir: directory receiving the ``*_int.onnx`` file(s).
    verbose: forwarded to ``torch.onnx.export``.
    dynamic_batch: mark dim 0 of every input as a dynamic axis.
    opset_version: explicit opset; None -> option value; -1 -> newest stable.
    native_onnx: use standard Quantize/DequantizeLinear instead of vai ops.
    dump_layers: dump per-layer outputs of the exported model.
    check_model: compare onnxruntime results against the torch model
      (BatchNorm eps is temporarily normalized to 1e-5 for comparability).
    opt_graph: run onnx-simplifier on the exported graph.
  """
  from torch.onnx import register_custom_op_symbolic
  from torch.onnx.symbolic_helper import parse_args
  import sys
  import torch._C._onnx as _C_onnx
  # Exporting fix_neuron needs torch >= 1.7 (>= 1.10 for per-channel quant).
  if compare_torch_version(CmpFlag.LESS, "1.7.0"):
    NndctScreenLogger().error2user(QError.TORCH_VERSION, f'Only supprt exporting onnx model with pytorch 1.7 and later version.')
    return
  if quantizer.contain_channel_quantize():
    if compare_torch_version(CmpFlag.LESS, "1.10.0"):
      NndctScreenLogger().error2user(QError.TORCH_VERSION, f'Only supprt exporting per_channel quantization onnx model with pytorch 1.10 and later version.')
      return

  # per-tensor quantization
  @parse_args("v", "i", "i", "f", "i", "i", "i", "i")
  def symbolic_fix_neuron(g, input, valmin, valmax, valamp, zero_point, method, device_id, inplace):
    # scale = 1/amplitude; guard against zero/denormal amplitude values.
    if valamp < sys.float_info.min:
      scale = torch.tensor(sys.float_info.max).float()  # Avoid exportor generating double type
    else:
      scale = torch.tensor(1.0 / valamp).float()  # Avoid exportor generating double type
    zero_point = torch.tensor(0, dtype=torch.int8)  # ONNX requires zero_point to be tensor
    if not isinstance(input, torch._C.Value) or not isinstance(valmin, int) or not isinstance(valmax, int) \
        or not isinstance(valamp, float) or zero_point.dtype != torch.int8 or not isinstance(method, int) \
        or not isinstance(device_id, int) or not isinstance(inplace, int) or valamp <= 0.:
      NndctScreenLogger().error2user(QError.FIX_INPUT_TYPE, f'Data type or value illegal fix neuron in when exporting onnx model.')
    # torch >= 2.0 only supports the native ONNX quantize/dequantize ops.
    if compare_torch_version(CmpFlag.GREATER_EQUAL, "2.0.0"):
      NndctOption.nndct_native_onnx.value = True
    if NndctOption.nndct_native_onnx.value:
      return g.op("DequantizeLinear", g.op("QuantizeLinear", input, scale, zero_point), scale, zero_point)
    else:
      return g.op("vai::DequantizeLinear", g.op("vai::QuantizeLinear", input, torch.tensor(valmin, dtype=torch.int32), torch.tensor(valmax, dtype=torch.int32), scale, zero_point, torch.tensor(method, dtype=torch.int8), torch.tensor(1, dtype=torch.int8)), scale, zero_point, torch.tensor(1, dtype=torch.int8))

  # per-channel quantization
  @parse_args("v", "i", "i", "v", "v", "i", "i", "i", "i")
  def symbolic_fix_neuron_per_channel(g, input, valmin, valmax, scale, zero_point, axis, method, device_id, inplace):
    if not isinstance(input, torch._C.Value) or not isinstance(valmin, int) or not isinstance(valmax, int) \
        or not isinstance(scale, torch._C.Value) or not isinstance(zero_point, torch._C.Value) or not isinstance(method, int) \
        or not isinstance(device_id, int) or not isinstance(inplace, int):
      NndctScreenLogger().error2user(QError.FIX_INPUT_TYPE, f'Data type or value illegal fix neuron in when exporting onnx model.')
    if compare_torch_version(CmpFlag.GREATER_EQUAL, "2.0.0"):
      NndctOption.nndct_native_onnx.value = True
    if NndctOption.nndct_native_onnx.value:
      return g.op("DequantizeLinear", g.op("QuantizeLinear", input, scale, zero_point, axis_i=axis), scale, zero_point, axis_i=axis)
    else:
      return g.op("vai::DequantizeLinear", g.op("vai::QuantizeLinear", input, torch.tensor(valmin, dtype=torch.int32), torch.tensor(valmax, dtype=torch.int32), scale, zero_point, torch.tensor(method, dtype=torch.int8), torch.tensor(axis, dtype=torch.int8)), scale, zero_point, torch.tensor(axis, dtype=torch.int8))

  NndctOption.nndct_native_onnx.value = native_onnx  # use global value in the following
  register_custom_op_symbolic("vai::fix_neuron", symbolic_fix_neuron, 9)
  register_custom_op_symbolic("vai::fix_neuron_per_channel", symbolic_fix_neuron_per_channel, 10)
  # Resolve the opset: explicit argument > option value > newest stable opset.
  opset_version = NndctOption.nndct_onnx_opset_version.value if opset_version is None else opset_version
  opset_version = get_opset_version() if opset_version==-1 else opset_version
  device = GLOBAL_MAP.get_ele(NNDCT_KEYS.QUANT_DEVICE)
  script_models = quantizer.scripts if len(quantizer.scripts) > 0 else None
  quantizer.reset_status_for_exporting()
  if dynamic_batch:  # dynamic batch N in [N, C, L, H, W]
    dynamic_axes = {}
    for i in range(len(input_tensors_name)):
      dynamic_axes[input_tensors_name[i]] = [0]
  else:
    dynamic_axes = None
  # check onnx: backup batchnorm eps
  batchnorm_eps_bak = {}
  if check_model:
    for module in quantizer.quant_model.modules():
      if hasattr(module, "node") and module.node.op.type == NNDCT_OP.BATCH_NORM:
        batchnorm_eps_bak[module.name] = module.eps
        module.eps = 1.0e-5
  if script_models is None:
    # Eager-mode quant model: a single export call.
    output_file = os.path.join(output_dir, f"{quantizer.quant_model._get_name()}_int.onnx")
    model, input_args = to_device(quantizer.quant_model, example_inputs, device)
    try:
      torch.onnx.export(quantizer.quant_model.eval(), input_args, output_file,
                        input_names=input_tensors_name,
                        verbose=verbose,
                        opset_version=opset_version,
                        dynamic_axes=dynamic_axes,
                        do_constant_folding=False)
    except Exception as e:
      NndctScreenLogger().error2user(QError.EXPORT_ONNX, f"The {get_module_name(quantizer.quant_model)} can not be exported for onnx. The PyTorch internal failed reason is:\n{str(e)}")
    else:
      NndctScreenLogger().info(f"{get_module_name(quantizer.quant_model)}_int.onnx is generated.({output_file})")
      # optimize onnx graph
      if opt_graph:
        optimize_onnx_graph(output_file)
      # dump layers
      if dump_layers:
        dump_onnx_layers(quantizer, output_dir, output_file, native_onnx, check_model)  # custom ops
  else:
    # Script-model path: export each script after inserting fix neurons.
    if len(script_models) == 1:
      inputs = [example_inputs]
      inputs_name = [input_tensors_name]
      outputs_name = [return_tensors_name]
    else:
      inputs = example_inputs
      inputs_name = input_tensors_name
      outputs_name = return_tensors_name
    for script, inputs, input_name, output_name in zip(script_models, inputs, inputs_name, outputs_name):
      q_model = convert_script_to_qscript(script, verbose=verbose)
      _, inputs = to_device(None, inputs, device)
      output_file = os.path.join(output_dir, f"{get_module_name(q_model)}_int.onnx")
      try:
        torch.onnx.export(q_model, inputs, output_file,
                          input_names=input_name,
                          verbose=verbose,
                          opset_version=opset_version,
                          dynamic_axes=dynamic_axes)
      except Exception as e:
        NndctScreenLogger().error2user(QError.EXPORT_ONNX, f"The {get_module_name(q_model)} can not be exported for onnx. The PyTorch internal failed reason is:\n{str(e)}.")
      else:
        NndctScreenLogger().info(f"{get_module_name(q_model)}_int.onnx is generated.({output_file}).")
        # optimize onnx graph
        if opt_graph:
          optimize_onnx_graph(output_file)
    # check model or dump layers
    # NOTE(review): the early return below skips the batchnorm eps restore
    # at the bottom of this function -- confirm that is intended.
    if NndctOption.nndct_native_onnx.value:  # native quant-dequant mode
      return
    elif dump_layers:  # dump onnx layers
      dump_onnx_layers(quantizer, output_dir, output_file, native_onnx, check_model)
  # restore the value of batchnorm eps for quantizer.quant_model
  # NOTE(review): plain nn.Module has no `.name` attribute; presumably only
  # hooked modules reach this loop -- verify against the quant model setup.
  if len(batchnorm_eps_bak) > 0:
    for module in quantizer.quant_model.modules():
      if module.name in batchnorm_eps_bak.keys():
        module.eps = batchnorm_eps_bak[module.name]
23,755 | import copy
import os
import torch
from nndct_shared.base import GLOBAL_MAP, NNDCT_KEYS, NNDCT_OP
from nndct_shared.utils import (AddXopError, NndctOption, NndctScreenLogger,
option_util, QError, QWarning, set_option_value)
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
from pytorch_nndct.utils.onnx_utils import get_opset_version
from pytorch_nndct.utils.module_util import visualize_tensors, to_device, get_module_name
from .utils import update_nndct_blob_data
import numpy as np
def round(x, method):
  """Round array-like *x* according to the integer rounding *method* code.

  Methods:
    2 -- half up: ties (fraction exactly 0.5) rounded toward +inf
    3 -- C++ std::round: ties rounded away from zero
    4 -- floor
    5 -- negative ties toward +inf, otherwise numpy half-to-even
    6 -- negative ties toward +inf, positive ties toward -inf
    7 -- ceil
    any other value -- numpy default (round half to even)
  """
  if method == 4:  # floor
    return np.floor(x)
  if method == 7:  # up
    return np.ceil(x)
  if method == 3:  # c++ std::round: negative half_down, positive half_up
    return np.where(x < 0, np.ceil(x - 0.5), np.floor(x + 0.5))
  if method == 2:  # half_up
    is_tie = x - np.floor(x) == 0.5
    return np.where(is_tie, np.ceil(x), np.round(x))
  if method == 5:  # negative half_up, positive half_even
    neg_tie = (x < 0) & (x - np.floor(x) == 0.5)
    return np.where(neg_tie, np.ceil(x), np.round(x))
  if method == 6:  # negative half_up, positive half_down (vs method 3)
    neg_tie = (x < 0) & (x - np.floor(x) == 0.5)
    stage = np.where(neg_tie, np.ceil(x), x)
    pos_tie = (stage > 0) & (stage - np.floor(stage) == 0.5)
    return np.where(pos_tie, np.floor(stage), np.round(stage))
  return np.round(x)  # half_even
23,756 | import copy
import functools
import itertools
import types
import torch
from torch.utils._python_dispatch import _disable_current_modes
from nndct_shared.base import GLOBAL_MAP, NNDCT_KEYS, NNDCT_OP
from nndct_shared.nndct_graph import (Block, Graph, Node,
convert_graph_to_block_node)
from nndct_shared.nndct_graph import operator_definition as base_op
import nndct_shared.utils as nndct_utils
from .ModuleHooker import ModuleHooker
from .utils import (prepare_quantizable_module, register_output_hook,
set_outputs_recorder_status)
from pytorch_nndct.quantization.torch_qconfig import TorchQConfig
from pytorch_nndct.quantization.fakequantizer import FakeQuantizer
from pytorch_nndct.quantization.torchquantizer import TORCHQuantizer
from nndct_shared.utils import (NndctOption, NndctScreenLogger,
option_util, QError, QWarning)
from torch._dynamo.backends.common import fake_tensor_unsupported
def build_root_graph():
  """Create an empty root Graph whose top block is bracketed by placeholder nodes."""
  graph = Graph("root")
  entry = Node(name="input_placeholder",
               op=base_op.CustomOp(NNDCT_OP.PLACEHOLDER),
               in_quant_part=False)
  entry.owning_graph = graph
  exit_node = Node(name="return_placeholder",
                   op=base_op.CustomOp(NNDCT_OP.PLACEHOLDER),
                   in_quant_part=False)
  exit_node.owning_graph = graph
  graph.set_top_block(Block(graph, None, entry, exit_node))
  return graph
def init_graph_counter():
  """Reset the global counter used to number captured GraphModules."""
  GLOBAL_MAP.set_map(NNDCT_KEYS.GRAPH_COUNTER, itertools.count(0))
def get_graph_id():
  """Return the next unique graph id from the global counter (see init_graph_counter)."""
  return next(GLOBAL_MAP.get_ele(NNDCT_KEYS.GRAPH_COUNTER))
def convert_gm_to_quantizable_module(gm, example_inputs, *, quantizer, root_graph, quant_models, graph_id, device):
  """Parse a dynamo-captured GraphModule into a quantizable module.

  The GraphModule is renamed ``GraphModule_<graph_id>``, parsed into an nndct
  graph, attached to *root_graph* as a BLOCK node, and registered with the
  quantizer. On quantizer setup failure the module is still returned, hooked
  without a quantizer so it runs in float.

  Args:
    gm: the torch.fx GraphModule produced by dynamo.
    example_inputs: inputs captured for this graph; empty -> gm is returned
      untouched.
    quantizer: active quantizer (keyword-only).
    root_graph: top-level Graph accumulating one BLOCK node per captured gm.
    quant_models: running list of quantizable modules; mutated in place.
    graph_id: unique id used to name this GraphModule.
    device: device used while tracing/parsing.
  """
  # Give every captured gm a stable, unique name for graph bookkeeping.
  def _get_name(self):
    return f"GraphModule_{graph_id}"
  if len(example_inputs) == 0:
    return gm
  gm._get_name = types.MethodType(_get_name, gm)
  quant_module, graph = prepare_quantizable_module(module=gm,
                                                  input_args=tuple(example_inputs),
                                                  export_folder=quantizer.output_dir,
                                                  device=device,
                                                  connect_qm_with_graph=True)
  # Multiple blocks implies control flow, which is handled as script mode.
  if len(graph.all_blocks()) > 1:
    quant_module.from_script(True)
  else:
    quant_module.from_script(False)
  # if quant_mode > 1 and jit_compile is False:
  #   register_output_hook(quant_module, record_once=True)
  #   set_outputs_recorder_status(quant_module, True)
  block_node = convert_graph_to_block_node(root_graph, graph)
  if not block_node.in_node_list():
    root_graph.append_node(block_node)
  # Snapshot the quant config so it can be merged back after setup.
  quant_info, history_quant_info = copy.deepcopy(quantizer.quant_config), copy.deepcopy(quantizer.config_history)
  try:
    quantizer.setup(root_graph, False, False, custom_quant_ops=None, dynamo=True)
  except RuntimeError:
    # Setup failed: hook the module without a quantizer (float execution).
    quant_module.eval()
    ModuleHooker.hook_module_with_quantizer(quant_module, None)
  else:
    # NOTE(review): `update_quant_config` is defined elsewhere in the
    # package -- presumably it merges the snapshot back; confirm.
    update_quant_config(quantizer, quant_info, history_quant_info)
    quant_module.eval()
    quant_models.append(quant_module)
    ModuleHooker.hook_module_with_quantizer(quant_module, quantizer)
  quantizer.quant_model = quant_models
  return quant_module
def aot_module_quantize(quantizer, quant_mode, *, device):
  """Build a torch.compile backend that quantizes each captured GraphModule.

  Returns a compiler callback: for every GraphModule dynamo captures, it is
  converted into a quantizable module attached to a shared root graph, and a
  no-grad runner wrapping its forward is handed back to dynamo.

  Args:
    quantizer: active quantizer shared by all captured graphs.
    quant_mode: quantization mode (currently unused here; kept for the
      caller's signature).
    device: device used while parsing captured graphs (keyword-only).
  """
  root_graph = build_root_graph()
  quant_model_lst = []
  init_graph_counter()

  # Fake tensors cannot be traced through the nndct parser.
  @fake_tensor_unsupported
  def quantizable_forward(gm, example_inputs):
    # TODO: Try to avoid to generate a new graph without any modification
    graph_id = get_graph_id()
    quant_module = convert_gm_to_quantizable_module(gm,
                                                    example_inputs,
                                                    quantizer=quantizer,
                                                    root_graph=root_graph,
                                                    quant_models=quant_model_lst,
                                                    graph_id=graph_id,
                                                    device=device)

    @torch.no_grad()
    def run(*args):
      result = quant_module.forward(*args)
      # There is a unpack sequence instruction after compiled fn in bytecode, so we need to construct a tuple for results of compiled fn
      if not isinstance(result, tuple):
        return (result, )
      else:
        return result
    return run
  return quantizable_forward
23,757 | import copy
import functools
from functools import partial
import os
import numpy as np
import torch
import nndct_shared.utils as nndct_utils
from nndct_shared.quantization import quantize_data2int
import pytorch_nndct.parse.torch_op_def as torch_op_def
import pytorch_nndct.utils.tensor_util as py_tensor_util
from pytorch_nndct.utils.module_util import to_device, collect_input_devices, get_flattened_input_args
from nndct_shared.base import NNDCT_OP, GLOBAL_MAP, NNDCT_KEYS
from nndct_shared.nndct_graph import Graph
from nndct_shared.utils import NndctScreenLogger, NndctOption, permute_data, permute_axes, QError, QWarning, QNote
from pytorch_nndct.utils.module_util import get_module_name
from typing import Sequence
def _is_module_hooked(module):
for m in module.children():
if hasattr(m, "node") and m.node:
return True
return False | null |
23,758 | import torch
_FLOAT_32 = 4  # bytes per float32 element
_MB = 1024 * 1024  # bytes per MiB (was missing: `_GB` raised NameError)
_GB = 1024 * _MB  # bytes per GiB

def tensor_size(tensor):
  """Return the memory footprint of float32 *tensor* in GiB."""
  assert isinstance(tensor, torch.Tensor)
  return torch.numel(tensor) * _FLOAT_32 / _GB
23,759 | import torch
_FLOAT_32 = 4  # bytes per float32 element
_MB = 1024 * 1024  # bytes per MiB (was missing: `_GB` raised NameError)
_GB = 1024 * _MB  # bytes per GiB

def tensor_size_by_num(num):
  """Return the memory footprint in GiB of *num* float32 elements."""
  return num * _FLOAT_32 / _GB
23,760 | import copy
import os
from typing import Any, List, Optional, Sequence, Union
import re
import torch
from pytorch_nndct.version import __version__, Ctorch_version
import nndct_shared.utils as nndct_utils
from nndct_shared.base import GLOBAL_MAP, NNDCT_KEYS, NNDCT_OP
from nndct_shared.compile import CompilerFactory, DeployChecker
from nndct_shared.utils import (AddXopError, NndctOption, NndctScreenLogger,
option_util, QError, QWarning)
from nndct_shared.nndct_graph import Graph, Node, Block, convert_graph_to_block_node
from nndct_shared.nndct_graph import operator_definition as base_op
from pytorch_nndct.quantization import (FakeQuantizer, TorchQConfig,
TORCHQuantizer)
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
from pytorch_nndct.utils.onnx_utils import get_opset_version
from pytorch_nndct.utils.module_util import visualize_tensors, to_device, get_module_name
from nndct_shared.compile import get_xmodel_and_dump_infos
from nndct_shared.quantization.fix_pos_adjust import FixPosInserter
from .adaquant import AdvancedQuantProcessor
from .utils import (connect_module_with_graph, disconnect_modeule_with_graph,
get_deploy_graph_list, insert_fix_neuron_in_script_model,
opt_script_model_for_quant, prepare_quantizable_module,
register_output_hook, set_outputs_recorder_status,
update_nndct_blob_data, quant_model_inferenced,
insert_mul_after_avgpool,
remove_quant_dequant_stub)
from .ModuleHooker import ModuleHooker
from .onnx import export_onnx_model_for_lstm, export_onnx_runable_model
from pytorch_nndct.parse.rich_in_out_helper import RecoveryModel, StandardInputData, flatten_to_tuple
GLOBAL_MAP = GlobalMap()
class FixPosInserter(object):
  """Singleton pass inserting extra local quant configs into a graph.

  Two adjustments, applied at most once per process (guarded in ``__call__``):
    * inputs of CONCAT nodes inherit the quant config of the concat output
      (only when the ``nndct_insert_concat_input_fix`` option is enabled);
    * HSWISH nodes lacking an output config inherit the config of the node
      that actually quantizes their output.
  """
  # Lock protecting lazy singleton creation in __new__.
  _instance_lock = threading.Lock()

  def __init__(self, ctx):
    # ctx is the quantizer (provides configer / quant_config accessors).
    # NOTE(review): __init__ re-runs on every FixPosInserter(ctx) call even
    # though __new__ returns the shared instance, resetting the call counter
    # below -- confirm this is intended.
    self.ctx = ctx
    # op type -> adjustment callbacks; the CONCAT entry pairs each callback
    # with a filter deciding whether the adjustment applies.
    self.insert_ouput_fn_map_with_filter = {
      NNDCT_OP.CONCAT: [(self.insert_before_concat, lambda *args: NndctOption.nndct_insert_concat_input_fix.value)],
      NNDCT_OP.HSWISH: [self.insert_after_hswish]
    }
    # Times __call__ has run; the pass is applied only once.
    self.__call_time = 0

  def __new__(cls, *args, **kwargs):
    # Thread-safe (double-checked) creation of the single shared instance.
    if not hasattr(FixPosInserter, "_instance"):
      with FixPosInserter._instance_lock:
        if not hasattr(FixPosInserter, "_instance"):
          FixPosInserter._instance = object.__new__(cls)
    return FixPosInserter._instance

  def insert_before_concat(self, node, filter_fn):
    # Give a concat-input node the quant config of its (quantizable) concat
    # child so both sides share one fix position.
    if filter_fn(self.ctx, node):
      graph = node.owning_graph
      children = graph.children(node)[0]
      if self.ctx.configer.is_node_quantizable(children, self.ctx.lstm):
        bnfp = self.get_quant_output_config(children)
        self.insert_local_quant_config(node, bnfp)

  def insert_after_hswish(self, node):
    # If hswish has no output config of its own, borrow the config of the
    # node that quantizes its output.
    if self.get_quant_output_config(node) is None:
      end_node = self.ctx.configer.quant_output(node)
      bnfp = self.get_quant_output_config(end_node)
      if bnfp is not None:
        self.insert_local_quant_config(node, bnfp)

  def __call__(self, graph):
    # Run the whole pass at most once.
    if self.__call_time > 0:
      return
    self.__call_time = self.__call_time + 1
    # from head to tail for aligning with input OPs
    for node in graph.all_nodes():
      # if not self.ctx.configer.is_node_quantizable(node, self.ctx.lstm) \
      #     or not self.ctx.configer.is_concat_input(node.name):
      #   continue
      if self.ctx.configer.is_concat_input(node.name):
        fn_list = self.insert_ouput_fn_map_with_filter.get(NNDCT_OP.CONCAT, None)
        if fn_list is not None:
          for adjust_fn, filter_fn in fn_list:
            adjust_fn(node, filter_fn)
      if node.op.type == NNDCT_OP.HSWISH:
        fn_list = self.insert_ouput_fn_map_with_filter.get(NNDCT_OP.HSWISH, None)
        if fn_list is not None:
          for adjust_fn in fn_list:
            adjust_fn(node)

  def get_quant_output_config(self, node):
    # Output quant config of *node*, or None when it has none recorded.
    if self.ctx.quant_config and node.name in self.ctx.quant_config['output']:
      return self.ctx.get_quant_config(node.name, False)
    else:
      return None

  def insert_local_quant_config(self, node, bnfp):
    # Register *bnfp* as a node-local quant config on the quantizer.
    self.ctx.insert_local_quant_config(node.name, bnfp)
def get_deploy_graph_list(quant_model, nndct_graph, need_partition=True):
  """Produce deploy-ready dev graph(s) for *quant_model*.

  For a list of quant modules (dynamo capture), one dev graph is built per
  BLOCK node of *nndct_graph* and ``([graphs], None)`` is returned; otherwise
  the single-model optimization result is returned unchanged.
  """
  if not isinstance(quant_model, list):
    return _deploy_optimize(quant_model, nndct_graph, need_partition)
  block_graphs = [
      convert_block_node_to_graph(node)
      for node in nndct_graph.nodes
      if node.op.type == NNDCT_OP.BLOCK
  ]
  assert len(quant_model) == len(block_graphs)
  deploy_graphs = [
      _deploy_optimize(model, graph, False)[1]
      for model, graph in zip(quant_model, block_graphs)
  ]
  return [deploy_graphs], None
The provided code snippet includes necessary dependencies for implementing the `dump_xmodel` function. Write a Python function `def dump_xmodel(output_dir="quantize_result", deploy_check=False, lstm_app=False)` to solve the following problem:
r"""converts module to xmodel for deployment. Compilation only works when quant mode == 2. The xmodel and some checking data will be generated under the work dir. Args: deploy_check(bool): if true, can dump blobs and parameters of the model for deployment verification Returns: None
Here is the function:
def dump_xmodel(output_dir="quantize_result", deploy_check=False, lstm_app=False):
  r"""converts module to xmodel for deployment
  compilation only works when quant mode == 2.
  The xmodel and some checking data will be generated under work dir.

  Args:
    output_dir(str): directory receiving the xmodel and dump data.
    deploy_check(bool): if true, can dump blobs and parameters of model for deployment verification
    lstm_app(bool): dump per-frame txt data for LSTM applications.

  Returns:
    None
  """
  quantizer = GLOBAL_MAP.get_ele(NNDCT_KEYS.QUANTIZER)
  # Insert extra fix positions (concat inputs / hswish outputs) before compiling.
  fixposinserter = FixPosInserter(quantizer)
  fixposinserter(quantizer.Nndctgraph)
  # Nothing happens unless a quantizer exists and we are in test mode (> 1).
  if quantizer and quantizer.quant_mode > 1:
    nndct_utils.create_work_dir(output_dir)
    # compile to xmodel
    compiler = CompilerFactory.get_compiler("xmodel")
    NndctScreenLogger().info("=>Converting to xmodel ...")
    deploy_graphs, _ = get_deploy_graph_list(quantizer.quant_model, quantizer.Nndctgraph)
    xmodel_depoly_infos, dump_deploy_infos = get_xmodel_and_dump_infos(quantizer, deploy_graphs)
    if not lstm_app:
      # xmodel export only supports batch size 1 on the graph input tensors.
      for node in xmodel_depoly_infos[0].dev_graph.nodes:
        error_out = False
        if node.op.type not in [NNDCT_OP.INPUT, NNDCT_OP.QUANT_STUB]:
          continue
        for i, tensor in enumerate(node.out_tensors):
          if tensor.shape and tensor.shape[0] != 1:
            NndctScreenLogger().error2user(QError.XMODEL_BATCHSIZE, f"Batch size must be 1 when exporting xmodel.")
            error_out = True
            break
        if error_out:
          break
    for depoly_info in dump_deploy_infos:
      # dump data for accuracy check
      if deploy_check:
        # sync data
        NndctScreenLogger().info(f"=>Dumping '{depoly_info.dev_graph.name}'' checking data...")
        # connect_module_with_graph(quantizer.quant_model, depoly_info.dev_graph, recover_param=False)
        # update_nndct_blob_data(quantizer.quant_model, depoly_info.dev_graph)
        # connect_module_with_graph(quantizer.quant_model, quantizer.Nndctgraph, recover_param=False)
        # Env switch: only dump the last node's output.
        last_only = os.getenv('NNDCT_ONLY_DUMP_LAST')
        if lstm_app:
          # LSTM dumps go to per-frame folders as txt.
          checker = DeployChecker(output_dir_name=output_dir, data_format='txt')
          checker.update_dump_folder(f"{depoly_info.dev_graph.name}/frame_0")
          select_batch = True
        else:
          checker = DeployChecker(output_dir_name=output_dir, last_only=(last_only is not None))
          checker.update_dump_folder(f"{depoly_info.dev_graph.name}")
          select_batch = False
        checker.quant_data_for_dump(quantizer, depoly_info)
        checker.dump_nodes_output(
            depoly_info.dev_graph,
            depoly_info.quant_info,
            round_method=quantizer.quant_opt['round_method'], select_batch=select_batch)
        NndctScreenLogger().info(f"=>Finish dumping data.({checker.dump_folder})")
    if quantizer.quant_strategy_info['target_device'] == "DPU":
      for depoly_info in xmodel_depoly_infos:
        try:
          # Validate the graph first; skip (with a warning) if unconvertible.
          valid, msg = compiler.verify_nndct_graph(depoly_info.dev_graph)
          if not valid:
            NndctScreenLogger().warning2user(QWarning.CONVERT_XMODEL, f"""Convert '{depoly_info.dev_graph.name}' to xmodel failed with following reasons:\n{msg}""")
            continue
          xgraph = compiler.do_compile(
              depoly_info.dev_graph,
              quant_config_info=depoly_info.quant_info,
              output_file_name=os.path.join(output_dir, depoly_info.dev_graph.name))
        except AddXopError as e:
          NndctScreenLogger().error2user(QError.EXPORT_XMODEL, f"Failed convert graph '{depoly_info.dev_graph.name}' to xmodel.")
          raise e
        compiler.verify_xmodel(depoly_info.dev_graph, xgraph)
    else:
      NndctScreenLogger().warning2user(QWarning.XMODEL_DEVICE, f"Not support to dump xmodel when target device is not DPU.")
23,761 | import copy
import os
from typing import Any, List, Optional, Sequence, Union
import re
import torch
from pytorch_nndct.version import __version__, Ctorch_version
import nndct_shared.utils as nndct_utils
from nndct_shared.base import GLOBAL_MAP, NNDCT_KEYS, NNDCT_OP
from nndct_shared.compile import CompilerFactory, DeployChecker
from nndct_shared.utils import (AddXopError, NndctOption, NndctScreenLogger,
option_util, QError, QWarning)
from nndct_shared.nndct_graph import Graph, Node, Block, convert_graph_to_block_node
from nndct_shared.nndct_graph import operator_definition as base_op
from pytorch_nndct.quantization import (FakeQuantizer, TorchQConfig,
TORCHQuantizer)
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
from pytorch_nndct.utils.onnx_utils import get_opset_version
from pytorch_nndct.utils.module_util import visualize_tensors, to_device, get_module_name
from nndct_shared.compile import get_xmodel_and_dump_infos
from nndct_shared.quantization.fix_pos_adjust import FixPosInserter
from .adaquant import AdvancedQuantProcessor
from .utils import (connect_module_with_graph, disconnect_modeule_with_graph,
get_deploy_graph_list, insert_fix_neuron_in_script_model,
opt_script_model_for_quant, prepare_quantizable_module,
register_output_hook, set_outputs_recorder_status,
update_nndct_blob_data, quant_model_inferenced,
insert_mul_after_avgpool,
remove_quant_dequant_stub)
from .ModuleHooker import ModuleHooker
from .onnx import export_onnx_model_for_lstm, export_onnx_runable_model
from pytorch_nndct.parse.rich_in_out_helper import RecoveryModel, StandardInputData, flatten_to_tuple
def vaiq_system_info(device):
  """Log system / tool / GPU information and sanity-check the environment.

  Falls back to CPU (with a warning) when CUDA/HIP is unusable, and checks
  that the installed torch matches the version the quantizer was built with.

  Args:
    device (torch.device): requested quantization device.
  """
  # Check device available
  if device.type == "cuda":
    # Bug fix: the original test `("CUDA_HOME" or "ROCM_HOME" in os.environ)`
    # always evaluated to the truthy string "CUDA_HOME"; each membership
    # check must be spelled out explicitly.
    if not (torch.cuda.is_available() and
            ("CUDA_HOME" in os.environ or "ROCM_HOME" in os.environ)):
      device = torch.device("cpu")
      NndctScreenLogger().warning2user(QWarning.CUDA_UNAVAILABLE, f"CUDA (HIP) is not available, change device to CPU")
  NndctScreenLogger().check2user(QError.TORCH_VERSION, f"Installed pytorch version is {torch.__version__}, \
not consistent with pytorch version when compiling quantizer ({Ctorch_version})",
                                 torch.__version__ in __version__)
  import platform as pf
  import sys
  long_ver = sys.version
  py_ver = '3.x'
  gcc_ver = 'x.x'
  # Bug fix: escape the dots so the patterns match literal version
  # separators instead of any character.
  result = re.findall(r'\d+\.\d+\.\d+', long_ver)
  if len(result) > 0:
    py_ver = result[0]
  result = re.findall(r'GCC \d+\.\d+\.\d+', long_ver)
  if len(result) > 0:
    gcc_ver = result[0]
  NndctScreenLogger().info(f'OS and CPU information:\n\
system --- {pf.system()}\n\
node --- {pf.node()}\n\
release --- {pf.release()}\n\
version --- {pf.version()}\n\
machine --- {pf.machine()}\n\
processor --- {pf.processor()}')
  NndctScreenLogger().info(f'Tools version information:\n\
GCC --- {gcc_ver}\n\
python --- {py_ver}\n\
pytorch --- {torch.__version__}\n\
vai_q_pytorch --- {__version__}')
  if device == torch.device('cuda'):
    NndctScreenLogger().info(f'GPU information:\n\
device name --- {torch.cuda.get_device_name()}\n\
device available --- {torch.cuda.is_available()}\n\
device count --- {torch.cuda.device_count()}\n\
current device --- {torch.cuda.current_device()}')
23,762 | import copy
import os
from typing import Any, List, Optional, Sequence, Union
import re
import torch
from pytorch_nndct.version import __version__, Ctorch_version
import nndct_shared.utils as nndct_utils
from nndct_shared.base import GLOBAL_MAP, NNDCT_KEYS, NNDCT_OP
from nndct_shared.compile import CompilerFactory, DeployChecker
from nndct_shared.utils import (AddXopError, NndctOption, NndctScreenLogger,
option_util, QError, QWarning)
from nndct_shared.nndct_graph import Graph, Node, Block, convert_graph_to_block_node
from nndct_shared.nndct_graph import operator_definition as base_op
from pytorch_nndct.quantization import (FakeQuantizer, TorchQConfig,
TORCHQuantizer)
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
from pytorch_nndct.utils.onnx_utils import get_opset_version
from pytorch_nndct.utils.module_util import visualize_tensors, to_device, get_module_name
from nndct_shared.compile import get_xmodel_and_dump_infos
from nndct_shared.quantization.fix_pos_adjust import FixPosInserter
from .adaquant import AdvancedQuantProcessor
from .utils import (connect_module_with_graph, disconnect_modeule_with_graph,
get_deploy_graph_list, insert_fix_neuron_in_script_model,
opt_script_model_for_quant, prepare_quantizable_module,
register_output_hook, set_outputs_recorder_status,
update_nndct_blob_data, quant_model_inferenced,
insert_mul_after_avgpool,
remove_quant_dequant_stub)
from .ModuleHooker import ModuleHooker
from .onnx import export_onnx_model_for_lstm, export_onnx_runable_model
from pytorch_nndct.parse.rich_in_out_helper import RecoveryModel, StandardInputData, flatten_to_tuple
def _check_args(module):
if not isinstance(module, torch.nn.Module):
raise TypeError(f"Type of 'module' should be 'torch.nn.Module'.") | null |
23,763 | import copy
import os
from typing import Any, List, Optional, Sequence, Union
import re
import torch
from pytorch_nndct.version import __version__, Ctorch_version
import nndct_shared.utils as nndct_utils
from nndct_shared.base import GLOBAL_MAP, NNDCT_KEYS, NNDCT_OP
from nndct_shared.compile import CompilerFactory, DeployChecker
from nndct_shared.utils import (AddXopError, NndctOption, NndctScreenLogger,
option_util, QError, QWarning)
from nndct_shared.nndct_graph import Graph, Node, Block, convert_graph_to_block_node
from nndct_shared.nndct_graph import operator_definition as base_op
from pytorch_nndct.quantization import (FakeQuantizer, TorchQConfig,
TORCHQuantizer)
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
from pytorch_nndct.utils.onnx_utils import get_opset_version
from pytorch_nndct.utils.module_util import visualize_tensors, to_device, get_module_name
from nndct_shared.compile import get_xmodel_and_dump_infos
from nndct_shared.quantization.fix_pos_adjust import FixPosInserter
from .adaquant import AdvancedQuantProcessor
from .utils import (connect_module_with_graph, disconnect_modeule_with_graph,
get_deploy_graph_list, insert_fix_neuron_in_script_model,
opt_script_model_for_quant, prepare_quantizable_module,
register_output_hook, set_outputs_recorder_status,
update_nndct_blob_data, quant_model_inferenced,
insert_mul_after_avgpool,
remove_quant_dequant_stub)
from .ModuleHooker import ModuleHooker
from .onnx import export_onnx_model_for_lstm, export_onnx_runable_model
from pytorch_nndct.parse.rich_in_out_helper import RecoveryModel, StandardInputData, flatten_to_tuple
def _init_quant_env(quant_mode, output_dir, quant_strategy_info, is_lstm=False):
  """Create the quantizer for the requested mode and target device.

  Args:
    quant_mode: 'calib', 'test', or a legacy integer mode (deprecated).
    output_dir: Directory where quantization artifacts are written.
    quant_strategy_info: Dict describing the quant strategy; must contain
      a 'target_device' key ('DPU', 'CPU' or 'GPU').
    is_lstm: Whether the model being quantized is an LSTM.

  Returns:
    Tuple (quantizer, qmode) with qmode 1 (calibration) or 2 (test).

  Raises:
    ValueError: If target_device is not one of 'DPU', 'CPU', 'GPU'.
  """
  if isinstance(quant_mode, int):
    NndctScreenLogger().warning("quant_mode will not support integer value in future version. It supports string values 'calib' and 'test'.")
    qmode = quant_mode
  elif isinstance(quant_mode, str):
    if quant_mode == 'calib':
      qmode = 1
    elif quant_mode == 'test':
      qmode = 2
    else:
      NndctScreenLogger().warning("quant_mode supported values are 'calib' and 'test'. Change it to 'calib' as calibration mode.")
      qmode = 1
  else:
    NndctScreenLogger().warning("quant_mode supported values are string 'calib' and 'test'. Change it to 'calib' as calibration mode.")
    qmode = 1

  # A positive nndct_quant_mode option overrides the user-supplied mode.
  if NndctOption.nndct_quant_mode.value > 0:
    qmode = NndctOption.nndct_quant_mode.value

  if qmode == 1:
    NndctScreenLogger().info("Quantization calibration process start up...")
  elif qmode == 2:
    NndctScreenLogger().info("Quantization test process start up...")

  target_device = quant_strategy_info['target_device']
  if target_device == 'DPU':
    quantizer = TORCHQuantizer.create_from_strategy(qmode,
                                                    output_dir,
                                                    quant_strategy_info,
                                                    is_lstm=is_lstm)
  elif target_device in ['CPU', 'GPU']:
    quantizer = FakeQuantizer.create_from_strategy(qmode,
                                                   output_dir,
                                                   quant_strategy_info,
                                                   is_lstm=is_lstm)
  else:
    # BUG FIX: previously fell through with `quantizer` unbound, raising an
    # opaque UnboundLocalError at the return statement below.
    raise ValueError(
        f"Unsupported target_device '{target_device}', expected one of 'DPU', 'CPU', 'GPU'.")
  return quantizer, qmode
from torch import nn
from typing import Any, List, Mapping
import os
import torch
import torch.onnx
from nndct_shared.expanding.spec import ExpandingSpec
from nndct_shared.pruning import logging
from pytorch_nndct.expanding.structured import ExpandingRunner
from pytorch_nndct.utils import profiler
import logging as _logging
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import WARN
from logging import NOTSET
# NOTE(review): signature-only skeleton — method bodies are elided in this
# extract. `Module`, `List` and `Tuple` are not imported in this chunk;
# presumably torch.nn.Module and typing — verify against the full source.
class ExpandingRunner(object):
  # Binds a model and an example input for structured channel expansion.
  def __init__(self, model: Module, input_signature: torch.Tensor) -> None:
  # Applies a pre-built ExpandingSpec and returns the expanded model.
  def expand_from_spec(self, expanding_spec: ExpandingSpec) -> Module:
  # Pads channels up to a multiple of `channel_divisible`, excluding the
  # named nodes; returns the expanded model and the spec that produced it.
  def expand(self, channel_divisible: int=2, nodes_to_exclude: List[str]=[]) -> Tuple[Module, ExpandingSpec]:
def summary_before_and_after_padding(model: nn.Module,
                                     input_signature: torch.Tensor,
                                     channel_divisible: int) -> None:
  """Log model complexity before and after channel-divisible padding."""
  logging.info("before padding:")
  profiler.model_complexity(
      model, input_signature, readable=True, print_model_analysis=True)

  # Expansion runs on CPU; only the padded model is needed, the spec is dropped.
  runner = ExpandingRunner(model.cpu(), input_signature)
  padded_model = runner.expand(channel_divisible)[0]

  logging.info("after padding:")
  profiler.model_complexity(
      padded_model, input_signature, readable=True, print_model_analysis=True)
from torch import nn
from pytorch_nndct.nn import quantization as nnq
from pytorch_nndct.quantization import module_transform
from pytorch_nndct.utils import fusion
from pytorch_nndct.utils import module_util as mod_util
The provided code snippet includes the necessary dependencies for implementing the `quantize_conv_bn` function. Write a Python function `def quantize_conv_bn(conv, bn, spec)` to solve the following problem:
Given the conv and bn modules, fuse them and return the fused module.
Args:
  conv: Module instance of (nn.Conv2d, nn.Conv3d).
  bn: nn.BatchNorm2d or nn.BatchNorm3d instance that needs to be fused with the conv.
  spec: Runtime specification used to initialize the fused module.
Return: The fused module.
Here is the function:
def quantize_conv_bn(conv, bn, spec):
  """Given the conv and bn modules, fuses them and returns the fused module
  Args:
    conv: Module instance of (nn.Conv2d, nn.Conv3d)
    bn: nn.BatchNorm2d or nn.BatchNorm3d instance that needs
      to be fused with the conv
    spec: Runtime specification used to initialize the fused module.
  Return:
    The fused module.
  """
  # Map each (conv type, bn type) pair to its fused quantized counterpart.
  foldable_patterns = {
      (nn.Conv2d, nn.BatchNorm2d): nnq.QuantizedConvBatchNorm2d,
      (nn.Conv3d, nn.BatchNorm3d): nnq.QuantizedConvBatchNorm3d,
      (nn.ConvTranspose2d, nn.BatchNorm2d): nnq.QuantizedConvTransposeBatchNorm2d,
      (nn.ConvTranspose3d, nn.BatchNorm3d): nnq.QuantizedConvTransposeBatchNorm3d,
  }
  pair = (type(conv), type(bn))
  assert pair in foldable_patterns
  assert(conv.training == bn.training),\
      "Conv and BN both must be in the same mode (train or eval)."
  assert conv.training
  assert bn.num_features == conv.out_channels, 'Output channel of Conv2d must match num_features of BatchNorm2d'
  assert bn.affine, 'Only support fusing BatchNorm2d with affine set to True'
  assert bn.track_running_stats, 'Only support fusing BatchNorm2d with tracking_running_stats set to True'
  fused_cls = foldable_patterns[pair]
  return fused_cls.from_float(conv, bn, spec)
import abc
from torch import nn
from torch import Tensor
from typing import Any, NoReturn, Optional, Sequence, Tuple, Union, List
from nndct_shared.base import NNDCT_OP as OpTypes
from nndct_shared.optimization.commander import OptimizeCommander
from nndct_shared.utils import registry
from pytorch_nndct import parse
from pytorch_nndct.nn import quantization as nnq
from pytorch_nndct.nn.nonlinear import mode
from pytorch_nndct.qproc.ModuleHooker import ModuleHooker
from pytorch_nndct.quantization import config as config_mod
from pytorch_nndct.quantization import model_topo as model_topo_mod
from pytorch_nndct.quantization import module_transform as mt
from pytorch_nndct.quantization import transforms as transforms_mod
from pytorch_nndct.utils import logging
from pytorch_nndct.utils import module_util as mod_util
class BFPQuantizeScheme(QuantizeScheme):
  """Quantize scheme based on block floating point (BFP) quantizers.

  The concrete quantizer class is selected from ``config.bfp.prime.mode``:
  None -> BFPQuantizer, 'normal' -> BFPPrimeQuantizer,
  'shared' -> BFPPrimeSharedQuantizer.
  """

  def __init__(self, model, graph, config):
    super(BFPQuantizeScheme, self).__init__(model, graph, config)
    prime_mode_to_quantizer = {
        None: nnq.BFPQuantizer,
        'normal': nnq.BFPPrimeQuantizer,
        'shared': nnq.BFPPrimeSharedQuantizer
    }
    if config.bfp.prime.mode not in prime_mode_to_quantizer:
      raise ValueError(
          f'BFP prime mode must be one of {prime_mode_to_quantizer.keys()}, but given {config.bfp.prime.mode}'
      )
    self.quantizer_cls = prime_mode_to_quantizer[config.bfp.prime.mode]
    self.bfp_args = self._bfp_args_from_runtime_config()

  def _bfp_args_from_runtime_config(self):
    """Build constructor kwargs for the selected BFP quantizer class."""
    args = {
        'bitwidth': self.config.bfp.bitwidth,
        'block_size': self.config.bfp.block_size,
        'rounding_mode': self.config.bfp.rounding_mode,
    }
    # 'shared' prime mode additionally needs the sub-block parameters.
    if self.config.bfp.prime.mode == 'shared':
      args['sub_block_size'] = self.config.bfp.prime.sub_block_size
      args['sub_block_shift_bits'] = self.config.bfp.prime.sub_block_shift_bits
    return args

  def get_transforms(self):
    """Module transforms (fusion/replacement) applied before quantization."""
    return [
        transforms_mod.QuantizeConv2dBatchNorm(),
        transforms_mod.QuantizeConv3dBatchNorm(),
        transforms_mod.QuantizeConvNd(),
        transforms_mod.QuantizeLinear(),
        transforms_mod.ReplaceSoftmax(),
        transforms_mod.ReplaceSigmoid(),
        transforms_mod.ReplaceTanh(),
        transforms_mod.ReplaceGELU(),
        transforms_mod.ReplaceLayerNorm(),
    ]

  def get_runtime_specification(self):
    """Map each graph node name to a LayerRuntimeSpec.

    NOTE(review): `_spec_generator` looks like a module-level registry
    mapping op type -> spec method, defined elsewhere in this file —
    verify against the full source.
    """
    node_to_spec = {}
    for node in self.graph.nodes:
      spec = self.default_spec()
      generator = _spec_generator.get(node.op.type, None)
      if generator:
        generator(self, spec)
      node_to_spec[node.name] = spec
    return node_to_spec

  def default_spec(self):
    """Spec with no quantizers; ops not in the registry fall back to this."""
    return config_mod.LayerRuntimeSpec(self.config)

  def conv_spec(self, spec):
    # BFP-quantize input and weight along the channel axis; bias untouched.
    spec.add_input_quantizer(self.quantizer_cls(**self.bfp_args, axis=1))
    spec.add_weight_quantizer('weight',
                              self.quantizer_cls(**self.bfp_args, axis=1))
    spec.add_weight_quantizer('bias', nn.Identity())

  def depthwise_conv_spec(self, spec):
    spec.add_input_quantizer(self.quantizer_cls(**self.bfp_args, axis=1))
    spec.add_weight_quantizer('weight',
                              self.quantizer_cls(**self.bfp_args, axis=1))
    spec.add_weight_quantizer('bias', nn.Identity())

  def conv_transpose_spec(self, spec):
    spec.add_input_quantizer(self.quantizer_cls(**self.bfp_args, axis=1))
    spec.add_weight_quantizer('weight',
                              self.quantizer_cls(**self.bfp_args, axis=1))
    spec.add_weight_quantizer('bias', nn.Identity())

  def linear_spec(self, spec):
    # Linear quantizes along the last (feature) dimension.
    spec.add_input_quantizer(self.quantizer_cls(**self.bfp_args, axis=-1))
    spec.add_weight_quantizer('weight',
                              self.quantizer_cls(**self.bfp_args, axis=-1))
    spec.add_weight_quantizer('bias', nn.Identity())

  def adaptive_avg_pool_spec(self, spec):
    # input: (N, C, H, W) or (C, H, W)
    spec.add_input_quantizer(nnq.BFloat16Quantizer())

  def matmul_spec(self, spec):
    # BFP-quantize along each operand's contraction dim; output stays fp32.
    spec.add_input_quantizer(self.quantizer_cls(**self.bfp_args, axis=-1))
    spec.add_input_quantizer(self.quantizer_cls(**self.bfp_args, axis=-2))
    spec.add_output_quantizer(nnq.FP32Quantizer())

  def gelu_spec(self, spec):
    # Quantize the input only when the non-linearity is computed exactly.
    if mode.is_no_approx(self.config.non_linear_approx.mode):
      spec.add_input_quantizer(nnq.BFloat16Quantizer())

  def sigmoid_spec(self, spec):
    if mode.is_no_approx(self.config.non_linear_approx.mode):
      spec.add_input_quantizer(nnq.BFloat16Quantizer())

  def softmax_spec(self, spec):
    if mode.is_no_approx(self.config.non_linear_approx.mode):
      spec.add_input_quantizer(nnq.BFloat16Quantizer())

  def tanh_spec(self, spec):
    # BUG FIX: previously read the bare name `config` (NameError at call
    # time); use self.config like every sibling *_spec method.
    if mode.is_no_approx(self.config.non_linear_approx.mode):
      spec.add_input_quantizer(nnq.BFloat16Quantizer())

  def layernorm_spec(self, spec):
    if mode.is_no_approx(self.config.non_linear_approx.mode):
      spec.add_input_quantizer(nnq.BFloat16Quantizer())
      spec.add_weight_quantizer('weight', nnq.BFloat16Quantizer())
def _get_graph(model, inputs):
  """Parse `model` (traced with `inputs`) into an NNDCT graph."""
  parser = parse.TorchParser()
  return parser(model._get_name(), model, inputs)
import logging as _logging
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import WARN
from logging import NOTSET
def quantize_model(model: nn.Module,
                   inputs: Tuple[Tensor, ...],
                   dtype: str = 'mx6',
                   config_file: str = None) -> nn.Module:
  """Apply BFP quantization to `model` and return the quantized model.

  Args:
    model: Float model to quantize.
    inputs: Example inputs used to parse the model into a graph.
    dtype: Name of a built-in runtime config (default 'mx6'); used only
      when `config_file` is not given.
    config_file: Optional path to a YAML runtime config; overrides `dtype`.

  Returns:
    The model produced by applying the BFP quantize scheme.
  """
  if config_file:
    logging.info(f'Loading config from {config_file}')
    rt_config = config_mod.RuntimeConfig.from_yaml(config_file)
  else:
    logging.info(f'Getting config for {dtype}')
    rt_config = config_mod.get(dtype)
  logging.info('RuntimeConfig: {}'.format(rt_config))
  graph = _get_graph(model, inputs)
  #if not runtime_config.training:
  #  model, graph = equalize_weights_cross_conv_layers(model, inputs)
  scheme = BFPQuantizeScheme(model, graph, rt_config)
  return scheme.apply()
import abc
from torch import nn
from torch import Tensor
from typing import Any, NoReturn, Optional, Sequence, Tuple, Union, List
from nndct_shared.base import NNDCT_OP as OpTypes
from nndct_shared.optimization.commander import OptimizeCommander
from nndct_shared.utils import registry
from pytorch_nndct import parse
from pytorch_nndct.nn import quantization as nnq
from pytorch_nndct.nn.nonlinear import mode
from pytorch_nndct.qproc.ModuleHooker import ModuleHooker
from pytorch_nndct.quantization import config as config_mod
from pytorch_nndct.quantization import model_topo as model_topo_mod
from pytorch_nndct.quantization import module_transform as mt
from pytorch_nndct.quantization import transforms as transforms_mod
from pytorch_nndct.utils import logging
from pytorch_nndct.utils import module_util as mod_util
def _get_graph(model, inputs):
  """Build an NNDCT graph by running the Torch parser over the model."""
  torch_parser = parse.TorchParser()
  graph_name = model._get_name()
  return torch_parser(graph_name, model, inputs)
def _attach_node_to_model(model, graph):
  """Store each graph node on its corresponding submodule as `.node`."""
  for graph_node in graph.nodes:
    owner = mod_util.get_module_by_node(model, graph_node)
    if not owner:
      continue
    owner.node = graph_node
def _detach_node_from_model(model):
def delete_node(module):
if hasattr(module, 'node'):
del module.node
model.apply(delete_node)
class OptimizeCommander(object):
def __init__(self, graph):
self._graph = graph
self.graph_depth = graph.get_graph_depth()
if NndctOption.nndct_traversal_graph_mode.value == 0:
if self.graph_depth > 900:
self._collect_layer_groups = self._collect_layer_groups_iteration
else:
self._collect_layer_groups = self._collect_layer_groups_recursion
if NndctOption.nndct_traversal_graph_mode.value == 1:
self._collect_layer_groups = self._collect_layer_groups_recursion
if NndctOption.nndct_traversal_graph_mode.value == 2:
self._collect_layer_groups = self._collect_layer_groups_iteration
  def SetNegativeSlope(self):
    # Force LeakyReLU negative_slope to the DPU-supported constant when
    # quantization is active; otherwise keep the user's slope and warn once.
    for node in self._graph.nodes:
      if node.op.type == NNDCT_OP.LEAKY_RELU:
        quant_mode, _ = maybe_get_quantizer()
        if quant_mode is None or NndctOption.nndct_quant_off.value:
          NndctScreenLogger().warning2user_once(QWarning.LEAKYRELU, f"Preserve negative_slope({node.node_attr(node.op.AttrName.ALPHA)}) of LeakyReLU without quantization.")
        else:
          # 0.1015625 == 26/256, the only slope the DPU implements.
          if node.node_attr(node.op.AttrName.ALPHA) != 0.1015625:
            NndctScreenLogger().warning2user_once(QWarning.LEAKYRELU, f"Force to change negative_slope of LeakyReLU from {node.node_attr(node.op.AttrName.ALPHA)} to 0.1015625 because DPU only supports this value. It is recommended to change all negative_slope of LeakyReLU to 0.1015625 and re-train the float model for better deployed model accuracy.")
            node.set_node_attr(node.op.AttrName.ALPHA, 0.1015625)
def ConvertBNParams(self):
for node in self._graph.nodes:
if node.op.type in _BN_TYPES:
NndctScreenLogger().info2user(QNote.NOT_FUSED_BN, f"Node {node.name} cannot be fused into CONV layers, this is not quantization friendly. It is recommended to adjsut the pattern to CONV+BN.")
gamma = node.op.params[node.op.ParamName.GAMMA].data
beta = node.op.params[node.op.ParamName.BETA].data
mean = node.op.params[node.op.ParamName.MOVING_MEAN].data
var = node.op.params[node.op.ParamName.MOVING_VAR].data
epsilon = node.node_attr(node.op.AttrName.EPSILON)
scale = gamma / np.sqrt(var + epsilon)
offset = beta - mean * scale
new_mean = mean.copy()
new_var = var.copy()
new_mean.fill(0)
new_var.fill(1)
node.set_node_attr(node.op.AttrName.EPSILON, 0.0)
node.op.set_param_from_data(node.op.ParamName.GAMMA, scale)
node.op.set_param_from_data(node.op.ParamName.BETA, offset)
node.op.set_param_from_data(node.op.ParamName.MOVING_MEAN, new_mean)
node.op.set_param_from_data(node.op.ParamName.MOVING_VAR, new_var)
  def FuseEmbedLnActv(self):
    # Fuse EMBEDDING+LAYER_NORM+{SIGMOID,TANH} chains. The handler marks the
    # LN/activation nodes as merged; each merged node is then removed once.
    fuse_embed_handler = EmbedLnActvHandler()
    graph_searcher = GraphSearcher(self._graph)
    node_sets = graph_searcher.find_nodes_from_type(
        [PatternType(pattern=[NNDCT_OP.EMBEDDING, NNDCT_OP.LAYER_NORM, NNDCT_OP.SIGMOID],
                     action=fuse_embed_handler),
         PatternType(pattern=[NNDCT_OP.EMBEDDING, NNDCT_OP.LAYER_NORM, NNDCT_OP.TANH],
                     action=fuse_embed_handler)])
    removed_nodes = set()
    for id, node_list in node_sets.items():
      for nodeset in node_list:
        ln_node = nodeset[1]  # layer-norm node of the matched chain
        if ln_node.merged and ln_node not in removed_nodes:
          self._graph.remove_node(ln_node)
          removed_nodes.add(ln_node)
        actv_node = nodeset[2]  # activation node (sigmoid or tanh)
        if actv_node.merged and actv_node not in removed_nodes:
          self._graph.remove_node(actv_node)
          removed_nodes.add(actv_node)
def FuseBnToConv(self):
# find fusable bathnorm node
fuse_bn_handler = ConvBnHandler()
graph_searcher = GraphSearcher(self._graph)
node_sets = graph_searcher.find_nodes_from_type(
[PatternType(pattern=[NNDCT_OP.CONV2D, NNDCT_OP.BATCH_NORM],
action=fuse_bn_handler),
PatternType(pattern=[NNDCT_OP.DEPTHWISE_CONV2D, NNDCT_OP.BATCH_NORM],
action=fuse_bn_handler),
PatternType(pattern=[NNDCT_OP.CONVTRANSPOSE2D, NNDCT_OP.BATCH_NORM],
action=fuse_bn_handler),
PatternType(pattern=[NNDCT_OP.DEPTHWISE_CONVTRANSPOSE2D, NNDCT_OP.BATCH_NORM],
action=fuse_bn_handler),
PatternType(pattern=[NNDCT_OP.CONV3D, NNDCT_OP.BATCH_NORM],
action=fuse_bn_handler),
PatternType(pattern=[NNDCT_OP.DEPTHWISE_CONV3D, NNDCT_OP.BATCH_NORM],
action=fuse_bn_handler),
PatternType(pattern=[NNDCT_OP.CONVTRANSPOSE3D, NNDCT_OP.BATCH_NORM],
action=fuse_bn_handler),
PatternType(pattern=[NNDCT_OP.DEPTHWISE_CONVTRANSPOSE3D, NNDCT_OP.BATCH_NORM],
action=fuse_bn_handler),
PatternType(pattern=[NNDCT_OP.CONV2D, NNDCT_OP.CONCAT, NNDCT_OP.BATCH_NORM],
action=fuse_bn_handler),
PatternType(pattern=[NNDCT_OP.DEPTHWISE_CONV2D, NNDCT_OP.CONCAT, NNDCT_OP.BATCH_NORM],
action=fuse_bn_handler),
PatternType(pattern=[NNDCT_OP.CONVTRANSPOSE2D, NNDCT_OP.CONCAT, NNDCT_OP.BATCH_NORM],
action=fuse_bn_handler),
PatternType(pattern=[NNDCT_OP.DEPTHWISE_CONVTRANSPOSE2D, NNDCT_OP.CONCAT, NNDCT_OP.BATCH_NORM],
action=fuse_bn_handler),
PatternType(pattern=[NNDCT_OP.CONV3D, NNDCT_OP.CONCAT, NNDCT_OP.BATCH_NORM],
action=fuse_bn_handler),
PatternType(pattern=[NNDCT_OP.DEPTHWISE_CONV3D, NNDCT_OP.CONCAT, NNDCT_OP.BATCH_NORM],
action=fuse_bn_handler),
PatternType(pattern=[NNDCT_OP.CONVTRANSPOSE3D, NNDCT_OP.CONCAT, NNDCT_OP.BATCH_NORM],
action=fuse_bn_handler),
PatternType(pattern=[NNDCT_OP.DEPTHWISE_CONVTRANSPOSE3D, NNDCT_OP.CONCAT, NNDCT_OP.BATCH_NORM],
action=fuse_bn_handler),
])
removed_bn = set()
for id, node_list in node_sets.items():
for nodeset in node_list:
bn_node = nodeset[-1]
if bn_node.merged and bn_node not in removed_bn:
self._graph.remove_node(bn_node)
removed_bn.add(bn_node)
  def DecoupleSharedParamsInConv(self):
    # When several conv nodes alias the same weight/bias tensor object, give
    # each later node its own cloned copy (renamed "<name>.<node_idx>") so
    # per-node quantization can diverge without aliasing.
    # NOTE(review): `Tensor` here is presumably the nndct_shared graph Tensor
    # (it has clone_from) — confirm against the file's imports.
    bias_tensor_list = []
    weight_tensor_list = []
    for node in self._graph.nodes:
      if node.op.type in _CONV_TYPES:
        weight_tensor = node.op.params[node.op.ParamName.WEIGHTS]
        weight_name = weight_tensor.name
        if weight_name in weight_tensor_list:
          # Already seen: decouple by cloning under a node-unique name.
          node_idx = node.idx
          weight_name_copy = weight_name + '.' + str(node_idx)
          new_weight_tensor = Tensor(name=weight_name_copy)
          new_weight_tensor.clone_from(weight_tensor)
          node.op.set_param(node.op.ParamName.WEIGHTS, new_weight_tensor)
        else:
          weight_tensor_list.append(weight_name)
        if node.node_attr(node.op.AttrName.BIAS_TERM):
          bias_tensor = node.op.params[node.op.ParamName.BIAS]
          bias_name = bias_tensor.name
          if bias_name in bias_tensor_list:
            node_idx = node.idx
            bias_name_copy = bias_name + '.' + str(node_idx)
            new_bias_tensor = Tensor(name=bias_name_copy)
            new_bias_tensor.clone_from(bias_tensor)
            node.op.set_param(node.op.ParamName.BIAS, new_bias_tensor)
          else:
            bias_tensor_list.append(bias_name)
  def equalize_weights_cross_conv_layers(self):
    r"""
    This function re-implements the weight equalization technique proposed in the following paper.
    "Markus Nagel et al., Data-Free Quantization through Weight Equalization and Bias Correction",
    arXiv:1906.04721, 2019."
    """
    conv_layer_groups = []
    # node = self._graph.get_node_by_idx(0)
    #node = list(self._graph.nodes)[0]
    input_nodes = self._graph.get_input_nodes()
    # build conv_layer_groups: chains of consecutive CLE-able conv layers
    # reachable from each graph input (strategy bound in __init__).
    for node in input_nodes:
      self._collect_layer_groups(node, conv_layer_groups)
    '''
    for id, group in enumerate(conv_layer_groups, 1):
      print(f"\ngroup{id}:")
      print("{")
      for n in group:
        print(f"  {n.name}")
      print("}")
    '''
    # generate equalized_group: split the chains into adjacent groups that
    # can be scaled against each other.
    equalized_groups = self._collect_equalized_groups(conv_layer_groups)
    '''
    for id, group in enumerate(equalized_groups, 1):
      print(f"\ngroup{id}:")
      print("{")
      for n in group:
        print(f"  {n.name}")
      print("}")
    '''
    # do equalization — a single pass; the loop is kept so more iterations
    # can be experimented with.
    for i in range(1):
      cle_info_set = self._cross_layer_equalization_for_conv(equalized_groups)
    return self._graph
  def _collect_layer_groups_iteration(self, node_in, conv_layer_groups_in, visited_in=None, conv_group_in=None):
    # Explicit-stack DFS equivalent of _collect_layer_groups_recursion; used
    # for graphs too deep for Python recursion (see __init__).
    def _gather_conv_group(group):
      # Flush the running chain; only chains of 2+ layers are kept.
      if len(group) > 1 and group not in conv_layer_groups:
        conv_layer_groups.append(group)
      return []
    # Task layout: [node, groups, visited, current chain, flush counter].
    # The counter emulates the post-children _gather_conv_group call of the
    # recursive version when a child turns out to be already visited.
    task_stack = [[node_in, conv_layer_groups_in, visited_in, conv_group_in, 1]]
    while len(task_stack) > 0:
      node, conv_layer_groups, visited, conv_group ,last_run_child_node_flag = task_stack.pop()
      if not visited:
        visited = []
      if not conv_group:
        conv_group = []
      if node in visited:
        last_run_child_node_flag = last_run_child_node_flag - 1
        if last_run_child_node_flag > 0:
          for k in range(last_run_child_node_flag):
            _gather_conv_group(conv_group)
        continue
      visited.append(node)
      if node.op.type in _CLE_TYPES:
        # Conv-like layer joins the chain; a fan-out (>1 consumer) ends it.
        conv_group.append(node)
        if len(node.out_nodes) > 1:
          conv_group = _gather_conv_group(conv_group)
      elif node.op.type in _ACT_TYPES:
        # Activations pass through; only RELUK itself joins the chain.
        if node.op.type == NNDCT_OP.RELUK:
          conv_group.append(node)
        if len(node.out_nodes) > 1:
          conv_group = _gather_conv_group(conv_group)
      elif node.op.type in _POOL_TYPES:
        # Pooling is transparent, but a fan-out still ends the chain.
        if len(node.out_nodes) > 1:
          conv_group = _gather_conv_group(conv_group)
      else:
        # Any other op breaks the chain.
        conv_group = _gather_conv_group(conv_group)
      new_task_list = []
      for c_node in self._graph.children(node):
        new_task_list.append([c_node, conv_layer_groups, visited, conv_group, 1])
      new_task_list.reverse()
      if len(new_task_list) > 0:
        new_task_list[0][-1] = last_run_child_node_flag + new_task_list[0][-1]
      if len(new_task_list) == 0:
        # Leaf node: perform every pending flush.
        for k in range(last_run_child_node_flag):
          _gather_conv_group(conv_group)
      for task in new_task_list:
        task_stack.append(task)
  def _collect_layer_groups_recursion(self, node, conv_layer_groups, visited=None, conv_group=None):
    # DFS that accumulates chains of consecutive CLE-able conv layers into
    # conv_layer_groups. Activations/pools extend a chain; any other op, or
    # a fan-out (>1 consumer), terminates it.
    def _gather_conv_group(group):
      # Keep only chains with at least two layers; start a fresh chain.
      if len(group) > 1 and group not in conv_layer_groups:
        conv_layer_groups.append(group)
      return []
    if not visited:
      visited = []
    if not conv_group:
      conv_group = []
    if node in visited:
      return
    visited.append(node)
    if node.op.type in _CLE_TYPES:
      conv_group.append(node)
      if len(node.out_nodes) > 1:
        conv_group = _gather_conv_group(conv_group)
    elif node.op.type in _ACT_TYPES:
      # Only RELUK itself joins the chain; other activations pass through.
      if node.op.type == NNDCT_OP.RELUK:
        conv_group.append(node)
      if len(node.out_nodes) > 1:
        conv_group = _gather_conv_group(conv_group)
    elif node.op.type in _POOL_TYPES:
      if len(node.out_nodes) > 1:
        conv_group = _gather_conv_group(conv_group)
    else:
      conv_group = _gather_conv_group(conv_group)
    # Recurse via self._collect_layer_groups, which __init__ may have bound
    # to the iterative variant instead.
    for c_node in self._graph.children(node):
      self._collect_layer_groups(c_node, conv_layer_groups, visited, conv_group)
    _gather_conv_group(conv_group)
'''
def _high_bias_fold(self, cle_info_set, fused_conv_bn_info):
for cle_info in cle_info_set:
conv_0 = cle_info.conv_group[0]
conv_1 = cle_info.conv_group[1]
conv_0_has_bias = conv_0.node_attr(conv_0.op.AttrName.BIAS_TERM)
conv_1_has_bias = conv_1.node_attr(conv_1.op.AttrName.BIAS_TERM)
if((conv_0_has_bias is False) or (conv_1_has_bias is False) or (conv_0 not in fused_conv_bn_info)):
continue
conv_0 = cle_info.conv_group[0]
conv_1 = cle_info.conv_group[1]
bn_params = fused_conv_bn_info[cle_info.conv_group[0]]
bn_gamma = bn_params["gamma"] / cle_info.scale_factor
bn_beta = bn_params["beta"] / cle_info.scale_factor
conv_0_bias = conv_0.op.params[conv_0.op.ParamName.BIAS].data
conv_1_bias = conv_1.op.params[conv_1.op.ParamName.BIAS].data
conv_1_weight = conv_1.op.params[conv_1.op.ParamName.WEIGHTS].data
if all([self._graph.node(node_name).op.type not in _ACT_TYPES for node_name in cle_info.conv_group[0].out_nodes]):
absorb_bias = bn_beta.copy()
else:
absorb_bias = np.zeros_like(bn_beta)
absorb_bias = np.where(bn_beta - 3 * np.fabs(bn_gamma) > 0, bn_beta - 3 * np.fabs(bn_gamma), absorb_bias)
conv_0_bias -= absorb_bias
conv_1_weight_reduced = np.sum(conv_1_weight, axis=(1, 2))
if conv_1_weight.shape[0] == 1:
bias_correction = conv_1_weight_reduced.squeeze(axis=0) * absorb_bias
else:
bias_correction = np.matmul(conv_1_weight_reduced, absorb_bias)
conv_1_bias += bias_correction
conv_0.op.params[conv_0.op.ParamName.BIAS].from_ndarray(conv_0_bias)
conv_1.op.params[conv_1.op.ParamName.BIAS].from_ndarray(conv_1_bias)
'''
def _compute_transpose_order(layout_src, layout_dest):
assert len(layout_src) == len(layout_dest)
transpose_order = []
for dim in layout_dest:
dim_order = layout_src.find(dim)
assert dim_order >= 0
transpose_order.append(dim_order)
return tuple(transpose_order)
  def _get_weight_data(node, layout):
    # Return a transposed copy of the node's weight in the requested layout.
    # NOTE(review): no @staticmethod decorator — this works only when invoked
    # via the class attribute, which is how callers use it here. For the
    # depthwise branches the `layout` argument is ignored.
    if node.op.type in [NNDCT_OP.CONV2D, NNDCT_OP.CONVTRANSPOSE2D, NNDCT_OP.CONV3D, NNDCT_OP.CONVTRANSPOSE3D]:
      # _OP_LAYOUTS maps op type -> native weight layout string
      # (module-level mapping, defined elsewhere in this file).
      src_layout = _OP_LAYOUTS[node.op.type]
      transpose_order = OptimizeCommander._compute_transpose_order(src_layout, layout)
      return node.op.params[node.op.ParamName.WEIGHTS].data.copy().transpose(transpose_order)
    elif node.op.type in [NNDCT_OP.DEPTHWISE_CONV2D, NNDCT_OP.DEPTHWISE_CONVTRANSPOSE2D]:
      # [channel_multipier, H, W, I]
      weight = node.op.params[node.op.ParamName.WEIGHTS].data.copy()
      # [I, H, W, channel_multiplier]
      weight = weight.transpose(3, 1, 2, 0)
      # depthwise always put channel num as first dimension
      return weight
    elif node.op.type in [NNDCT_OP.DEPTHWISE_CONV3D, NNDCT_OP.DEPTHWISE_CONVTRANSPOSE3D]:
      weight = node.op.params[node.op.ParamName.WEIGHTS].data.copy()
      # [I, D, H, W, channel_multiplier]
      weight = weight.transpose(4, 1, 2, 3, 0)
      # depthwise always put channel num as first dimension
      return weight
'''
def _get_weight_data(node, layout):
if node.op.type in [NNDCT_OP.CONV2D, NNDCT_OP.CONVTRANSPOSE2D]:
if layout == "OHWI":
return node.op.params[node.op.ParamName.WEIGHTS].data.copy()
elif layout == "IHWO":
return node.op.params[node.op.ParamName.WEIGHTS].data.copy().transpose(3, 1, 2, 0)
else:
raise ValueError("only support OHWI/IHWO layout for conv2d weight")
elif node.op.type == NNDCT_OP.DEPTHWISE_CONV2D:
# [channel_multipier, H, W, I]
weight = node.op.params[node.op.ParamName.WEIGHTS].data.copy()
# [I, H, W, channel_multiplier]
weight = weight.transpose(3, 1, 2, 0)
# depthwise always put channel num as first dimension
return weight
'''
  def _set_weight_data(node, data, layout):
    # Write `data` (given in `layout`) back into the node's weight param,
    # transposing to the op's native layout from the module-level
    # _OP_LAYOUTS mapping. Inverse of _get_weight_data.
    if node.op.type in [NNDCT_OP.CONV2D, NNDCT_OP.CONVTRANSPOSE2D, NNDCT_OP.CONV3D, NNDCT_OP.CONVTRANSPOSE3D]:
      dest_layout = _OP_LAYOUTS[node.op.type]
      transpose_order = OptimizeCommander._compute_transpose_order(layout, dest_layout)
      data = data.transpose(transpose_order)
      node.op.params[node.op.ParamName.WEIGHTS].from_ndarray(data)
    elif node.op.type in [NNDCT_OP.DEPTHWISE_CONV2D, NNDCT_OP.DEPTHWISE_CONVTRANSPOSE2D]:
      # Undo _get_weight_data's depthwise transpose; `layout` is ignored.
      data = data.transpose(3, 1, 2, 0)
      node.op.params[node.op.ParamName.WEIGHTS].from_ndarray(data)
    elif node.op.type in [NNDCT_OP.DEPTHWISE_CONV3D, NNDCT_OP.DEPTHWISE_CONVTRANSPOSE3D]:
      data = data.transpose(4, 1, 2, 3, 0)
      node.op.params[node.op.ParamName.WEIGHTS].from_ndarray(data)
'''
def _set_weight_data(node, data, layout):
if node.op.type in [NNDCT_OP.CONV2D, NNDCT_OP.CONVTRANSPOSE2D]:
if layout == "OHWI":
node.op.params[node.op.ParamName.WEIGHTS].from_ndarray(data)
elif layout == "IHWO":
data = data.transpose(3, 1, 2, 0)
node.op.params[node.op.ParamName.WEIGHTS].from_ndarray(data)
else:
raise ValueError("only support OHWI/IHWO layout for weight")
elif node.op.type == NNDCT_OP.DEPTHWISE_CONV2D:
data = data.transpose(3, 1, 2, 0)
node.op.params[node.op.ParamName.WEIGHTS].from_ndarray(data)
'''
def _get_bias_data(node):
return node.op.params[node.op.ParamName.BIAS].data.copy()
def _set_bias_data(node, data):
node.op.params[node.op.ParamName.BIAS].from_ndarray(data)
  def _combine_weight_and_bias(weights_ihw, bias):
    # Build the per-output-channel matrix whose row-wise ranges drive the
    # cross-layer-equalization scale: the flattened weights, optionally with
    # a shrunk bias column appended so a large bias influences the scale
    # without dominating it. The shrink factors below are empirical.
    if bias is not None:
      bias_clamp = bias.copy().reshape(-1, 1)
      if np.count_nonzero(weights_ihw) != weights_ihw.size:
        #print(f"{group[0].name}' weight has zero element")
        # Zero weights would blow up |bias|/|weight|; replace zeros with the
        # channel's smallest non-zero magnitude (or 1e-7 for all-zero rows,
        # zeroing that row's bias contribution as well).
        weight_ihw_clamp = weights_ihw.copy()
        for channel in range(weight_ihw_clamp.shape[0]):
          if np.count_nonzero(weight_ihw_clamp[channel]) == 0:
            bias_clamp[channel] = 0.0
            weight_ihw_clamp[channel] = 1e-7
          elif np.count_nonzero(weight_ihw_clamp[channel]) != weight_ihw_clamp[channel].size:
            minval = np.min(np.fabs(np.ma.masked_where(weight_ihw_clamp[channel] == 0.0,
                                                       weight_ihw_clamp[channel])))
            weight_ihw_clamp[channel] = np.where(weight_ihw_clamp[channel] == 0.0,
                                                 minval, weight_ihw_clamp[channel])
        weight_ihw_clamp = np.where(np.fabs(weight_ihw_clamp) < 1e-7, 1e-7, weight_ihw_clamp)
        factor = np.fabs(bias_clamp) / np.fabs(weight_ihw_clamp)
      else:
        weight_ihw_clamp = weights_ihw.copy()
        weight_ihw_clamp = np.where(np.fabs(weight_ihw_clamp) < 1e-7, 1e-7, weight_ihw_clamp)
        factor = np.fabs(bias_clamp) / np.fabs(weight_ihw_clamp)
      # Choose the damping of the bias column from how large the bias is
      # relative to the weights (median/mean of per-element ratios).
      if (np.fabs(bias).max() < 10) and (np.fabs(bias).max()/np.fabs(weight_ihw_clamp).max() < 20):
        if np.median(factor) > 100 or factor.mean() > 1000:
          shrink_factor = 5
        else:
          shrink_factor = 2
      else:
        #if factor.mean() > 500:
        if np.median(factor) > 30 or factor.mean() > 500:
          shrink_factor = 20
        #elif factor.mean() > 100:
        elif np.median(factor) > 15 or factor.mean() > 100:
          shrink_factor = 10
        else:
          shrink_factor = 5
      #shrink_factor = 2
      #print('**********************shrink_factor={}*****************************'.format(shrink_factor))
      weight_bias = np.concatenate((weights_ihw, bias_clamp / shrink_factor), axis=1)
    else:
      weight_bias = weights_ihw
    return weight_bias
def _scale_weights_and_bias_with_conv_reluk(cls, group):
ops_display_layout = {NNDCT_OP.CONV2D: "OHWI", NNDCT_OP.CONVTRANSPOSE2D: "OHWI", \
NNDCT_OP.DEPTHWISE_CONV2D: "OHWI", NNDCT_OP.DEPTHWISE_CONVTRANSPOSE2D: "OHWI", \
NNDCT_OP.CONV3D: "ODHWI", NNDCT_OP.CONVTRANSPOSE3D: "ODHWI",\
NNDCT_OP.DEPTHWISE_CONV3D: "ODHWI", NNDCT_OP.DEPTHWISE_CONVTRANSPOSE3D: "ODHWI"}
ops_scale_layout = {NNDCT_OP.CONV2D: "IHWO", NNDCT_OP.CONVTRANSPOSE2D: "IHWO", \
NNDCT_OP.DEPTHWISE_CONV2D: "IHWO", NNDCT_OP.DEPTHWISE_CONVTRANSPOSE2D: "IHWO", \
NNDCT_OP.CONV3D: "IDHWO", NNDCT_OP.CONVTRANSPOSE3D: "IDHWO",\
NNDCT_OP.DEPTHWISE_CONV3D: "IDHWO", NNDCT_OP.DEPTHWISE_CONVTRANSPOSE3D: "IDHWO"}
# conv: [O, H, W, I] / depthwise_conv: [I, H, W, channel_multiplier]
conv_0_weight = cls._get_weight_data(group[0], layout=ops_display_layout[group[0].op.type])
conv_0_bias = None
if group[0].node_attr(group[0].op.AttrName.BIAS_TERM):
conv_0_bias = cls._get_bias_data(group[0])
if group[0].op.type in [NNDCT_OP.CONVTRANSPOSE2D] and group[0].op.attr['group'] > 1:
group_num = group[0].op.attr['group']
conv_0_weightt_list = np.split(conv_0_weight, group_num, axis=3)
conv_0_weight = np.concatenate(conv_0_weightt_list, axis=0)
elif group[0].op.type in [NNDCT_OP.CONVTRANSPOSE3D] and group[0].op.attr['group'] > 1:
group_num = group[0].op.attr['group']
conv_0_weightt_list = np.split(conv_0_weight, group_num, axis=4)
conv_0_weight = np.concatenate(conv_0_weightt_list, axis=0)
# conv2d: [O, H, W, I] / depthwise_conv2d: [I, H, W, channel_multiplier]
# conv3d: [O, D, H, W, I] / depthwise_conv3d: [I, D, H, W, channel_multiplier]
conv_1_weight = cls._get_weight_data(group[-1], layout=ops_display_layout[group[-1].op.type])
if group[-1].op.type in [NNDCT_OP.DEPTHWISE_CONV2D, NNDCT_OP.DEPTHWISE_CONVTRANSPOSE2D]:
# depthwise_conv: [I, H, W, channel_multiplier] -> [channel_muliplier, H, W, I]
conv_1_weight = conv_1_weight.transpose(3, 1, 2, 0)
elif group[-1].op.type in [NNDCT_OP.DEPTHWISE_CONV3D, NNDCT_OP.DEPTHWISE_CONVTRANSPOSE3D]:
# depthwise_conv3d: [I, D, H, W, channel_multiplier] -> [channel_muliplier, D, H, W, I]
conv_1_weight = conv_1_weight.transpose(4, 1, 2, 3, 0)
elif group[-1].op.type in [NNDCT_OP.CONV2D] and group[-1].op.attr['group'] > 1:
group_num = group[-1].op.attr['group']
conv_1_weight_list = np.split(conv_1_weight, group_num, axis=0)
conv_1_weight = np.concatenate(conv_1_weight_list, axis=3)
elif group[-1].op.type in [NNDCT_OP.CONV3D] and group[-1].op.attr['group'] > 1:
group_num = group[-1].op.attr['group']
conv_1_weight_list = np.split(conv_1_weight, group_num, axis=0)
conv_1_weight = np.concatenate(conv_1_weight_list, axis=4)
# compute scale:
conv_0_weight_ihw = conv_0_weight.reshape(conv_0_weight.shape[0], -1)
conv_0_weight_bias = cls._combine_weight_and_bias(conv_0_weight_ihw, conv_0_bias)
merge_dim = tuple(np.arange(conv_1_weight.ndim-1))
range_0 = np.max(np.fabs(conv_0_weight_bias), axis=1)
range_1 = np.max(np.fabs(conv_1_weight), axis=merge_dim)
sqrt_of_ranges = np.sqrt(range_0 * range_1)
scale = np.ones_like(range_0)
scale = np.where(sqrt_of_ranges != 0, range_0 / sqrt_of_ranges, scale)
i_max = np.max(np.fabs(conv_0_weight_bias), axis=1)
o_max = np.max(np.fabs(conv_1_weight), axis=merge_dim)
scale = np.where((i_max + o_max) < 0.5, 1, scale)
scale_transpose = cls._compute_transpose_order(ops_display_layout[group[0].op.type],
ops_scale_layout[group[0].op.type])
conv_0_weight = conv_0_weight.transpose(scale_transpose) / scale
conv_0_weight = conv_0_weight.transpose(scale_transpose)
if conv_0_bias is not None:
conv_0_bias = conv_0_bias / scale
if group[0].op.type in [NNDCT_OP.CONVTRANSPOSE2D] and group[0].op.attr['group'] > 1:
group_num = group[0].op.attr['group']
conv_0_weightt_list = np.split(conv_0_weight, group_num, axis=0)
conv_0_weight = np.concatenate(conv_0_weightt_list, axis=3)
elif group[0].op.type in [NNDCT_OP.CONVTRANSPOSE3D] and group[0].op.attr['group'] > 1:
group_num = group[0].op.attr['group']
conv_0_weightt_list = np.split(conv_0_weight, group_num, axis=0)
conv_0_weight = np.concatenate(conv_0_weightt_list, axis=4)
conv_1_weight = conv_1_weight * scale
if group[-1].op.type in [NNDCT_OP.DEPTHWISE_CONV2D, NNDCT_OP.DEPTHWISE_CONVTRANSPOSE2D]:
# depthwise_conv: [channel_muliplier, H, W, I] -> [I, H, W, channel_multiplier]
conv_1_weight = conv_1_weight.transpose(3, 1, 2, 0)
elif group[-1].op.type in [NNDCT_OP.DEPTHWISE_CONV3D, NNDCT_OP.DEPTHWISE_CONVTRANSPOSE3D]:
# depthwise_conv: [channel_muliplier, D, H, W, I] -> [I, D, H, W, channel_multiplier]
conv_1_weight = conv_1_weight.transpose(4, 1, 2, 3, 0)
elif group[-1].op.type in [NNDCT_OP.CONV2D] and group[-1].op.attr['group'] > 1:
group_num = group[-1].op.attr['group']
conv_1_weight_list = np.split(conv_1_weight, group_num, axis=3)
conv_1_weight = np.concatenate(conv_1_weight_list, axis=0)
elif group[-1].op.type in [NNDCT_OP.CONV3D] and group[-1].op.attr['group'] > 1:
group_num = group[-1].op.attr['group']
conv_1_weight_list = np.split(conv_1_weight, group_num, axis=4)
conv_1_weight = np.concatenate(conv_1_weight_list, axis=0)
cls._set_weight_data(group[0], conv_0_weight, layout=ops_display_layout[group[0].op.type])
if conv_0_bias is not None:
cls._set_bias_data(group[0], conv_0_bias)
cls._set_weight_data(group[-1], conv_1_weight, layout=ops_display_layout[group[-1].op.type])
if (len(group)==3) and (group[1].op.type=='reluk'):
channel_num = conv_0_weight.shape[0]
channel_max = group[1].op.channel_max
if (isinstance(channel_max, float)
or isinstance(channel_max, int)
or (isinstance(channel_max, np.array) and channel_max.ndim == 1)):
group[1].op.channel_max = (channel_max/scale).reshape((1,channel_num,1,1)).tolist()
elif (isinstance(channel_max, np.array) and channel_max.ndim == 4):
group[1].op.channel_max = (channel_max/(scale.reshape((1,channel_num,1,1)))).tolist()
return scale
def _cross_layer_equalization_for_conv(cls, equalized_groups):
iters = 1
for i in range(iters):
cle_info_set = []
for group in equalized_groups:
#print(group)
'''
if len(group) == 3:
scale_factor = cls._scale_weights_and_bias_with_depthwise_conv(group)
else:
scale_factor = cls._scale_weights_and_bias_with_conv(group)
'''
scale_factor = cls._scale_weights_and_bias_with_conv_reluk(group)
return cle_info_set
def _collect_equalized_groups(cls, layer_groups):
equalized_groups = []
for group in layer_groups:
equalized_groups += cls._convert_layer_group_to_equalized_groups(group)
return equalized_groups
'''
def _convert_layer_group_to_equalized_groups(layer_group):
equalized_groups = []
prev_conv, *left_convs = layer_group
while left_convs:
next_conv, *others = left_convs
if next_conv.op.type == NNDCT_OP.DEPTHWISE_CONV2D:
if others:
next_non_depthwise_conv = others.pop(0)
equalized_groups.append((prev_conv, next_conv, next_non_depthwise_conv))
prev_conv = next_non_depthwise_conv
else:
equalized_groups.append((prev_conv, next_conv))
prev_conv = next_conv
#equalized_groups.append((prev_conv, next_conv))
#prev_conv = next_conv
left_convs = others
return equalized_groups
'''
def _convert_layer_group_to_equalized_groups(layer_group):
equalized_groups = []
prev_conv, *left_nodes = layer_group
while left_nodes:
next_node, *others = left_nodes
if prev_conv.op.type in _CLE_TYPES:
if next_node.op.type == NNDCT_OP.RELUK:
if others:
next_conv = others.pop(0)
if next_conv.op.type in _CLE_TYPES:
equalized_groups.append((prev_conv, next_node, next_conv))
prev_conv = next_conv
elif next_node.op.type in _CLE_TYPES:
equalized_groups.append((prev_conv, next_node))
prev_conv = next_node
else:
prev_conv = next_node
else:
prev_conv = next_node
left_nodes = others
return equalized_groups
class ModuleHooker(object):
  """Static helpers that attach nndct graph nodes, recording/dump hooks
  and parameter-sync utilities onto a torch.nn.Module hierarchy."""
  # Maps nndct parameter-name enums to the torch.nn attribute names used
  # on the corresponding hooked modules.
  _parameter_map = {
      torch_op_def.TorchConv1d.ParamName.WEIGHTS: "weight",
      torch_op_def.TorchConv1d.ParamName.BIAS: "bias",
      torch_op_def.TorchConv2d.ParamName.WEIGHTS: "weight",
      torch_op_def.TorchConv2d.ParamName.BIAS: "bias",
      torch_op_def.TorchConv3d.ParamName.WEIGHTS: "weight",
      torch_op_def.TorchConv3d.ParamName.BIAS: "bias",
      torch_op_def.TorchLinear.ParamName.WEIGHTS: "weight",
      torch_op_def.TorchLinear.ParamName.BIAS: "bias",
      torch_op_def.TorchBatchNorm.ParamName.GAMMA: "weight",
      torch_op_def.TorchBatchNorm.ParamName.BETA: "bias",
      torch_op_def.TorchBatchNorm.ParamName.MOVING_MEAN: "running_mean",
      torch_op_def.TorchBatchNorm.ParamName.MOVING_VAR: "running_var",
      torch_op_def.TorchLayerNorm.ParamName.GAMMA: "weight",
      torch_op_def.TorchLayerNorm.ParamName.BETA: "bias",
      torch_op_def.TorchEmbedding.ParamName.WEIGHT: "weight",
      torch_op_def.TorchPReLU.ParamName.WEIGHT: "weight",
      torch_op_def.TorchInstanceNorm.ParamName.GAMMA: "weight",
      torch_op_def.TorchInstanceNorm.ParamName.BETA: "bias",
      torch_op_def.TorchGroupNorm.ParamName.GAMMA: "weight",
      torch_op_def.TorchGroupNorm.ParamName.BETA: "bias"
  }
def name_modules(module):
for name, op in module.named_modules():
op.name = name
def add_outputs(module, record_once):
for op in module.modules():
op.__outputs__ = None if record_once else TimeStepData()
op.__enable_record__ = False
def add_output_intime(module):
for op in module.modules():
op.__enable_output_intime_ = False
op.__called_times_ = 0
  def add_input_dump_time(module):
    """Initialise input-dump bookkeeping on the top-level module only.

    Note: these identifiers are name-mangled inside this class (single
    trailing underscore), so the attributes are actually stored under a
    `_ModuleHooker` prefix — readers inside the class mangle identically.
    """
    module.__enable_input_dump_ = False
    module.__input_called_times_ = 0
def apply_to_children(module, func):
for child in module.children():
child.apply(func)
def turn_on_record_outputs(module):
for op in module.modules():
op.__enable_record__ = True
def turn_off_record_outputs(module):
for op in module.modules():
op.__enable_record__ = False
def clear_record_outputs(module):
for op in module.modules():
if hasattr(op, "__outputs__") and op.__outputs__ is not None:
if isinstance(op.__outputs__, TimeStepData):
op.__outputs__.clear()
else:
op.__outputs__ = None
def turn_on_output_intime(module):
for op in module.modules():
op.__enable_output_intime_ = True
def turn_off_output_intime(module):
for op in module.modules():
op.__enable_output_intime_ = False
def clear_op_called_times(module):
for op in module.modules():
op.__called_times_ = 0
  def turn_on_input_dump(module):
    """Enable float input dumping on the top-level module."""
    module.__enable_input_dump_ = True
  def turn_off_input_dump(module):
    """Disable float input dumping on the top-level module."""
    module.__enable_input_dump_ = False
  def clear_input_called_times(module):
    """Reset the input-dump call counter on the top-level module."""
    module.__input_called_times_ = 0
  def register_state_dict_hook(cls, module):
    """Install a state_dict hook on every parameterized child so that
    exported keys use the graph-derived (pre-parse) parameter prefixes
    instead of the rewritten module names.
    """
    def _resume_state_dict_key(op, destination, prefix, local_meta_data):
      # Rewrites each "prefix + name" key in `destination` to use the
      # prefix recovered from the node's parameter tensor names.
      if op.node:
        # only handle parameters
        prefixes = [".".join(param_tensor.name.split(".")[:-1]) for param_tensor in op.node.op.params.values()]
        if len(prefixes) == 0:
          raise RuntimeError("The num of module params is not equal with graph")
        # Strip the "scope::" qualifier, keep the dotted tail.
        new_prefix = prefixes[0].split("::")[-1] + "."
        for name, data in op._parameters.items():
          if prefix + name in destination:
            destination[new_prefix + name] = data
            del destination[prefix + name]
    def _state_dict_hooker(op, hooker):
      # Only modules that actually own parameters need the hook.
      if len(op._parameters):
        op._register_state_dict_hook(hooker)
    partial_func = partial(_state_dict_hooker, hooker=_resume_state_dict_key)
    cls.apply_to_children(module, partial_func)
  def register_output_hook(cls, module, record_once=True):
    """Register forward hooks that capture each hooked op's outputs.

    Outputs are cloned to CPU tensors; with record_once False they are
    appended to a per-op TimeStepData list, otherwise the latest output
    overwrites the single slot.  Recording only happens while the op's
    __enable_record__ flag is set and the op is bound to a graph node.
    """
    def _record_outputs(op, inputs, outputs):
      if op.__enable_record__ is True and hasattr(op, "node"):
        def get_outputs_value(outputs):
          # Recursively detach/clone tensors; (values, indices) results
          # (e.g. topk) become a tuple; unknown types pass through.
          if isinstance(outputs, torch.Tensor):
            return outputs.cpu().detach().clone()
          elif hasattr(outputs, "values") and hasattr(outputs, "indices"):
            return (outputs.values.cpu().detach().clone(), outputs.indices.cpu().detach().clone())
          elif isinstance(outputs, Sequence):
            return type(outputs)([get_outputs_value(op) for op in outputs])
          else:
            return outputs
        # Skip single-slot recording after the first capture unless the
        # slow (always-record) mode is switched on.
        if isinstance(op.__outputs__, TimeStepData) or (op.__outputs__ is None) or \
        (NndctOption.nndct_record_slow_mode.value is True):
          output_data = get_outputs_value(outputs)
          if isinstance(op.__outputs__, TimeStepData):
            op.__outputs__.append(output_data)
          else:
            op.__outputs__ = output_data
    def _output_hooker(op, record_func):
      op.register_forward_hook(record_func)
    if not hasattr(module, "__outputs__"):
      cls.add_outputs(module, record_once)
    partial_func = partial(_output_hooker, record_func=_record_outputs)
    cls.apply_to_children(module, partial_func)
  def register_input_dump_hook(cls, module):
    # dump input float data
    def dump_input_data(module, inputs):
      # Flattens the (possibly nested) input args to numpy arrays and
      # writes one text file per named input under
      # <output_dir>/deploy_check_data/input/<name>/<call_index>.txt.
      input_tensor_name = module._input_tensors_name
      inputs_list = []
      def inputs_flatten(tensors, tensors_flatten):
        if isinstance(tensors, torch.Tensor):
          tensors_flatten.append(tensors.cpu().detach().numpy())
        elif isinstance(tensors, (list, tuple)):
          for tensor in tensors:
            inputs_flatten(tensor, tensors_flatten)
        else:
          raise TypeError(f"Type of input_args should be tuple/list/torch.Tensor.")
      inputs_flatten(inputs, inputs_list)
      if len(input_tensor_name) != len(inputs_list):
        raise TypeError(f"Length of inputs should be the same as length of inputs name.")
      quantizer = GLOBAL_MAP.get_ele(NNDCT_KEYS.QUANTIZER)
      dir_name = os.path.join(quantizer.output_dir, 'deploy_check_data')
      input_path = os.path.join(dir_name, 'input')
      # Directory layout is created lazily on the first forward call.
      if module.__input_called_times_== 0:
        nndct_utils.create_work_dir(dir_name)
        nndct_utils.create_work_dir(os.path.join(dir_name, 'input'))
      for name, tensor in zip(input_tensor_name, inputs_list):
        name = name.replace('/', '_')
        current_input_path = os.path.join(input_path, name)
        if module.__input_called_times_== 0:
          nndct_utils.create_work_dir(current_input_path)
          shape_file_name = os.path.join(current_input_path, 'shape.txt')
          np.array(tensor.shape).tofile(shape_file_name, sep=' ')
        file_name = os.path.join(current_input_path, str(module.__input_called_times_))
        txt_file = '.'.join([file_name, 'txt'])
        tensor.flatten().tofile(txt_file, sep="\n", format="%g")
      module.__input_called_times_ += 1
    # NOTE(review): "__input_called_times_" is name-mangled when assigned
    # inside this class, so this string-based hasattr check never matches
    # and the counter is re-initialised on every registration — confirm
    # this is intended.
    if not hasattr(module, "__input_called_times_"):
      cls.add_input_dump_time(module)
    module.register_forward_pre_hook(dump_input_data)
  def register_output_intime_hook(cls, module):
    # only for jit script mode in rnnt quantization
    def _dump_intime(op, inputs, outputs):
      # After every forward call of a hooked op: dump its quantized
      # parameters (first call only) and its quantized outputs under
      # <output_dir>/deploy_check_data/{params,output}.
      if op.__enable_output_intime_ is True and hasattr(op, "node") and op.node.in_quant_part:
        def get_outputs_value(outputs):
          # Recursively convert tensors (and (values, indices) pairs)
          # to numpy; unknown leaf types become None and are skipped.
          if isinstance(outputs, torch.Tensor):
            return outputs.cpu().detach().numpy()
          elif hasattr(outputs, "values") and hasattr(outputs, "indices"):
            return (outputs.values.cpu().detach().numpy(), outputs.indices.cpu().detach().numpy())
          elif isinstance(outputs, Sequence):
            return type(outputs)([get_outputs_value(op) for op in outputs])
          else:
            return None
        dir_name = os.path.join(op.quantizer.output_dir, 'deploy_check_data')
        params_path = os.path.join(dir_name, 'params')
        output_path = os.path.join(dir_name, 'output')
        if op.__called_times_== 0:
          nndct_utils.create_work_dir(dir_name)
          nndct_utils.create_work_dir(os.path.join(dir_name, 'params'))
          nndct_utils.create_work_dir(os.path.join(dir_name, 'output'))
          shape_file_name = os.path.join(dir_name, 'shape.txt')
          # dump params
          for k, para in op.quantizer.configer.quant_node_params(op.node).items():
            # Default width when the parameter has no quant config entry.
            bit_width = 16
            fix_point = None
            if para.name in op.quantizer.quant_config['param'].keys():
              #bit_width, fix_point = op.quantizer.quant_config['param'][para.name]
              bit_width, fix_point = op.quantizer.get_quant_config(para.name, False, 'param')
            data = para.data
            para_name = para.name.replace('/', '_')
            file_name = os.path.join(params_path, para_name)
            shape_file_name = '_'.join([file_name, 'shape.txt'])
            np.array(data.shape).tofile(shape_file_name, sep=' ')
            bin_file = '.'.join([file_name+'_fix', 'bin'])
            txt_file = '.'.join([file_name+'_fix', 'txt'])
            if fix_point is not None:
              quantize_data2int(data.flatten(), bit_width, fix_point).tofile(txt_file, sep="\n", format="%g")
              quantize_data2int(data.flatten(), bit_width, fix_point).tofile(bin_file)
        # dump output
        def dump_output_data(op, output_data, index=None):
          # Writes one output blob; `index` selects the i-th element of a
          # multi-output node's quant config and file name.
          if output_data is not None:
            bit_width = 16
            fix_point = None
            end = op.quantizer.configer.quant_output(op.node.name).name
            #bit_width, fix_point = op.quantizer.quant_config['output'][end]
            node_name = op.node.name.replace('/', '_')
            if end in op.quantizer.quant_config['output'].keys():
              if index is None:
                bit_width, fix_point = op.quantizer.get_quant_config(end, False, 'output')
                current_output_path = os.path.join(output_path, node_name + "_fix")
              else:
                bit_width, fix_point = op.quantizer.get_quant_config(end, False, 'output', index)
                index_str = str(index)
                current_output_path = os.path.join(output_path, node_name + "_fix_i" + index_str)
              if op.__called_times_== 0:
                nndct_utils.create_work_dir(current_output_path)
                shape_file_name = os.path.join(current_output_path, 'shape.txt')
                np.array(output_data.shape).tofile(shape_file_name, sep=' ')
              file_name = os.path.join(current_output_path, str(op.__called_times_))
              bin_file = '.'.join([file_name, 'bin'])
              txt_file = '.'.join([file_name, 'txt'])
              if output_data.ndim > 0:
                data = output_data[:]
              else:
                data = output_data
              if fix_point is not None:
                quantize_data2int(data.flatten(), bit_width, fix_point).tofile(txt_file, sep="\n", format="%g")
                quantize_data2int(data.flatten(), bit_width, fix_point).tofile(bin_file)
        def dump_output(op, output_data):
          # Fans out over (possibly nested) sequences of outputs, keeping
          # a running index for the quant-config lookup.
          index = 0
          if isinstance(output_data, Sequence):
            for output in output_data:
              if isinstance(output, Sequence):
                for i in output:
                  dump_output_data(op, i, index)
                  index += 1
              else:
                dump_output_data(op, output, index)
                index += 1
          else:
            dump_output_data(op, output_data, None)
        #if op.quantizer.configer.is_node_quantizable(op.node, True):
        if op.quantizer.have_quant_or_not(op.node.name):
          output_data = get_outputs_value(outputs)
          dump_output(op, output_data)
        op.__called_times_ += 1
    def _output_hooker(op, output_func):
      op.register_forward_hook(output_func)
    # NOTE(review): "__called_times_" is name-mangled when assigned inside
    # this class, so this string-based hasattr check never matches and
    # counters are re-initialised on every registration — confirm intended.
    if not hasattr(module, "__called_times_"):
      cls.add_output_intime(module)
    partial_func = partial(_output_hooker, output_func=_dump_intime)
    cls.apply_to_children(module, partial_func)
def detach_node_from_module(cls, module):
def _del_node(op):
if hasattr(op, "node"):
op.attached_node_name = op.node.name
del op.node
if hasattr(op, "params_name"):
del op.params_name
cls.apply_to_children(module, _del_node)
  def hook_module_with_node(cls, module, graph):
    """Bind each child module to its graph node (and vice versa).

    A previously detached module is matched by its saved
    `attached_node_name`; otherwise the node is looked up by the integer
    suffix of the module's generated name (e.g. "...module_12" -> 12).
    """
    def _add_node_on_module(op):
      if not hasattr(op, "attached_node_name"):
        # Generated module names end with "_<node_idx>".
        idx = int(op.name.split('_')[-1])
        node = graph.get_node_by_idx(idx)
      else:
        node = graph.node(op.attached_node_name)
      if node is not None:
        node.module = op
        op.node = node
        op.params_name = [v.name for v in node.op.params.values()]
    if not hasattr(module, "name"):
      cls.name_modules(module)
    # Re-hooking: clear any stale bindings first.
    if _is_module_hooked(module):
      cls.detach_node_from_module(module)
    # partial_func = partial(_add_node_on_module, graph=graph)
    cls.apply_to_children(module, _add_node_on_module)
# @classmethod
# def hook_module_with_quantizer(cls, module, quantizer):
# if not _is_module_hooked(module):
# cls.hook_module_with_node(module, quantizer.Nndctgraph)
  def update_parameters(cls, module, graph, graph2module):
    """Synchronise parameters between the nndct graph and the module.

    With graph2module True, graph tensor data is converted back to torch
    layout and written into the corresponding module parameters/buffers;
    otherwise module parameters are copied into the graph tensors
    (legacy direction, unused).
    """
    def safe_torch_nn_Parameter(torch_tensor, requires_grad):
      # Only pass requires_grad through when the graph tensor records it.
      if requires_grad is not None:
        return torch.nn.Parameter(torch_tensor, requires_grad=requires_grad)
      return torch.nn.Parameter(torch_tensor)
    def _graph2module(op):
      node = getattr(op, "node", None)
      for param_type, tensor in node.op.params.items():
        if node.has_bound_params() and node.op.type != NNDCT_OP.LAYER_NORM: # LayerNorm weight, bias not change format
          py_tensor_util.param_to_torch_format(tensor)
        data = np.copy(tensor.data)
        if node.op.type in [NNDCT_OP.CONVTRANSPOSE2D, NNDCT_OP.CONVTRANSPOSE3D] and param_type == node.op.ParamName.WEIGHTS:
          # Transposed convs store weights as (in, out, ...): swap back.
          # data = data.transpose(1, 0, 2, 3)
          data = data.swapaxes(0, 1)
          data = np.ascontiguousarray(data)
        if node.op.type in [NNDCT_OP.DEPTHWISE_CONV2D, NNDCT_OP.DEPTHWISE_CONV3D] and param_type == node.op.ParamName.WEIGHTS:
          out_channels = node.node_config("out_channels")
          kernel_size = node.node_config("kernel_size")
          data = data.reshape((out_channels, 1, *kernel_size))
        if node.op.type in [NNDCT_OP.DEPTHWISE_CONVTRANSPOSE2D, NNDCT_OP.DEPTHWISE_CONVTRANSPOSE3D] and param_type == node.op.ParamName.WEIGHTS:
          in_channels = node.node_config("in_channels")
          kernel_size = node.node_config("kernel_size")
          data = data.reshape((1, in_channels, *kernel_size))
          data = data.swapaxes(0, 1)
          data = np.ascontiguousarray(data)
        torch_tensor = torch.from_numpy(data)
        tensor_dtype = torch_tensor.dtype
        param_name = cls._parameter_map.get(param_type, param_type.value)
        if node.has_bound_params():
          if hasattr(op, param_name):
            # Move to the existing attribute's device, keep graph dtype.
            if isinstance(getattr(op, param_name), torch.Tensor):
              torch_tensor = torch_tensor.to(
                  getattr(op, param_name)).to(tensor_dtype)
            else:
              torch_tensor = torch_tensor.to(
                  getattr(op, param_name).data).to(tensor_dtype)
            if param_name in op._buffers:
              op._buffers[param_name] = torch_tensor
            else:
              op._parameters[param_name] = safe_torch_nn_Parameter(torch_tensor, tensor.requires_grad)
          else:
            NndctScreenLogger().warning(f"new parameter: '{param_name}' is registered in {node.name}")
            op.register_parameter(param_name, safe_torch_nn_Parameter(torch_tensor, tensor.requires_grad))
        else:
          # Unbound parameters are registered on the top-level module.
          torch_tensor = torch_tensor.to(device=GLOBAL_MAP.get_ele(NNDCT_KEYS.QUANT_DEVICE))
          module.register_parameter(param_name, safe_torch_nn_Parameter(torch_tensor, tensor.requires_grad))
        if node.has_bound_params() and node.op.type != NNDCT_OP.LAYER_NORM:
          # Restore the graph tensor to nndct layout after the copy.
          py_tensor_util.param_to_nndct_format(tensor)
    # No one will call it and will be removed later.
    def _module2graph(op):
      node = getattr(op, "node", None)
      for param_name, tensor in node.op.params.items():
        if hasattr(op, cls._parameter_map[param_name]):
          if param_name in [torch_op_def.TorchBatchNorm.ParamName.MOVING_MEAN,
                            torch_op_def.TorchBatchNorm.ParamName.MOVING_VAR]:
            torch_tensor = getattr(op, cls._parameter_map[param_name])
          else:
            torch_tensor = getattr(op, cls._parameter_map[param_name]).data
          torch_tensor_data = np.copy(torch_tensor.detach().cpu().numpy())
          if node.op.type == NNDCT_OP.CONVTRANSPOSE2D and param_name == node.op.ParamName.WEIGHTS:
            torch_tensor_data = torch_tensor_data.transpose(1, 0, 2, 3)
            torch_tensor_data = np.ascontiguousarray(torch_tensor_data)
          if node.op.type == NNDCT_OP.DEPTHWISE_CONV2D and param_name == node.op.ParamName.WEIGHTS:
            in_channels = node.node_config("in_channels")
            out_channels = node.node_config("out_channels")
            kernel_size = node.node_config("kernel_size")
            channel_mutiplier = int(out_channels / in_channels)
            torch_tensor_data = torch_tensor_data.reshape((channel_mutiplier, in_channels, *kernel_size))
          tensor.from_ndarray(torch_tensor_data)
          py_tensor_util.param_to_nndct_format(tensor)
    if not _is_module_hooked(module):
      cls.hook_module_with_node(module, graph)
    func = _graph2module if graph2module else _module2graph
    # partial_func = partial(
    #     _graph2module if graph2module else _module2graph,
    #     param_map=cls._parameter_map)
    cls.apply_to_children(module, func)
def _get_output_data(outptus, outptus_name):
try:
# output_data = outptus.cpu().detach().numpy()
output_data = outptus.numpy()
except AttributeError:
NndctScreenLogger().warning(f"{outptus_name} is not tensor.")
output_data = outptus
return output_data
def _get_output_shape(outputs, outputs_name):
try:
output_shape = list(outputs.size())
except AttributeError:
NndctScreenLogger().warning(f"{outputs_name} is not tensor. It's shape is ignored.")
return None
else:
return output_shape
  def update_blobs_once(cls, module, graph=None, time_step=None, update_shape_only=False):
    """Copy each op's recorded outputs (or just their shapes) into the
    bound graph node's out_tensors.

    `time_step` selects one entry of a TimeStepData recording; with
    update_shape_only True only tensor shapes are propagated.
    """
    def _updata_tensor_data(node, nndct_tensor, torch_tensor):
      output_data = cls._get_output_data(torch_tensor, nndct_tensor.name)
      if output_data is not None and isinstance(output_data, np.ndarray):
        # Re-order axes to the node's nndct output layout first.
        output_data = permute_data(output_data, node.transpose_out_order)
        nndct_tensor.from_ndarray(output_data)
      else:
        nndct_tensor.data = output_data
    def _updata_tensor_shape(node, nndct_tensor, torch_tensor):
      output_shape = cls._get_output_shape(torch_tensor, nndct_tensor.name)
      if output_shape is not None:
        output_shape = permute_axes(output_shape, node.transpose_out_order)
        nndct_tensor.shape = output_shape
    def _update_node_outputs(op):
      if hasattr(op, "node") and op.node is not None:
        node = op.node
        # Nothing recorded yet for this op: skip.
        if not hasattr(op, "__outputs__") or op.__outputs__ is None or (isinstance(op.__outputs__, TimeStepData) and len(op.__outputs__) == 0):
          return
        one_step_outputs = op.__outputs__[
            time_step] if time_step is not None else op.__outputs__
        if len(node.out_tensors) > 1:
          # Multi-output node: recorded outputs align by position.
          for idx, tensor in enumerate(node.out_tensors):
            if not update_shape_only:
              _updata_tensor_data(node, tensor, one_step_outputs[idx])
            else:
              _updata_tensor_shape(node, tensor, one_step_outputs[idx])
        else:
          tensor = node.out_tensors[0]
          if not update_shape_only:
            _updata_tensor_data(node, tensor, one_step_outputs)
          else:
            _updata_tensor_shape(node, tensor, one_step_outputs)
    if not _is_module_hooked(module) and graph is not None:
      cls.hook_module_with_node(module, graph)
    cls.apply_to_children(module, _update_node_outputs)
  def clone_quant_module(cls, quant_module, quant_graph):
    """Deep-copy a quantized module together with a clone of its graph.

    Node and quantizer references are detached before deepcopy (they are
    not safely copyable), then restored on the original and freshly
    attached on the clone.
    """
    quantizer = GLOBAL_MAP.get_ele(NNDCT_KEYS.QUANTIZER)
    if _is_module_hooked(quant_module):
      # Strip graph/quantizer references so deepcopy does not drag them.
      cls.detach_node_from_module(quant_module)
      cls.hook_module_with_quantizer(quant_module, None)
      new_quant_module = copy.deepcopy(quant_module)
      # Restore bindings on the original module.
      cls.hook_module_with_node(quant_module, quant_graph)
      #cls.hook_module_with_node(quant_module, quantizer.graph)
      cls.hook_module_with_quantizer(quant_module, quantizer)
      # The clone gets its own cloned graph.
      new_graph = Graph(graph_name=quant_graph.name)
      new_graph.clone_from(quant_graph)
      #new_graph = Graph(graph_name=quantizer.graph.name)
      #new_graph.clone_from(quantizer.graph)
      cls.hook_module_with_node(new_quant_module, new_graph)
      cls.hook_module_with_quantizer(new_quant_module, quantizer)
    else:
      new_quant_module = copy.deepcopy(quant_module)
    return new_quant_module
def hook_module_with_quantizer(cls, module, quantizer):
def _add_quantizer_on_module(op):
if hasattr(op, "quantizer"):
op.quantizer = quantizer
cls.apply_to_children(module, _add_quantizer_on_module)
  def hook_module_with_input_device_checker(cls, module, module_gen_from_script):
    """Install a forward pre-hook that moves mismatched input tensors to
    the quantizer's device and, for non-script modules, flattens the
    input args to the form the rewritten forward expects.
    """
    def _check_input_args(op, input):
      # Marks that at least one inference has gone through this module.
      op.nndct_inferenced = True
      quant_device = GLOBAL_MAP.get_ele(NNDCT_KEYS.QUANT_DEVICE)
      input_devices = collect_input_devices(input)
      if any([device != quant_device.type for device in input_devices]):
        NndctScreenLogger().warning2user_once(QWarning.DEVICE_MISMATCH, f"The Device of input args mismatch with quantizer device type({quant_device.type}).")
        _, input = to_device(None, input, device=quant_device)
      if module_gen_from_script:
        return input
      else:
        return get_flattened_input_args(input)
    module.register_forward_pre_hook(_check_input_args)
def register_tensor_dtype_and_shape_hook(cls, module):
torch2nndct_dtype_mapper = lambda torch_dtype: {
torch.float64: "float64",
torch.float32: "float32",
torch.float16: "float16",
torch.int64: "int64",
torch.int32: "int32",
torch.bool: "bool",
}.get(torch_dtype, torch_dtype)
handlers = []
def _record_dtype_and_shape(op, inputs, outputs):
def _get_output_dtype_and_shape(output):
if isinstance(output, torch.Tensor):
return torch2nndct_dtype_mapper(output.dtype), tuple(output.shape)
else:
return type(output), None
quantizer = GLOBAL_MAP.get_ele(NNDCT_KEYS.QUANTIZER)
if hasattr(op, "node"):
node = op.node
if not isinstance(outputs, (list, tuple)):
outputs = [outputs]
for nndct_tensor, output in zip(node.out_tensors, outputs):
dtype, tensor_shape = _get_output_dtype_and_shape(output)
nndct_tensor.from_des(tensor_shape, dtype)
def _hooker(op, record_func):
handlers.append(op.register_forward_hook(record_func))
partial_func = partial(_hooker, record_func=_record_dtype_and_shape)
cls.apply_to_children(module, partial_func)
return handlers
def equalize_weights_cross_conv_layers(model, inputs):
  """Fuse conv+BN and apply cross-layer weight equalization to `model`.

  Returns the transformed model and the nndct graph whose (equalized)
  parameters have been written back into the model.
  """
  model.eval()
  graph = _get_graph(model, inputs)
  model_topo = model_topo_mod.build_model_topo(graph, {})
  # Fold BatchNorm into the preceding convolutions before equalizing.
  transformer = mt.ModuleTransformer(model, model_topo, [
      transforms_mod.FuseConv2dBatchNorm(),
      transforms_mod.FuseConv3dBatchNorm()
  ])
  model, _, _ = transformer.transform()
  # Re-trace: fusion changed the module structure.
  graph = _get_graph(model, inputs)
  commander = OptimizeCommander(graph)
  commander.DecoupleSharedParamsInConv()
  graph = commander.equalize_weights_cross_conv_layers()
  # Push the equalized graph parameters back into the torch modules.
  _attach_node_to_model(model, graph)
  ModuleHooker.update_parameters(model, graph, True)
  _detach_node_from_model(model)
  return model, graph
23,768 | import copy
import os
import torch
import types
from nndct_shared.base import NNDCT_OP as OpTypes
from nndct_shared.optimization.optimizer import QuantOptimizer
from nndct_shared.utils import NndctOption
from nndct_shared.utils import NndctScreenLogger
from nndct_shared.utils import io as io_util
from nndct_shared.utils import option_util
from nndct_shared.utils.msg_code import QError
from nndct_shared.utils.msg_code import QWarning
from pytorch_nndct import parse
from pytorch_nndct import utils as py_utils
from pytorch_nndct.nn.modules import functional
from pytorch_nndct.nn.quantization.modules import conv_fused
from pytorch_nndct.nn.quantization.modules import tqt as tqt_mod
from pytorch_nndct.parse.parse_utils import get_short_name
from pytorch_nndct.qproc import base as qproc
from pytorch_nndct.qproc.ModuleHooker import ModuleHooker
from pytorch_nndct.quantization import model_topo as model_topo_mod
from pytorch_nndct.quantization import module_transform
from pytorch_nndct.quantization import config as config_mod
from pytorch_nndct.quantization import transforms as transforms_mod
from pytorch_nndct.utils import logging
from pytorch_nndct.utils import module_util as mod_util
def logging_warn(code, message):
  """Emit a user-facing warning tagged with message code `code`."""
  screen_logger = NndctScreenLogger()
  screen_logger.warning2user(code, message)
23,769 | import copy
import os
import torch
import types
from nndct_shared.base import NNDCT_OP as OpTypes
from nndct_shared.optimization.optimizer import QuantOptimizer
from nndct_shared.utils import NndctOption
from nndct_shared.utils import NndctScreenLogger
from nndct_shared.utils import io as io_util
from nndct_shared.utils import option_util
from nndct_shared.utils.msg_code import QError
from nndct_shared.utils.msg_code import QWarning
from pytorch_nndct import parse
from pytorch_nndct import utils as py_utils
from pytorch_nndct.nn.modules import functional
from pytorch_nndct.nn.quantization.modules import conv_fused
from pytorch_nndct.nn.quantization.modules import tqt as tqt_mod
from pytorch_nndct.parse.parse_utils import get_short_name
from pytorch_nndct.qproc import base as qproc
from pytorch_nndct.qproc.ModuleHooker import ModuleHooker
from pytorch_nndct.quantization import model_topo as model_topo_mod
from pytorch_nndct.quantization import module_transform
from pytorch_nndct.quantization import config as config_mod
from pytorch_nndct.quantization import transforms as transforms_mod
from pytorch_nndct.utils import logging
from pytorch_nndct.utils import module_util as mod_util
def logging_error(code, message):
  """Report a fatal user-facing error tagged with message code `code`."""
  screen_logger = NndctScreenLogger()
  screen_logger.check2user(code, message, False)
23,770 | import copy
import os
import torch
import types
from nndct_shared.base import NNDCT_OP as OpTypes
from nndct_shared.optimization.optimizer import QuantOptimizer
from nndct_shared.utils import NndctOption
from nndct_shared.utils import NndctScreenLogger
from nndct_shared.utils import io as io_util
from nndct_shared.utils import option_util
from nndct_shared.utils.msg_code import QError
from nndct_shared.utils.msg_code import QWarning
from pytorch_nndct import parse
from pytorch_nndct import utils as py_utils
from pytorch_nndct.nn.modules import functional
from pytorch_nndct.nn.quantization.modules import conv_fused
from pytorch_nndct.nn.quantization.modules import tqt as tqt_mod
from pytorch_nndct.parse.parse_utils import get_short_name
from pytorch_nndct.qproc import base as qproc
from pytorch_nndct.qproc.ModuleHooker import ModuleHooker
from pytorch_nndct.quantization import model_topo as model_topo_mod
from pytorch_nndct.quantization import module_transform
from pytorch_nndct.quantization import config as config_mod
from pytorch_nndct.quantization import transforms as transforms_mod
from pytorch_nndct.utils import logging
from pytorch_nndct.utils import module_util as mod_util
import logging as _logging
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import WARN
from logging import NOTSET
def enable_quant(model):
  """Recursively switch all TQT quantizer submodules of `model` to
  quantized execution."""
  model.apply(tqt_mod.enable_quant)
  notice = 'Enable quantization: quantized operations will be performed.'
  logging.info(notice)
23,771 | import copy
import os
import torch
import types
from nndct_shared.base import NNDCT_OP as OpTypes
from nndct_shared.optimization.optimizer import QuantOptimizer
from nndct_shared.utils import NndctOption
from nndct_shared.utils import NndctScreenLogger
from nndct_shared.utils import io as io_util
from nndct_shared.utils import option_util
from nndct_shared.utils.msg_code import QError
from nndct_shared.utils.msg_code import QWarning
from pytorch_nndct import parse
from pytorch_nndct import utils as py_utils
from pytorch_nndct.nn.modules import functional
from pytorch_nndct.nn.quantization.modules import conv_fused
from pytorch_nndct.nn.quantization.modules import tqt as tqt_mod
from pytorch_nndct.parse.parse_utils import get_short_name
from pytorch_nndct.qproc import base as qproc
from pytorch_nndct.qproc.ModuleHooker import ModuleHooker
from pytorch_nndct.quantization import model_topo as model_topo_mod
from pytorch_nndct.quantization import module_transform
from pytorch_nndct.quantization import config as config_mod
from pytorch_nndct.quantization import transforms as transforms_mod
from pytorch_nndct.utils import logging
from pytorch_nndct.utils import module_util as mod_util
import logging as _logging
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import WARN
from logging import NOTSET
def disable_quant(model):
  """Switch every TQT quantizer in *model* back to float execution."""
  hook = tqt_mod.disable_quant
  model.apply(hook)
  logging.info(
      'Disable quantization: floating point operations will be performed.')
import copy
import os
import torch
import types
from nndct_shared.base import NNDCT_OP as OpTypes
from nndct_shared.optimization.optimizer import QuantOptimizer
from nndct_shared.utils import NndctOption
from nndct_shared.utils import NndctScreenLogger
from nndct_shared.utils import io as io_util
from nndct_shared.utils import option_util
from nndct_shared.utils.msg_code import QError
from nndct_shared.utils.msg_code import QWarning
from pytorch_nndct import parse
from pytorch_nndct import utils as py_utils
from pytorch_nndct.nn.modules import functional
from pytorch_nndct.nn.quantization.modules import conv_fused
from pytorch_nndct.nn.quantization.modules import tqt as tqt_mod
from pytorch_nndct.parse.parse_utils import get_short_name
from pytorch_nndct.qproc import base as qproc
from pytorch_nndct.qproc.ModuleHooker import ModuleHooker
from pytorch_nndct.quantization import model_topo as model_topo_mod
from pytorch_nndct.quantization import module_transform
from pytorch_nndct.quantization import config as config_mod
from pytorch_nndct.quantization import transforms as transforms_mod
from pytorch_nndct.utils import logging
from pytorch_nndct.utils import module_util as mod_util
import logging as _logging
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import WARN
from logging import NOTSET
def enable_warmup(model):
  """Put every TQT quantizer in *model* into its warm-up (initialization) phase."""
  hook = tqt_mod.enable_warmup
  model.apply(hook)
  logging.info('Initialize quantizer.')
import copy
import os
import torch
import types
from nndct_shared.base import NNDCT_OP as OpTypes
from nndct_shared.optimization.optimizer import QuantOptimizer
from nndct_shared.utils import NndctOption
from nndct_shared.utils import NndctScreenLogger
from nndct_shared.utils import io as io_util
from nndct_shared.utils import option_util
from nndct_shared.utils.msg_code import QError
from nndct_shared.utils.msg_code import QWarning
from pytorch_nndct import parse
from pytorch_nndct import utils as py_utils
from pytorch_nndct.nn.modules import functional
from pytorch_nndct.nn.quantization.modules import conv_fused
from pytorch_nndct.nn.quantization.modules import tqt as tqt_mod
from pytorch_nndct.parse.parse_utils import get_short_name
from pytorch_nndct.qproc import base as qproc
from pytorch_nndct.qproc.ModuleHooker import ModuleHooker
from pytorch_nndct.quantization import model_topo as model_topo_mod
from pytorch_nndct.quantization import module_transform
from pytorch_nndct.quantization import config as config_mod
from pytorch_nndct.quantization import transforms as transforms_mod
from pytorch_nndct.utils import logging
from pytorch_nndct.utils import module_util as mod_util
def disable_warmup(model):
  """End the warm-up (initialization) phase for every TQT quantizer in *model*."""
  hook = tqt_mod.disable_warmup
  model.apply(hook)
import copy
import os
import torch
import types
from nndct_shared.base import NNDCT_OP as OpTypes
from nndct_shared.optimization.optimizer import QuantOptimizer
from nndct_shared.utils import NndctOption
from nndct_shared.utils import NndctScreenLogger
from nndct_shared.utils import io as io_util
from nndct_shared.utils import option_util
from nndct_shared.utils.msg_code import QError
from nndct_shared.utils.msg_code import QWarning
from pytorch_nndct import parse
from pytorch_nndct import utils as py_utils
from pytorch_nndct.nn.modules import functional
from pytorch_nndct.nn.quantization.modules import conv_fused
from pytorch_nndct.nn.quantization.modules import tqt as tqt_mod
from pytorch_nndct.parse.parse_utils import get_short_name
from pytorch_nndct.qproc import base as qproc
from pytorch_nndct.qproc.ModuleHooker import ModuleHooker
from pytorch_nndct.quantization import model_topo as model_topo_mod
from pytorch_nndct.quantization import module_transform
from pytorch_nndct.quantization import config as config_mod
from pytorch_nndct.quantization import transforms as transforms_mod
from pytorch_nndct.utils import logging
from pytorch_nndct.utils import module_util as mod_util
import logging as _logging
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import WARN
from logging import NOTSET
def freeze_quant(model):
  """Stop updating quantizer scales in *model*; keep using the current values."""
  hook = tqt_mod.freeze_quant
  model.apply(hook)
  logging.info('Scale of quantizer has been frozen.')
import copy
import os
import torch
import types
from nndct_shared.base import NNDCT_OP as OpTypes
from nndct_shared.optimization.optimizer import QuantOptimizer
from nndct_shared.utils import NndctOption
from nndct_shared.utils import NndctScreenLogger
from nndct_shared.utils import io as io_util
from nndct_shared.utils import option_util
from nndct_shared.utils.msg_code import QError
from nndct_shared.utils.msg_code import QWarning
from pytorch_nndct import parse
from pytorch_nndct import utils as py_utils
from pytorch_nndct.nn.modules import functional
from pytorch_nndct.nn.quantization.modules import conv_fused
from pytorch_nndct.nn.quantization.modules import tqt as tqt_mod
from pytorch_nndct.parse.parse_utils import get_short_name
from pytorch_nndct.qproc import base as qproc
from pytorch_nndct.qproc.ModuleHooker import ModuleHooker
from pytorch_nndct.quantization import model_topo as model_topo_mod
from pytorch_nndct.quantization import module_transform
from pytorch_nndct.quantization import config as config_mod
from pytorch_nndct.quantization import transforms as transforms_mod
from pytorch_nndct.utils import logging
from pytorch_nndct.utils import module_util as mod_util
import logging as _logging
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import WARN
from logging import NOTSET
def freeze_bn_stats(model):
  """Freeze the running mean/variance of every fused batchnorm in *model*."""
  hook = conv_fused.freeze_bn_stats
  model.apply(hook)
  logging.info('Running statistics of batch normlization has been frozen.')
import copy
import os
import torch
import types
from nndct_shared.base import NNDCT_OP as OpTypes
from nndct_shared.optimization.optimizer import QuantOptimizer
from nndct_shared.utils import NndctOption
from nndct_shared.utils import NndctScreenLogger
from nndct_shared.utils import io as io_util
from nndct_shared.utils import option_util
from nndct_shared.utils.msg_code import QError
from nndct_shared.utils.msg_code import QWarning
from pytorch_nndct import parse
from pytorch_nndct import utils as py_utils
from pytorch_nndct.nn.modules import functional
from pytorch_nndct.nn.quantization.modules import conv_fused
from pytorch_nndct.nn.quantization.modules import tqt as tqt_mod
from pytorch_nndct.parse.parse_utils import get_short_name
from pytorch_nndct.qproc import base as qproc
from pytorch_nndct.qproc.ModuleHooker import ModuleHooker
from pytorch_nndct.quantization import model_topo as model_topo_mod
from pytorch_nndct.quantization import module_transform
from pytorch_nndct.quantization import config as config_mod
from pytorch_nndct.quantization import transforms as transforms_mod
from pytorch_nndct.utils import logging
from pytorch_nndct.utils import module_util as mod_util
import logging as _logging
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import WARN
from logging import NOTSET
def fuse_conv_bn(model):
  """Fold batchnorm parameters into the preceding convolutions of *model*."""
  hook = conv_fused.fuse_conv_bn
  model.apply(hook)
  # Mark the model so downstream code knows fusion already happened.
  model.conv_bn_fused = True
  logging.info('Merge batchnorm to conv.')
import copy
import os
import torch
import types
from nndct_shared.base import NNDCT_OP as OpTypes
from nndct_shared.optimization.optimizer import QuantOptimizer
from nndct_shared.utils import NndctOption
from nndct_shared.utils import NndctScreenLogger
from nndct_shared.utils import io as io_util
from nndct_shared.utils import option_util
from nndct_shared.utils.msg_code import QError
from nndct_shared.utils.msg_code import QWarning
from pytorch_nndct import parse
from pytorch_nndct import utils as py_utils
from pytorch_nndct.nn.modules import functional
from pytorch_nndct.nn.quantization.modules import conv_fused
from pytorch_nndct.nn.quantization.modules import tqt as tqt_mod
from pytorch_nndct.parse.parse_utils import get_short_name
from pytorch_nndct.qproc import base as qproc
from pytorch_nndct.qproc.ModuleHooker import ModuleHooker
from pytorch_nndct.quantization import model_topo as model_topo_mod
from pytorch_nndct.quantization import module_transform
from pytorch_nndct.quantization import config as config_mod
from pytorch_nndct.quantization import transforms as transforms_mod
from pytorch_nndct.utils import logging
from pytorch_nndct.utils import module_util as mod_util
def quantizer_parameters(model):
  """Return the quantizer parameters of *model*.

  A parameter belongs to a quantizer when its qualified name contains
  'log_threshold'.
  """
  selected = []
  for name, param in model.named_parameters():
    if 'log_threshold' in name:
      selected.append(param)
  return selected
import copy
import os
import torch
import types
from nndct_shared.base import NNDCT_OP as OpTypes
from nndct_shared.optimization.optimizer import QuantOptimizer
from nndct_shared.utils import NndctOption
from nndct_shared.utils import NndctScreenLogger
from nndct_shared.utils import io as io_util
from nndct_shared.utils import option_util
from nndct_shared.utils.msg_code import QError
from nndct_shared.utils.msg_code import QWarning
from pytorch_nndct import parse
from pytorch_nndct import utils as py_utils
from pytorch_nndct.nn.modules import functional
from pytorch_nndct.nn.quantization.modules import conv_fused
from pytorch_nndct.nn.quantization.modules import tqt as tqt_mod
from pytorch_nndct.parse.parse_utils import get_short_name
from pytorch_nndct.qproc import base as qproc
from pytorch_nndct.qproc.ModuleHooker import ModuleHooker
from pytorch_nndct.quantization import model_topo as model_topo_mod
from pytorch_nndct.quantization import module_transform
from pytorch_nndct.quantization import config as config_mod
from pytorch_nndct.quantization import transforms as transforms_mod
from pytorch_nndct.utils import logging
from pytorch_nndct.utils import module_util as mod_util
def non_quantizer_parameters(model):
  """Return all parameters of *model* except quantizer ones.

  The complement of quantizer_parameters(): everything whose qualified
  name does not contain 'log_threshold'.
  """
  selected = []
  for name, param in model.named_parameters():
    if 'log_threshold' not in name:
      selected.append(param)
  return selected
import abc
from torch import nn
from nndct_shared.base import NNDCT_OP as OpTypes
from nndct_shared.optimization.commander import OptimizeCommander
from nndct_shared.utils import registry
from pytorch_nndct import parse
from pytorch_nndct.nn import quantization as nnq
from pytorch_nndct.nn.nonlinear import mode
from pytorch_nndct.qproc.ModuleHooker import ModuleHooker
from pytorch_nndct.quantization import config as config_mod
from pytorch_nndct.quantization import model_topo as model_topo_mod
from pytorch_nndct.quantization import module_transform as mt
from pytorch_nndct.quantization import transforms as transforms_mod
from pytorch_nndct.utils import logging
from pytorch_nndct.utils import module_util as mod_util
class FP8QuantizeScheme(QuantizeScheme):
  """Quantization scheme that rewrites eligible layers to use FP8 quantizers.

  NOTE(review): the QuantizeScheme base class and the module-level
  _spec_generator registry are defined elsewhere in this file and are not
  visible in this chunk — confirm their contract before editing.
  """
  def get_transforms(self):
    """Return the module transforms applied when quantizing the model.

    Activation replacements are skipped when the runtime config requests
    no approximation of nonlinear functions.
    """
    gemm_layer_transforms = [
        transforms_mod.QuantizeConv2dBatchNorm(),
        transforms_mod.QuantizeConv3dBatchNorm(),
        transforms_mod.QuantizeConvNd(),
        transforms_mod.QuantizeLinear()
    ]
    activation_transforms = [
        transforms_mod.ReplaceSoftmax(),
        transforms_mod.ReplaceSigmoid(),
        transforms_mod.ReplaceTanh(),
        transforms_mod.ReplaceGELU(),
        transforms_mod.ReplaceLayerNorm(),
    ]
    if mode.is_no_approx(self.runtime_config.approx_mode):
      return gemm_layer_transforms
    else:
      return gemm_layer_transforms + activation_transforms
  def get_runtime_specification(self):
    """Build a node-name -> LayerRuntimeSpec mapping for the parsed graph.

    Raises:
      ValueError: if a spec generator returns anything other than a
        LayerRuntimeSpec.
    """
    node_to_spec = {}
    for node in self.graph.nodes:
      # Dispatch on op type; unregistered ops fall back to default_spec.
      generator = _spec_generator.get(node.op.type, self.__class__.default_spec)
      spec = generator(self)
      if not spec or not isinstance(spec, config_mod.LayerRuntimeSpec):
        raise ValueError(
            'Expecting a LayerRuntimeSpec object, but got {}'.format(
                type(spec)))
      node_to_spec[node.name] = spec
    return node_to_spec
  def _fp_args_from_runtime_config(self):
    """Collect FP8 quantizer keyword arguments from the runtime config."""
    return {
        'exponent_bits': self.runtime_config.exp_bits,
        'bias_mode': self.runtime_config.exp_bias_mode,
        'exponent_bias': self.runtime_config.exp_bias,
        'round_mode': self.runtime_config.round_mode,
    }
  def default_spec(self):
    """Spec with no quantizers attached; carries only the runtime config."""
    return config_mod.LayerRuntimeSpec(self.runtime_config)
  def conv_spec(self):
    """Spec for convolutions: FP8 on input and weight, identity on bias."""
    fp_args = self._fp_args_from_runtime_config()
    spec = self.default_spec()
    spec.add_input_quantizer(nnq.FP8Quantizer(**fp_args))
    spec.add_weight_quantizer('weight', nnq.FP8Quantizer(**fp_args))
    spec.add_weight_quantizer('bias', nn.Identity())
    return spec
  def depth_conv_spec(self):
    """Spec for depthwise convolutions (same layout as conv_spec)."""
    fp_args = self._fp_args_from_runtime_config()
    spec = self.default_spec()
    spec.add_input_quantizer(nnq.FP8Quantizer(**fp_args))
    spec.add_weight_quantizer('weight', nnq.FP8Quantizer(**fp_args))
    spec.add_weight_quantizer('bias', nn.Identity())
    return spec
  def linear_spec(self):
    """Spec for linear/dense layers (same layout as conv_spec)."""
    fp_args = self._fp_args_from_runtime_config()
    spec = self.default_spec()
    spec.add_input_quantizer(nnq.FP8Quantizer(**fp_args))
    spec.add_weight_quantizer('weight', nnq.FP8Quantizer(**fp_args))
    spec.add_weight_quantizer('bias', nn.Identity())
    return spec
  def matmul_spec(self):
    """Spec for matmul: FP8 on both inputs, FP32 pass-through on output."""
    fp_args = self._fp_args_from_runtime_config()
    spec = self.default_spec()
    spec.add_input_quantizer(nnq.FP8Quantizer(**fp_args))
    spec.add_input_quantizer(nnq.FP8Quantizer(**fp_args))
    spec.add_output_quantizer(nnq.FP32Quantizer())
    return spec
  # NOTE(review): the opening line of this registration decorator (e.g.
  # "@_register_spec([") appears to be missing from this chunk — confirm
  # against the original file before editing this fragment.
      OpTypes.ADAPTIVEAVGPOOL2D, OpTypes.ADD, OpTypes.SIGMOID, OpTypes.TANH,
      OpTypes.SOFTMAX, OpTypes.GELU, OpTypes.RELU, 'aten::silu'
  ])
  def input_bfloat16_spec(self):
    """Spec that quantizes the input to bfloat16 (ops listed above)."""
    spec = self.default_spec()
    spec.add_input_quantizer(nnq.BFloat16Quantizer())
    return spec
def quantize_model(model,
                   inputs,
                   exp_bits=5,
                   exp_bias_mode='ieee',
                   exp_bias=None):
  """Quantize *model* to FP8 and return the transformed model.

  Args:
    model: torch.nn.Module to quantize.
    inputs: example inputs used to parse the model into a graph.
    exp_bits: number of exponent bits of the FP8 format (default 5).
    exp_bias_mode: how the exponent bias is chosen (default 'ieee').
    exp_bias: explicit exponent bias, or None to derive it from the mode.
  """
  graph = parse.TorchParser()(model._get_name(), model, inputs)
  runtime_config = config_mod.RuntimeConfig()
  runtime_config.exp_bits = exp_bits
  runtime_config.exp_bias_mode = exp_bias_mode
  runtime_config.exp_bias = exp_bias
  return FP8QuantizeScheme(model, graph, runtime_config).apply()
import copy
import numpy as np
from abc import ABC, abstractmethod
import torch
from nndct_shared.base import key_names, NNDCT_OP
from nndct_shared.utils import tensor_util, NndctOption, NndctScreenLogger, QWarning, QError
from pytorch_nndct.nn import fake_quantize_per_tensor
def has_inf_nan():
  """Tell whether this torch build provides both torch.isinf and torch.isnan."""
  return all(hasattr(torch, name) for name in ("isinf", "isnan"))
def is_valid_tensor_for_quantizer(tensor):
  """Return False when *tensor* contains any inf or nan value, else True.

  Torch builds too old to expose the inf/nan predicates cannot be checked,
  so the function is deliberately permissive and returns True for them
  (matching the original behavior).
  """
  data = tensor.detach()
  if hasattr(torch, "isfinite"):
    # Single fused pass: finite means neither inf nor nan.
    return bool(torch.isfinite(data).all())
  if hasattr(torch, "isinf") and hasattr(torch, "isnan"):
    return not (torch.isinf(data).any() or torch.isnan(data).any())
  # Very old torch: cannot test, assume the tensor is valid.
  return True
import copy
import numpy as np
from abc import ABC, abstractmethod
import torch
from nndct_shared.base import key_names, NNDCT_OP
from nndct_shared.utils import tensor_util, NndctOption, NndctScreenLogger, QWarning, QError
from pytorch_nndct.nn import fake_quantize_per_tensor
def convert_datatype_to_pytorch_type(datatype):
  """Map a datatype string to its torch dtype; unknown values pass through."""
  if datatype == 'bfloat16':
    return torch.bfloat16
  if datatype == 'float16':
    return torch.float16
  if datatype == 'float32':
    return torch.float
  return datatype
import copy
import numpy as np
from abc import ABC, abstractmethod
import torch
from nndct_shared.base import key_names, NNDCT_OP
from nndct_shared.utils import tensor_util, NndctOption, NndctScreenLogger, QWarning, QError
from pytorch_nndct.nn import fake_quantize_per_tensor
def convert_datatype_to_index(datatype):
  """Map a float datatype string to its numeric index; unknowns pass through."""
  if datatype == 'bfloat16':
    return 1
  if datatype == 'float16':
    return 2
  if datatype == 'float32':
    return 3
  return datatype
import sys
import numpy as np
from abc import ABC, abstractmethod
from scipy.stats import entropy
from scipy import stats
import torch
from collections import Counter
import pytorch_nndct as py_nndct
from pytorch_nndct.nn.modules.fix_ops import diffs_fix_pos
from nndct_shared.utils import NndctOption, NndctScreenLogger, QError, QWarning
from nndct_shared.base import NNDCT_OP
from pytorch_nndct.nn import fake_quantize_per_tensor, fake_quantize_per_channel
from pytorch_nndct.nn import fake_quantize_per_tensor_tensorrt, fake_quantize_per_channel_tensorrt
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
# Op types whose per-channel quantization axis is 0 (output channels):
# standard/depthwise convolutions and fully-connected layers.
_CONV_LINEAR_TYPES = [NNDCT_OP.CONV2D, NNDCT_OP.DEPTHWISE_CONV2D, \
                      NNDCT_OP.CONV3D, NNDCT_OP.DEPTHWISE_CONV3D, \
                      NNDCT_OP.DENSE, NNDCT_OP.LINEAR]
# Transposed convolutions keep output channels on axis 1, so per-channel
# quantization uses axis 1 for these types (see create_quant_algo).
_CONV_TRANSPOSE_TYPES = [NNDCT_OP.CONVTRANSPOSE2D, NNDCT_OP.DEPTHWISE_CONVTRANSPOSE2D,\
                        NNDCT_OP.CONVTRANSPOSE3D, NNDCT_OP.DEPTHWISE_CONVTRANSPOSE3D]
# Normalization layers quantized per-channel along axis 0.
_CONV_NORM_TYPES = [NNDCT_OP.BATCH_NORM, NNDCT_OP.LAYER_NORM]
class FloatQuantPerChannelAlgo(PerChannelQuantAlgo):
  """Per-channel float-scale calibration algorithm.

  Tracks running per-channel min/max statistics over calibration batches and
  derives an affine (scale, zero_point) pair per channel along ``self._axis``.
  """

  def __init__(self, config, axis):
    super().__init__(config, axis)

  def calibrate(self, tensor):
    """Fold the per-channel min/max of *tensor* into the running statistics."""
    with torch.no_grad():
      axis = self._axis if isinstance(self._axis, (list, tuple)) else [self._axis]
      # Reduce over every dimension except the channel axis/axes.
      reduce_axis = [i for i in range(tensor.dim()) if i not in axis]
      local_max, local_min = self.reduce_min_max(tensor, axis=reduce_axis)
      local_max = torch.squeeze(local_max)
      local_min = torch.squeeze(local_min)
      if self._symmetric_mode == "symmetric":
        local_max, local_min = self._get_float_max_min(local_max, local_min)
      if self._float_max is None or self._statistic_local:
        self._float_max = local_max
      else:
        if local_max.shape != self._float_max.shape:
          raise RuntimeError("max shape changed!")
        self._float_max.copy_(torch.max(self._float_max, local_max).data)
      if self._float_min is None or self._statistic_local:
        self._float_min = local_min
      else:
        if local_min.shape != self._float_min.shape:
          # BUGFIX: error message said "mix" instead of "min".
          raise RuntimeError("min shape changed!")
        self._float_min.copy_(torch.min(self._float_min, local_min).data)
      if self._statistic_local:
        self.calib_scale()

  def calib_scale(self):
    """Derive per-channel scale and zero point from the collected min/max."""
    if self._symmetric_mode == "symmetric":
      self._scale = self._float_max / self._quant_max
      self._zero_point = torch.zeros(
          self._scale.shape[0], dtype=torch.int32, device=self._scale.device)
    else:
      self._scale = (self._float_max - self._float_min) / (
          self._quant_max - self._quant_min)
      # BUGFIX: tensors have no .int32() method; cast explicitly instead.
      self._zero_point = (-self._float_min / self._scale).round().to(torch.int32)

  def calib_global_statis(self):
    self.calib_scale()

  def reduce_min_max(self, input, axis=None, keepdims=True):
    """Return the (max, min) of *input* reduced over *axis*.

    BUGFIX: the original signature omitted ``self`` although the method is
    invoked as ``self.reduce_min_max(...)``, which made the keyword ``axis``
    collide with the positional tensor argument.
    """
    with torch.no_grad():
      o_max = input.detach().clone()
      o_min = input.detach().clone()
      if axis is None:
        o_max = torch.max(o_max)
        o_min = torch.min(o_min)
      else:
        if isinstance(axis, int):
          o_max, _ = torch.max(o_max, dim=axis, keepdim=keepdims)
          o_min, _ = torch.min(o_min, dim=axis, keepdim=keepdims)
        else:
          if isinstance(axis, tuple) and len(axis) > input.dim():
            raise ValueError("Cannot reduce more axes than tensor's dim.")
          # Multi-axis reduction: fold one axis at a time with keepdim so the
          # remaining dim indices stay valid, then squeeze at the end.
          for i in axis:
            o_max, _ = torch.max(o_max, dim=i, keepdim=True)
            o_min, _ = torch.min(o_min, dim=i, keepdim=True)
          if not keepdims or o_max.numel() == 1:
            o_max.squeeze_()
            o_min.squeeze_()
      return o_max, o_min

  def _get_float_max_min(self, float_max, float_min):
    """Return a symmetric (max, min) pair for the collected extrema.

    Signed data uses max(|max|, |min|) per channel. Unsigned data that turns
    out to contain negative values flips the algorithm into signed mode and
    recomputes the integer range accordingly.
    """
    if self._signed:
      float_max = float_max.abs()
      float_min = float_min.abs()
      stacked = torch.stack((float_max, float_min), dim=0)
      stack_max, _ = torch.max(stacked, dim=0)
      return stack_max, -stack_max
    amin = float_min.min()
    if amin >= 0:
      # All values non-negative: the usable range is [0, float_max].
      # BUGFIX: callers unpack (max, min); the original returned (0, float_max)
      # in swapped order, which zeroed the scale.
      return float_max, 0
    # Negative values in supposedly-unsigned data: fall back to signed mode.
    self._quant_max = int(2 ** (self._bitwidth - 1)) - 1
    if not self._narrow_range:
      self._quant_min = -int(2 ** (self._bitwidth - 1))
    else:
      self._quant_min = -int(2 ** (self._bitwidth - 1)) + 1
    float_max = float_max.abs()
    float_min = float_min.abs()
    stacked = torch.stack((float_max, float_min), dim=0)
    stack_max, _ = torch.max(stacked, dim=0)
    self._signed = True
    return stack_max, -stack_max
class PowerofTwoQuantPerChannelAlgo(PerChannelQuantAlgo):
  """Per-channel power-of-two calibration: one fix position per channel.

  Uses the diffs_fix_pos kernel on each channel slice to find the best
  binary-point position, then converts positions to scales.
  """
  def __init__(self, config, axis, node_type):
    super().__init__(config, axis)
    self._fix_pos = None        # per-channel fix positions (tensor)
    self._node_type = node_type # NNDCT op type of the owning node
    self._statistic_local = True
  def calibrate(self, tensor):
    """Search the per-channel fix position of *tensor* and update the scale."""
    scope = 5
    with torch.no_grad():
      # Search scope: "diffs" explores 5 candidate positions, "maxmin" one.
      if self._method == "diffs":
        scope = 5
      elif self._method == "maxmin":
        scope = 1
      # Graph inputs/quant stubs always use the narrow search.
      if (self._node_type in [NNDCT_OP.INPUT, NNDCT_OP.QUANT_STUB]):
        scope = 1
      # Numeric rounding-method codes consumed by diffs_fix_pos:
      # 2=half_up, 6=half_down, 3=std_round, 8=half_even (-1 = torch quantizer).
      if self._round_method == "half_up":
        mth = 2
      elif self._round_method == "half_down":
        mth = 6
      elif self._round_method == "std_round":
        mth = 3
      elif self._round_method == "half_even":
        if NndctOption.nndct_use_torch_quantizer.value is True:
          mth = -1
        else:
          mth = 8
      device = tensor.device
      out_num = tensor.shape[self._axis]
      Tbuffer = torch.empty_like(tensor).to(device)
      self._fix_pos = torch.ones(out_num, dtype=tensor.dtype).to(device)
      self._calib_cnt = self._calib_cnt + 1
      # py_nndct.nn.NndctDiffsFixPosChannel(
      #     Tinput = tensor,
      #     Tbuffer = Tbuffer,
      #     Tfixpos = self._fix_pos,
      #     axis = self._axis,
      #     bit_width = self.bitwidth,
      #     scope = scope,
      #     method = mth)
      # One slice per channel along the quantization axis.
      input_split = torch.split(tensor, 1, dim=self._axis)
      buffer_split = torch.split(Tbuffer, 1, dim=self._axis)
      # TODO(@kewang): The split is a tensor view operation. Is it neccessary to clone tensor before calib and test ?
      for i in range(len(input_split)):
        self._fix_pos[i] = diffs_fix_pos(
            input=input_split[i],
            bit_width=self.bitwidth,
            scope=scope,
            method=mth)
      # Clamp fix positions to the hardware-supported maximum.
      if self._bitwidth <= 8:
        max_fp = NndctOption.nndct_max_fix_position.value
        #self._fix_pos = min(max_fp, self._fix_pos)
        self._fix_pos = torch.where(self._fix_pos.long()>max_fp, max_fp, self._fix_pos.long())
      else:
        #self._fix_pos = min(15, self._fix_pos)
        self._fix_pos = torch.where(self._fix_pos.long()>15, 15, self._fix_pos.long())
      self.calib_scale()
      # if self._statistic_local:
      #   self.calib_scale()
  def calib_scale(self):
    """Convert per-channel fix positions to scales (scale = 2**-fix_pos)."""
    try:
      # Both where-branches compute 2**(-fix_pos); kept as in the original.
      self._scale = torch.where(self._fix_pos.to(torch.float32)>0, 1.0/2**self._fix_pos.to(torch.float32), 2**(-self._fix_pos.to(torch.float32)))
    except OverflowError as e:
      print("{}".format(repr(e)))
    self._zero_point = torch.zeros(self._scale.shape[0], dtype=torch.int32, device=self._scale.device)
    self._float_max = self._scale*self._quant_max
  def calib_global_statis(self):
    # Power-of-two scales are recomputed on every calibrate() call, so there
    # is nothing to finalize globally.
    #self.calib_scale()
    pass
class MaxMinQuantPerTensorAlgo(PerTensorQuantAlgo):
  """Per-tensor calibration that tracks the running global min/max."""

  def __init__(self, config):
    super().__init__(config)

  def calibrate(self, tensor):
    """Fold the min/max of *tensor* into the running statistics."""
    self._calib_cnt = self._calib_cnt + 1
    tensor_max = torch.max(tensor)
    tensor_min = torch.min(tensor)
    if self._symmetric_mode == "symmetric":
      # _get_float_max_min (inherited) symmetrizes the bounds.
      new_max, new_min = self._get_float_max_min(tensor_max, tensor_min)
    else:
      new_max = tensor_max.item()
      new_min = tensor_min.item()
    if self._float_max is None or self._statistic_local:
      self._float_max = new_max
    else:
      self._float_max = new_max if new_max > self._float_max else self._float_max
    if self._float_min is None or self._statistic_local:
      self._float_min = new_min
    else:
      self._float_min = new_min if new_min < self._float_min else self._float_min
    if self._statistic_local:
      self.calib_scale()

  def calib_scale(self):
    """Compute (scale, zero_point) from the tracked extrema."""
    if self._symmetric_mode == "symmetric":
      self._scale = self._float_max / self._quant_max
      self._zero_point = 0
    else:
      self._scale = (self._float_max - self._float_min) / (
          self._quant_max - self._quant_min)
      # BUGFIX: round() yields a plain number here, which has no .int32()
      # method; use int() for the integer zero point instead.
      self._zero_point = int(round(-self._float_min / self._scale))

  def calib_global_statis(self):
    self.calib_scale()
class PercentileQuantPerTensorAlgo(HistogramQuantPerTensorAlgo):
  """Per-tensor calibration that clips at a configured histogram percentile."""

  def __init__(self, config):
    super().__init__(config)

  def calib_scale(self, calib_hist, calib_bin_edges):
    """Pick amax so that ``percentage`` percent of values fall inside range.

    Args:
      calib_hist: numpy histogram counts collected during calibration.
      calib_bin_edges: numpy bin edges matching *calib_hist*.
    """
    pct = self._config["percentage"]
    if not 0 <= pct <= 100:
      raise ValueError("Invalid percentage, Must be in range 0 <= percentage <= 100")
    # The normalized cumulative histogram is the empirical CDF; the first bin
    # edge whose CDF reaches pct/100 becomes the clipping amax.
    cdf = np.cumsum(calib_hist / calib_hist.sum())
    cut = np.searchsorted(cdf, pct / 100)
    calib_amax = calib_bin_edges[cut]
    self._scale = calib_amax / self._quant_max
    self._zero_point = 0
    self._float_max = calib_amax
class MSEQuantPerTensorAlgo(HistogramQuantPerTensorAlgo):
  """Per-tensor calibration choosing the amax that minimizes quantization MSE."""

  def __init__(self, config):
    super().__init__(config)

  def calib_scale(self, calib_hist, calib_bin_edges):
    """Sweep candidate amax values over the histogram; keep the MSE-optimal one.

    Args:
      calib_hist: numpy histogram counts collected during calibration.
      calib_bin_edges: numpy bin edges matching *calib_hist*.
    """
    start_bin = NndctOption.nndct_mse_start_bin.value
    stride = NndctOption.nndct_mse_stride.value
    counts = torch.from_numpy(calib_hist[:]).float()
    edges = torch.from_numpy(calib_bin_edges[:]).float()
    # Bin centers are the representative values that get fake-quantized.
    centers = (edges[1:] + edges[:-1]) / 2
    mses = []
    arguments = []
    for i in range(start_bin, len(centers), stride):
      amax = centers[i]
      scale = amax / self._quant_max
      zero_point = 0
      if self._round_method == "half_even":
        quant_centers = fake_quantize_per_tensor_tensorrt(
            centers, amax, self._quant_min, self._quant_max)
      else:
        # Numeric rounding codes: 2=half_up, 6=half_down, 3=std_round.
        if self._round_method == "half_up":
          method = 2
        elif self._round_method == "half_down":
          method = 6
        elif self._round_method == "std_round":
          method = 3
        # BUGFIX: guard the *local* candidate scale against underflow before
        # dividing. The original guarded self._scale, which may still be None
        # at this point and is not the value used on the next line.
        scale = scale if scale > sys.float_info.min else 2**(-18)
        quant_centers = fake_quantize_per_tensor(
            centers, 1.0 / scale, zero_point,
            self._quant_min, self._quant_max, method, True)
      # Histogram-weighted mean squared quantization error for this amax.
      mse = ((quant_centers - centers)**2 * counts).mean().cpu().detach().item()
      mses.append(mse)
      arguments.append(i)
    argmin = np.argmin(mses)
    calib_max = centers[arguments[argmin]].cpu().detach().item()
    self._scale = calib_max / self._quant_max
    self._zero_point = 0
    self._float_max = calib_max
class EntropyQuantPerTensorAlgo(HistogramQuantPerTensorAlgo):
  """Per-tensor calibration picking amax by minimizing KL divergence.

  TensorRT-style entropy calibration: candidate clip points are swept over
  the histogram; the one whose quantized distribution diverges least from
  the reference distribution wins.
  """
  def __init__(self, config):
    super().__init__(config)
  def calib_scale(self, calib_hist, calib_bin_edges):
    """Choose scale/zero_point/float_max from the calibration histogram.

    Args:
      calib_hist: numpy array of histogram counts.
      calib_bin_edges: numpy array of histogram bin edges.

    Raises:
      RuntimeError: if the redistributed counts fail the sanity check.
    """
    start_bin = NndctOption.nndct_entropy_start_bin.value
    stride = NndctOption.nndct_entropy_stride.value
    #calib_hist = calib_hist.int().cpu().numpy()
    # calib_hist = calib_hist.long().cpu().numpy()
    # calib_bin_edges = calib_bin_edges.cpu().numpy()
    def _normalize_distr(distr):
      # NOTE(review): rebinding 'distr' does not modify the caller's array;
      # scipy.stats.entropy normalizes its inputs itself, so this appears to
      # be effectively a no-op — confirm before relying on it.
      summ = np.sum(distr)
      if summ != 0:
        distr = distr / summ
    # NOTE(review): calib_hist[:] on a numpy array is a *view*, so the next
    # assignment mutates the caller's histogram in place — confirm intended.
    bins = calib_hist[:]
    bins[0] = bins[1]
    total_data = np.sum(bins)
    divergences = []
    arguments = []
    # Number of quantized bins: 2^(bitwidth-1) signed, 2^bitwidth unsigned.
    unsigned_bit = 0 if self._signed else 1
    nbins = 1 << (self.bitwidth - 1 + unsigned_bit)
    starting = start_bin
    stop = len(bins)
    new_density_counts = np.zeros(nbins, dtype=np.float64)
    for i in range(starting, stop + 1, stride):
      new_density_counts.fill(0)
      # Map the first i histogram bins onto nbins quantized buckets.
      space = np.linspace(0, i, num=nbins + 1)
      digitized_space = np.digitize(range(i), space) - 1
      digitized_space[bins[:i] == 0] = -1
      for idx, digitized in enumerate(digitized_space):
        if digitized != -1:
          new_density_counts[digitized] += bins[idx]
      # Average each bucket's mass over the source bins it absorbed.
      counter = Counter(digitized_space)
      for key, val in counter.items():
        if key != -1:
          new_density_counts[key] = new_density_counts[key] / val
      # Spread the averaged bucket mass back over the original i bins.
      new_density = np.zeros(i, dtype=np.float64)
      for idx, digitized in enumerate(digitized_space):
        if digitized != -1:
          new_density[idx] = new_density_counts[digitized]
      total_counts_new = np.sum(new_density) + np.sum(bins[i:])
      _normalize_distr(new_density)
      # Reference: original distribution with the clipped tail folded into
      # the last kept bin.
      reference_density = np.array(bins[:len(digitized_space)], dtype=np.int64)
      #reference_density = np.array(bins[:len(digitized_space)])
      reference_density[-1] += np.sum(bins[i:])
      total_counts_old = np.sum(reference_density)
      if round(total_counts_new) != total_data or round(total_counts_old) != total_data:
        raise RuntimeError("Count mismatch! total_counts_new={}, total_counts_old={}, total_data={}".format(
            total_counts_new, total_counts_old, total_data))
      _normalize_distr(reference_density)
      ent = entropy(reference_density, new_density)
      divergences.append(ent)
      arguments.append(i)
    divergences = np.array(divergences)
    # Among ties, prefer the largest clip point (last argmin).
    last_argmin = len(divergences) - 1 - np.argmin(divergences[::-1])
    calib_max = calib_bin_edges[last_argmin * stride + starting]
    #calib_min = -calib_max
    #self._scale = (calib_max-calib_min)/(self._quant_max-self._quant_min)
    self._scale = calib_max/self._quant_max
    self._zero_point = 0
    self._float_max = calib_max
class PowerofTwoQuantPerTensorAlgo(PerTensorQuantAlgo):
  """Per-tensor power-of-two calibration: one fix position for the tensor."""
  def __init__(self, config, node_type):
    super().__init__(config)
    self._fix_pos = None        # scalar fix position (int after calibrate)
    self._node_type = node_type # NNDCT op type of the owning node
    self._statistic_local = True
  def calibrate(self, tensor):
    """Search the fix position of *tensor* with diffs_fix_pos, update scale."""
    scope = 5
    # Search scope: "diffs" explores 5 candidate positions, "maxmin" one.
    if self._method == "diffs":
      scope = 5
    elif self._method == "maxmin":
      scope = 1
    # Graph inputs/quant stubs always use the narrow search.
    if (self._node_type in [NNDCT_OP.INPUT, NNDCT_OP.QUANT_STUB]):
      scope = 1
    # Numeric rounding-method codes consumed by diffs_fix_pos:
    # 2=half_up, 6=half_down, 3=std_round, 8=half_even (-1 = torch quantizer).
    if self._round_method == "half_up":
      mth = 2
    elif self._round_method == "half_down":
      mth = 6
    elif self._round_method == "std_round":
      mth = 3
    elif self._round_method == "half_even":
      if NndctOption.nndct_use_torch_quantizer.value is True:
        mth = -1
      else:
        mth = 8
    device = tensor.device
    Tbuffer = torch.empty_like(tensor).to(device)
    Tfixpos = torch.tensor([1], dtype=tensor.dtype).to(device)
    self._calib_cnt = self._calib_cnt + 1
    # py_nndct.nn.NndctDiffsFixPos(
    #     Tinput = tensor,
    #     Tbuffer = Tbuffer,
    #     Tfixpos = Tfixpos,
    #     bit_width = self.bitwidth,
    #     range = scope,
    #     method = mth)
    Tfixpos = diffs_fix_pos(
        input=tensor,
        bit_width=self.bitwidth,
        scope=scope,
        method=mth)
    self._fix_pos = (int)(Tfixpos.item())
    # Clamp the fix position to the hardware-supported maximum.
    if self._bitwidth <= 8:
      max_fp = NndctOption.nndct_max_fix_position.value
      self._fix_pos = min(max_fp, self._fix_pos)
    else:
      self._fix_pos = min(15, self._fix_pos)
    self.calib_scale()
    # if self._statistic_local:
    #   self.calib_scale()
  def calib_scale(self):
    """Convert the fix position to a scale (scale = 2**-fix_pos)."""
    try:
      # Both conditional branches compute 2**(-fix_pos); kept as original.
      self._scale = 1.0/2**self._fix_pos if self._fix_pos > 0 else 2**(-self._fix_pos)
    except OverflowError as e:
      print("{}".format(repr(e)))
    self._zero_point = 0
    self._float_max = self._scale*self._quant_max
  def calib_global_statis(self):
    # Power-of-two scale is recomputed on every calibrate() call, so there
    # is nothing to finalize globally.
    #self._scale = (self._float_max-self._float_min)/(self._quant_max-self._quant_min)
    #self.calib_scale()
    pass
class CmpFlag(Enum):
  """
  Enum for comparison flags.

  Selects the relational operator that compare_torch_version() applies
  between the installed torch version and a target version string.
  """
  EQUAL = 0          # ==
  LESS = 1           # <
  LESS_EQUAL = 2     # <=
  GREATER = 3        # >
  GREATER_EQUAL = 4  # >=
  NOT_EQUAL = 5      # !=
def compare_torch_version(compare_type: CmpFlag, version: str):
  """Compare the installed torch version with *version* using *compare_type*.

  Returns the boolean comparison result, or None if *compare_type* is not a
  recognized CmpFlag value.
  """
  installed = LooseVersion(torch.__version__)
  target = LooseVersion(version)
  if compare_type == CmpFlag.EQUAL:
    return installed == target
  if compare_type == CmpFlag.LESS:
    return installed < target
  if compare_type == CmpFlag.LESS_EQUAL:
    return installed <= target
  if compare_type == CmpFlag.GREATER:
    return installed > target
  if compare_type == CmpFlag.GREATER_EQUAL:
    return installed >= target
  if compare_type == CmpFlag.NOT_EQUAL:
    return installed != target
def create_quant_algo(quant_strategy_info, node):
  """Build a quantization-algorithm object for *node* from a strategy config.

  Args:
    quant_strategy_info: Dict-like config providing 'scale_type' and
      'granularity'; per-tensor float configs also carry 'method'.
    node: Graph node being quantized; node.op.type selects the channel axis
      for per-channel schemes.

  Returns:
    A quant-algo instance, or None when the config matches no known
    combination (e.g. an unrecognized 'method').
  """
  algo_config = quant_strategy_info
  quant_algo = None
  scale_type = algo_config.get("scale_type")
  granularity = algo_config.get("granularity")
  # Per-channel quantization relies on torch features introduced in 1.5.0.
  if granularity == "per_channel":
    if compare_torch_version(CmpFlag.LESS, "1.5.0"):
      NndctScreenLogger().error2user(QError.TORCH_VERSION, f"Torch should uptate to 1.5.0 or higher version if per_channel quantization.")
      exit(2)
  op_type = node.op.type
  if scale_type == "float":
    if granularity == "per_channel":
      # Output-channel axis: dim 0 for conv/linear (and fused conv+norm),
      # dim 1 for transposed conv whose weight layout is (in, out, ...).
      axis = None
      #group = node.node_attr[node.op.AttrName.GROUP]
      if op_type in _CONV_LINEAR_TYPES:
        axis = 0
      elif op_type in _CONV_TRANSPOSE_TYPES:
        axis = 1
      elif op_type in _CONV_NORM_TYPES:
        axis = 0
      else:
        NndctScreenLogger().error2user(QError.QUANT_CONFIG, f"'{op_type}' is not supported in per channel quantization. ")
        exit(2)
      quant_algo = FloatQuantPerChannelAlgo(algo_config, axis)
    elif granularity == "per_tensor":
      method = algo_config.get("method")
      if method == "maxmin":
        quant_algo = MaxMinQuantPerTensorAlgo(algo_config)
      elif method == "percentile":
        quant_algo = PercentileQuantPerTensorAlgo(algo_config)
      elif method == "mse":
        quant_algo = MSEQuantPerTensorAlgo(algo_config)
      elif method == "entropy":
        quant_algo = EntropyQuantPerTensorAlgo(algo_config)
  elif scale_type == "poweroftwo":
    if granularity == "per_tensor":
      quant_algo = PowerofTwoQuantPerTensorAlgo(algo_config, op_type)
    elif granularity == "per_channel":
      axis = None
      #group = node.node_attr[node.op.AttrName.GROUP]
      if op_type in _CONV_LINEAR_TYPES:
        axis = 0
      elif op_type in _CONV_TRANSPOSE_TYPES:
        axis = 1
      # NOTE(review): unlike the float path, an unmatched op_type here falls
      # through with axis=None — confirm PowerofTwoQuantPerChannelAlgo
      # tolerates that.
      quant_algo = PowerofTwoQuantPerChannelAlgo(algo_config, axis, op_type)
  return quant_algo
23,784 | import abc
import copy
from torch import nn
from pytorch_nndct.nn.modules import functional
from pytorch_nndct.utils import logging
from pytorch_nndct.utils import module_util as mod_util
def quantize_input(module, index, quantizer):
  """Insert a quantizer for quantizing the input of the module.

  The module is modified in place: the quantizer is registered as submodule
  'input<index>_quantizer' and a forward-pre-hook routes every positional
  input through its matching quantizer (inputs without one pass through).

  Args:
    module: Input module that we want to quantize.
    index: The index of module's inputs to be quantized.
    quantizer: Module of quantizer to be added.
  """
  name_template = 'input%d_quantizer'
  _missing = object()

  def _forward_pre_hook(self, inputs):
    """Run each positional input through its quantizer, if registered."""
    def _maybe_quantize(pos, value):
      candidate = getattr(self, name_template % pos, _missing)
      return value if candidate is _missing else candidate(value)

    return tuple(_maybe_quantize(pos, value) for pos, value in enumerate(inputs))

  #TODO(yuwang): Support torch.nn.Sequential
  module.add_module(name_template % index, quantizer)

  flag = '_input_quantizer_hook_registered'
  if hasattr(module, flag):
    # Hook already installed by a previous call; only the submodule above
    # needed adding.
    return
  setattr(module, flag, True)
  # Register the quantizer hook as the last entry so all pre-existing
  # forward pre hooks run before it.
  handle = module.register_forward_pre_hook(_forward_pre_hook)
  module._forward_pre_hooks.move_to_end(handle.id, last=True)
def quantize_output(module, quantizer):
  """Insert a quantizer for quantizing the output of the module.

  The module is modified in place: the quantizer is registered as submodule
  'output_quantizer' and a forward hook routes the output through it. A
  module may carry at most one output quantizer.

  Args:
    module: Input module that we want to quantize.
    quantizer: Module of quantizer to be added.

  Raises:
    RuntimeError: if the module already has an output quantizer.
  """
  flag = '_output_quantizer_hook_registered'
  if hasattr(module, flag):
    raise RuntimeError('Insert multiple quantizers to a module: {}'.format(
        type(module)))

  def _hook(self, inputs, output):
    """Quantize the single output tensor."""
    return self.output_quantizer(output)

  def _hook_max(self, inputs, output):
    """Quantize only the values of functional.Max's (values, indices) pair."""
    return (self.output_quantizer(output[0]), output[1])

  #TODO(yuwang): Support torch.nn.Sequential
  module.add_module('output_quantizer', quantizer)
  setattr(module, flag, True)
  # Register the quantizer hook as the first entry so all pre-existing
  # post-forward hooks run after it.
  selected = _hook_max if isinstance(module, functional.Max) else _hook
  handle = module.register_forward_hook(selected)
  module._forward_hooks.move_to_end(handle.id, last=False)
import logging as _logging
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import WARN
from logging import NOTSET
The provided code snippet includes necessary dependencies for implementing the `insert_quantizer` function. Write a Python function `def insert_quantizer(model_topo)` to solve the following problem:
Insert quantizer for quantizing input/output of a module. The quantization of weight/bias is handled by quantized module itself.
Here is the function:
def insert_quantizer(model_topo):
  """Insert quantizer for quantizing input/output of a module.

  Walks the topology once, skipping nodes without a runtime spec and nodes
  already processed, and wires each spec's input/output quantizers into the
  node's module. The quantization of weight/bias is handled by the
  quantized module itself.

  Raises:
    NotImplementedError: when a node declares more than one output quantizer.
  """
  seen = set()
  for node in model_topo.nodes:
    rt_spec = node.spec
    if not rt_spec or node.name in seen:
      continue
    logging.vlog(
        3, 'Inserting quantizer for node {}: {}'.format(node.name, rt_spec))
    seen.add(node.name)
    for index, quantizer in enumerate(rt_spec.input_quantizers):
      quantize_input(node.module, index, quantizer)
    out_quantizers = rt_spec.output_quantizers
    if len(out_quantizers) > 1:
      raise NotImplementedError('Multiple outputs tensor not supported yet.')
    if out_quantizers:
      quantize_output(node.module, out_quantizers[0])
23,785 | from torch import nn
import numpy as np
import torch
import types
from nndct_shared.base import NNDCT_OP
from nndct_shared.pruning.pruner import load_pruning_info
from pytorch_nndct.utils import TorchGraphSymbol
from pytorch_nndct.utils import tensor_util
def get_module(model, submodule_key):
  """Return the object reached by following the dotted path *submodule_key*
  through attribute lookups starting at *model*."""
  target = model
  for attr in submodule_key.split('.'):
    target = getattr(target, attr)
  return target
def set_module(model, submodule_key, module):
  """Replace the attribute addressed by the dotted path *submodule_key* of
  *model* with *module* (mutates the parent object in place)."""
  *parent_path, leaf_name = submodule_key.split('.')
  parent = model
  for attr in parent_path:
    parent = getattr(parent, attr)
  setattr(parent, leaf_name, module)
def copy_module_weights(src_module, dst_module):
  """Copy weights between a plain torch module and its dynamic/wrapped twin.

  Each branch is asymmetric: when src is the plain torch type, parameters
  are copied into the wrapper's inner module (dst.conv / dst.linear /
  dst.bn); otherwise src is assumed to be a dynamic module exposing
  get_active_filter/get_active_bias (or an inner .bn) and the slice
  matching dst's current dimensions is extracted.
  NOTE(review): assumes exactly one side of each pair is the plain torch
  type — both-plain or both-wrapped pairs would fail; confirm callers.
  """
  if isinstance(src_module,
                (torch.nn.Conv2d, torch.nn.ConvTranspose2d)) or isinstance(
                    dst_module, (torch.nn.Conv2d, torch.nn.ConvTranspose2d)):
    if isinstance(src_module, (torch.nn.Conv2d, torch.nn.ConvTranspose2d)):
      # Plain conv -> wrapper: the wrapper exposes the real conv as .conv.
      dst_module.conv.weight.data.copy_(src_module.weight.data)
      if src_module.bias is not None:
        dst_module.conv.bias.data.copy_(src_module.bias.data)
    else:
      # Dynamic conv -> plain conv: take the active sub-filter matching
      # dst's channel/kernel configuration.
      dst_module.weight.data.copy_(
          src_module.get_active_filter(dst_module.in_channels,
                                       dst_module.out_channels,
                                       dst_module.kernel_size).data)
      if dst_module.bias is not None:
        dst_module.bias.data.copy_(
            src_module.get_active_bias(dst_module.out_channels).data)
  elif isinstance(src_module, torch.nn.Linear) or isinstance(
      dst_module, torch.nn.Linear):
    if isinstance(src_module, torch.nn.Linear):
      # Plain linear -> wrapper (.linear holds the real layer).
      dst_module.linear.weight.data.copy_(src_module.weight.data)
      if src_module.bias is not None:
        dst_module.linear.bias.data.copy_(src_module.bias.data)
    else:
      # Dynamic linear -> plain linear.
      dst_module.weight.data.copy_(
          src_module.get_active_filter(dst_module.in_features,
                                       dst_module.out_features).data)
      if dst_module.bias is not None:
        dst_module.bias.data.copy_(
            src_module.get_active_bias(dst_module.out_features).data)
  elif isinstance(src_module, torch.nn.BatchNorm2d) or isinstance(
      dst_module, torch.nn.BatchNorm2d):
    if isinstance(src_module, torch.nn.BatchNorm2d):
      # Plain BN -> wrapper: copy the leading num_features slice of every
      # statistic into the wrapper's inner .bn.
      dst_module.bn.weight.data.copy_(
          src_module.weight.data[:dst_module.bn.num_features])
      dst_module.bn.bias.data.copy_(
          src_module.bias.data[:dst_module.bn.num_features])
      dst_module.bn.running_mean.data.copy_(
          src_module.running_mean.data[:dst_module.bn.num_features])
      dst_module.bn.running_var.data.copy_(
          src_module.running_var.data[:dst_module.bn.num_features])
    else:
      # Wrapper BN -> plain BN: symmetric slice copy in the other direction.
      dst_module.weight.data.copy_(
          src_module.bn.weight.data[:dst_module.num_features])
      dst_module.bias.data.copy_(
          src_module.bn.bias.data[:dst_module.num_features])
      dst_module.running_mean.data.copy_(
          src_module.bn.running_mean.data[:dst_module.num_features])
      dst_module.running_var.data.copy_(
          src_module.bn.running_var.data[:dst_module.num_features])
def replace_modules(model, module_names, new_modules, copy_ckpt=False):
  """Replace the modules named *module_names* in *model* with *new_modules*.

  Pre-forward hooks of the first replaced module move to the first new
  module; post-forward hooks of the last replaced module move to the last
  new one. When fewer new modules than names are given, the remainder is
  filled with torch.nn.Identity.

  Args:
    model: Root module, modified in place.
    module_names: Dotted path(s) of submodules to replace.
    new_modules: Replacement module(s); len <= len(module_names).
    copy_ckpt: When True, also copy weights via copy_module_weights.
  """
  if not isinstance(module_names, (tuple, list)):
    module_names = [module_names]
  if not isinstance(new_modules, (tuple, list)):
    new_modules = [new_modules]
  assert len(new_modules) <= len(module_names)

  modules = [get_module(model, name) for name in module_names]

  # Move pre-forward hooks of the first module to the first new module.
  # Fix: iterate over a snapshot — deleting from the hook dict while
  # iterating it raises "dictionary changed size during iteration".
  for handle_id, pre_hook_fn in list(modules[0]._forward_pre_hooks.items()):
    new_modules[0].register_forward_pre_hook(pre_hook_fn)
    del modules[0]._forward_pre_hooks[handle_id]
  # Move post forward hooks of the last module to resulting fused module.
  for handle_id, hook_fn in list(modules[-1]._forward_hooks.items()):
    new_modules[-1].register_forward_hook(hook_fn)
    del modules[-1]._forward_hooks[handle_id]

  # Use Identity to fill in the missing modules, keeping each filler's
  # train/eval mode consistent with the modules being replaced. (The
  # original indexed new_modules[i] here, which could skip the
  # just-appended Identity — TODO confirm intent was the appended module.)
  for _ in range(len(module_names) - len(new_modules)):
    identity = torch.nn.Identity()
    identity.training = modules[0].training
    new_modules.append(identity)

  for i, name in enumerate(module_names):
    set_module(model, name, new_modules[i])
    if copy_ckpt:
      copy_module_weights(modules[i], new_modules[i])
23,786 | from torch import nn
import numpy as np
import torch
import types
from nndct_shared.base import NNDCT_OP
from nndct_shared.pruning.pruner import load_pruning_info
from pytorch_nndct.utils import TorchGraphSymbol
from pytorch_nndct.utils import tensor_util
def state_dict_from_node(node):
def create_module_by_node(module_cls, node):
  """Instantiate *module_cls* from a graph node.

  The node's op configs become constructor keyword arguments, and the
  node's parameters (keyed by their last dotted component) are loaded as
  the module's state dict.
  """
  config_kwargs = {name: node.op.get_config(name) for name in node.op.configs}
  module = module_cls(**config_kwargs)
  local_state = {
      full_name.rsplit('.', 1)[-1]: tensor
      for full_name, tensor in state_dict_from_node(node).items()
  }
  module.load_state_dict(local_state)
  return module
23,787 | from torch import nn
import numpy as np
import torch
import types
from nndct_shared.base import NNDCT_OP
from nndct_shared.pruning.pruner import load_pruning_info
from pytorch_nndct.utils import TorchGraphSymbol
from pytorch_nndct.utils import tensor_util
def slim_model_from_state_dict(model, state_dict):
  """Reshape *model*'s layers to match the tensors in *state_dict*, then load it.

  Args:
    model: An torch.nn.Module instance to load state dict.
    state_dict: A state dict to be loaded.

  Returns:
    A modified model that matchs weights shape in the state dict.
  """
  for name, module in model.named_modules():
    w_key = name + '.weight'
    b_key = name + '.bias'
    if isinstance(module, (nn.BatchNorm2d, nn.BatchNorm3d)):
      module.weight = nn.Parameter(state_dict[w_key])
      module.bias = nn.Parameter(state_dict[b_key])
      module.running_mean = state_dict[name + '.running_mean']
      module.running_var = state_dict[name + '.running_var']
      module.num_features = module.weight.size(0)
    elif isinstance(module, (nn.Conv2d, nn.Conv3d,
                             nn.ConvTranspose2d, nn.ConvTranspose3d)):
      assert module.groups == 1
      module.weight = nn.Parameter(state_dict[w_key])
      if b_key in state_dict:
        module.bias = nn.Parameter(state_dict[b_key])
      # Transposed convs store weight as (in, out, ...); plain convs as
      # (out, in, ...).
      if isinstance(module, (nn.ConvTranspose2d, nn.ConvTranspose3d)):
        module.in_channels = module.weight.size(0)
        module.out_channels = module.weight.size(1)
      else:
        module.out_channels = module.weight.size(0)
        module.in_channels = module.weight.size(1)
    elif isinstance(module, nn.Linear):
      module.weight = nn.Parameter(state_dict[w_key])
      if b_key in state_dict:
        module.bias = nn.Parameter(state_dict[b_key])
      module.out_features = module.weight.size(0)
      module.in_features = module.weight.size(1)
    else:
      # Other module types carry no slimmable parameters here.
      pass
  model.load_state_dict(state_dict)
  return model
def load_pruning_info(filepath):
  """Deserialize pruning info previously saved as JSON at *filepath*."""
  with open(filepath) as stream:
    parsed = json.load(stream)
  return ModulePruningInfoGenerator.deserialize(parsed)
The provided code snippet includes necessary dependencies for implementing the `transform_to_slim_model` function. Write a Python function `def transform_to_slim_model(model, state_dict, json_path)` to solve the following problem:
Transform a sparse model to a slim model by pruning results. Args: model: A sparse torch.nn.Module instance. state_dict: State dict of the sparse model to be slimmed and loaded. json_path: File path that saves pruning results. Returns: A slim model.
Here is the function:
def transform_to_slim_model(model, state_dict, json_path):
  """Transform a sparse model to a slim model by pruning results.

  Args:
    model: A sparse torch.nn.Module instance.
    state_dict: State dict of the sparse model; pruned channels are removed
      from its tensors in place.
    json_path: File path that saves pruning results.

  Returns:
    A slim model.
  """
  module_pruning_info = load_pruning_info(json_path).module_pruning_info

  def change_tensor(tensor, pruning_info, axises=(0, 1)):
    """Drop pruned output/input channels from *tensor* (returns np.ndarray).

    axises gives (output_axis, input_axis); transposed convs pass (1, 0).
    """
    tensor = tensor.cpu()
    removed_inputs = pruning_info.removed_inputs
    removed_outputs = pruning_info.removed_outputs
    if tensor.ndim in [2, 4, 5]:
      new_tensor = np.delete(tensor, removed_outputs, axis=axises[0])
      new_tensor = np.delete(new_tensor, removed_inputs, axis=axises[1])
    elif tensor.ndim == 1:
      new_tensor = np.delete(tensor, removed_outputs, axis=0)
    else:
      # Fix: unhandled ranks now pass through untouched; the original fell
      # through and raised UnboundLocalError on 'new_tensor'.
      new_tensor = tensor
    return new_tensor

  for key, module in model.named_modules():
    weight_key = key + '.weight'
    if key not in module_pruning_info:
      continue
    pruning_info = module_pruning_info[key]
    bias_key = key + '.bias'
    if isinstance(module, (nn.BatchNorm2d, nn.BatchNorm3d)):
      # BN tensors are all 1-D, so only removed outputs apply.
      state_dict[weight_key] = change_tensor(state_dict[weight_key],
                                             pruning_info)
      state_dict[bias_key] = change_tensor(state_dict[bias_key], pruning_info)
      running_mean_key = key + '.running_mean'
      state_dict[running_mean_key] = change_tensor(state_dict[running_mean_key],
                                                   pruning_info)
      running_var_key = key + '.running_var'
      state_dict[running_var_key] = change_tensor(state_dict[running_var_key],
                                                  pruning_info)
    elif isinstance(module, (nn.Conv2d, nn.Conv3d)):
      state_dict[weight_key] = change_tensor(state_dict[weight_key],
                                             pruning_info)
      if bias_key in state_dict:
        state_dict[bias_key] = change_tensor(state_dict[bias_key], pruning_info)
    elif isinstance(module, (nn.ConvTranspose2d, nn.ConvTranspose3d)):
      # Transposed conv weights are (in, out, ...): swap the axes.
      state_dict[weight_key] = change_tensor(
          state_dict[weight_key], pruning_info, axises=(1, 0))
      if bias_key in state_dict:
        state_dict[bias_key] = change_tensor(state_dict[bias_key], pruning_info)
    elif isinstance(module, nn.Linear):
      state_dict[weight_key] = change_tensor(state_dict[weight_key],
                                             pruning_info)
      if bias_key in state_dict:
        state_dict[bias_key] = change_tensor(state_dict[bias_key], pruning_info)
    else:
      pass
  return slim_model_from_state_dict(model, state_dict)
23,788 | from torch import nn
import numpy as np
import torch
import types
from nndct_shared.base import NNDCT_OP
from nndct_shared.pruning.pruner import load_pruning_info
from pytorch_nndct.utils import TorchGraphSymbol
from pytorch_nndct.utils import tensor_util
def setattr_if_has(module, name, attr):
  """Assign *attr* to module.<name>, but only when the attribute already
  exists; otherwise raise AttributeError to catch typoed names early."""
  if hasattr(module, name):
    setattr(module, name, attr)
  else:
    raise AttributeError(
        'Attempting to set an non-existing attribute "{}"'.format(name))
23,789 | from torch import nn
import numpy as np
import torch
import types
from nndct_shared.base import NNDCT_OP
from nndct_shared.pruning.pruner import load_pruning_info
from pytorch_nndct.utils import TorchGraphSymbol
from pytorch_nndct.utils import tensor_util
def enable_dump_blob(model, graph=False, mode='print'):
def enable_print_blob(model, graph=False):
  """Register hooks that print every submodule's input/output each forward.

  Thin wrapper over enable_dump_blob with mode='print'; *model* is modified
  in place and nothing is returned.
  """
  enable_dump_blob(model, graph, mode='print')
23,790 | from torch import nn
import numpy as np
import torch
import types
from nndct_shared.base import NNDCT_OP
from nndct_shared.pruning.pruner import load_pruning_info
from pytorch_nndct.utils import TorchGraphSymbol
from pytorch_nndct.utils import tensor_util
def enable_dump_blob(model, graph=False, mode='print'):
  """Instrument *model* so every submodule's input/output blobs are dumped.

  Args:
    model: Module to instrument (modified in place).
    graph: Optional nndct graph; when given, each graph node is attached to
      its module so dumps can show the node index/name.
    mode: 'print' prints blobs during forward; 'save' stashes them on the
      module and adds a model.print_saved_blob() method to dump later.

  Returns:
    The instrumented model.
  """
  node_attr = '_dbg_node'
  prefix_attr = '_dbg_prefix'
  inp_attr = '_dbg_input'
  out_attr = '_dbg_output'

  def save_io(module, input, output):
    # Forward hook for mode='save': stash the latest blobs on the module.
    setattr(module, inp_attr, input)
    setattr(module, out_attr, output)

  def print_saved_io(module):
    saved_inputs = getattr(module, inp_attr, None)
    saved_outputs = getattr(module, out_attr, None)
    if saved_inputs is None or saved_outputs is None:
      # Fix: the original referenced undefined locals node_name/module_name
      # here, raising NameError whenever a module had no saved blob.
      node = getattr(module, node_attr, None)
      module_name = getattr(module, prefix_attr, None)
      print('[WARN] No saved blob: node={}, module={}\n'.format(
          node.name if node is not None else None, module_name))
      return
    print_io(module, saved_inputs, saved_outputs)

  def print_io(module, input, output):
    node = getattr(module, node_attr) if hasattr(module, node_attr) else None
    module_name = getattr(module, prefix_attr) if hasattr(module,
                                                          prefix_attr) else None
    if node:
      print('node({}): {}'.format(node.idx, node.name))
    if module_name:
      print('module: {}({})'.format(module_name, type(module)))
    if hasattr(module, 'export_quant_info'):
      print('quant_info:', module.export_quant_info())
    # saved_inputs/outputs may be empty tuple.
    print_blob(input, 'input')
    print_blob(output, 'output')
    print('')

  def print_blob(blob, prefix):
    # Handle tuples of tensors, a bare tensor, or anything else (printed raw).
    if isinstance(blob, tuple) and len(blob) > 0:
      for idx, tensor in enumerate(blob):
        if isinstance(tensor, torch.Tensor):
          print('{}[{}]: sum={}, dtype={}, shape={}'.format(
              prefix, idx, tensor.sum(), tensor.dtype, tensor.shape))
        else:
          print('{}{}: {}'.format(prefix, idx, tensor))
    elif isinstance(blob, torch.Tensor):
      print('{}: sum={}, dtype={}, shape={}'.format(prefix, blob.sum(),
                                                    blob.dtype, blob.shape))
    else:
      print(prefix, None)

  def print_saved_blob(model):
    model.apply(print_saved_io)

  # Hook the node to module if the graph is provided.
  if graph:
    for node in graph.nodes:
      module = get_module_by_node(model, node)
      if module:
        setattr(module, node_attr, node)

  hook_func = save_io if mode == 'save' else print_io
  for name, module in model.named_modules():
    setattr(module, prefix_attr, name)
    module.register_forward_hook(hook_func)

  if mode == 'save':
    model.print_saved_blob = types.MethodType(print_saved_blob, model)
  return model
def enable_save_blob(model, graph=False):
  """Register hooks that stash every submodule's input/output on the module.

  Thin wrapper over enable_dump_blob with mode='save'; *model* is modified
  in place and nothing is returned.
  """
  enable_dump_blob(model, graph, mode='save')
23,791 | from torch import nn
import numpy as np
import torch
import types
from nndct_shared.base import NNDCT_OP
from nndct_shared.pruning.pruner import load_pruning_info
from pytorch_nndct.utils import TorchGraphSymbol
from pytorch_nndct.utils import tensor_util
def visualize_tensors(model):
  """Attach hooks that plot a histogram of each submodule's output tensor.

  Only tensor outputs are plotted; the root module itself is not hooked.
  """
  from nndct_shared.utils import Plotter

  def _hist_hook(op, _inputs, outputs):
    if isinstance(outputs, torch.Tensor):
      flat = outputs.cpu().detach().numpy().flatten()
      Plotter().plot_hist(op.node.name, flat)

  for submodule in model.modules():
    if submodule is not model:
      submodule.register_forward_hook(_hist_hook)
23,792 | from nndct_shared.pruning.pruning_lib import PruningSpec
from nndct_shared.nndct_graph import Graph
from nndct_shared.pruning.sensitivity import NetSensitivity
from typing import Mapping, List
from pytorch_nndct.utils import TorchGraphSymbol, logging
def extract_scope_name(node_name: str) -> str:
def extract_scope_name_node_name_map(graph: Graph) -> Mapping[str, List[str]]:
class PruningSpec(object):
def __init__(self,
groups=None,
channel_divisible: int = 2,
graph_digest: str = None):
def __str__(self):
def __eq__(self, other):
def from_node_groups(cls, groups: List[NodeGroup], ratio: Union[float, Tuple,
List]):
def channel_divisible(self):
def channel_divisible(self, channel_divisible: int) -> None:
def graph_digest(self) -> str:
def graph_digest(self, graph_digest: str) -> None:
def groups(self):
def add_group(self, group):
def group(self, node_name):
def serialize(self):
def deserialize(cls, data):
])
def calibrate_spec(spec: PruningSpec, graph: Graph) -> PruningSpec:
  """Remap node names in *spec* to the node names used by *graph*.

  NOTE(review): the early `return spec` when a scope has > 0 matching nodes
  makes the renaming below unreachable, and a scope with 0 matches would
  IndexError on [0] — the condition looks inverted; confirm the intended
  guard (e.g. `== 0`). Mirrors calibrate_sens.
  """
  scope_name_node_name_map = extract_scope_name_node_name_map(graph)
  for group in spec.groups:
    for idx, node in enumerate(group.nodes):
      scope_name = extract_scope_name(node)
      assert scope_name in scope_name_node_name_map, f"Missing scope_name: '{scope_name}' in graph"
      if len(scope_name_node_name_map[scope_name]) > 0:
        return spec
      new_name = scope_name_node_name_map[scope_name][0]
      group.nodes[idx] = new_name
  return PruningSpec(spec.groups, spec.channel_divisible)
23,793 | from nndct_shared.pruning.pruning_lib import PruningSpec
from nndct_shared.nndct_graph import Graph
from nndct_shared.pruning.sensitivity import NetSensitivity
from typing import Mapping, List
from pytorch_nndct.utils import TorchGraphSymbol, logging
def extract_scope_name(node_name: str) -> str:
def extract_scope_name_node_name_map(graph: Graph) -> Mapping[str, List[str]]:
class NetSensitivity(object):
def __init__(self, groups: List[GroupMetrics] = [], graph_digest: str = None):
def add_group(self, nodes, metrics, num_groups: int = 1):
def uncompleted_steps(self):
def uncompleted_steps(self, uncompleted_steps):
def graph_digest(self) -> str:
def graph_digest(self, graph_digest: str) -> None:
def groups(self):
def groups(self, groups):
def prunable_groups_by_threshold(self, threshold, excludes=[]):
def __repr__(self):
def calibrate_sens(sens: NetSensitivity, graph: Graph) -> NetSensitivity:
  """Remap node names in *sens* to graph node names and repack the result.

  NOTE(review): mirrors calibrate_spec, including the suspicious early
  `return sens` guard when a scope has matches — left untouched here.
  """
  scope_name_node_name_map = extract_scope_name_node_name_map(graph)
  for group in sens.groups:
    for idx, node in enumerate(group.nodes):
      scope_name = extract_scope_name(node)
      assert scope_name in scope_name_node_name_map, f"Missing scope_name: '{scope_name}' in graph"
      if len(scope_name_node_name_map[scope_name]) > 0:
        return sens
      new_name = scope_name_node_name_map[scope_name][0]
      group.nodes[idx] = new_name
  ret = NetSensitivity()
  ret.groups = sens.groups
  # Fix: the original self-assigned ret.uncompleted_steps, discarding the
  # source value; carry it over from *sens* instead.
  ret.uncompleted_steps = sens.uncompleted_steps
  return ret
23,794 | import torch
def fuse_conv_bn_weight(conv_weight,
                        conv_bias,
                        running_mean,
                        running_var,
                        gamma,
                        beta,
                        eps,
                        transposed=False):
  """Fold batch-norm statistics into a convolution's weight and bias.

  Args:
    conv_weight / conv_bias: Conv parameters; a None bias is treated as zeros.
    running_mean / running_var: BN running statistics.
    gamma / beta: BN affine parameters; None means identity (ones/zeros).
    eps: BN epsilon.
    transposed: True for transposed convs, whose channel dim is axis 1.

  Returns:
    (weight, bias) as torch.nn.Parameter.
  """
  bias = torch.zeros_like(running_mean) if conv_bias is None else conv_bias
  scale = torch.ones_like(running_mean) if gamma is None else gamma
  shift = torch.zeros_like(running_mean) if beta is None else beta
  inv_std = torch.rsqrt(running_var + eps)
  # Broadcast the per-channel factor over the weight's channel axis.
  lead = [1, -1] if transposed else [-1, 1]
  bcast_shape = lead + [1] * (conv_weight.dim() - 2)
  fused_weight = conv_weight * (scale * inv_std).reshape(bcast_shape)
  fused_bias = (bias - running_mean) * inv_std * scale + shift
  return torch.nn.Parameter(fused_weight), torch.nn.Parameter(fused_bias)
def fuse_conv_bn(conv, bn, inplace=True):
  """Fold BatchNorm *bn* into *conv* for inference.

  Args:
    conv: Convolution module (must be in eval mode).
    bn: BatchNorm module (must be in eval mode).
    inplace: When False, fuse into a deep copy and leave *conv* untouched.

  Returns:
    The fused convolution module.
  """
  # Local import: 'copy' is not imported at module scope in this snippet —
  # without it the inplace=False path raised NameError. TODO confirm against
  # the full file's imports.
  import copy

  assert (not (conv.training or
               bn.training)), "Fusion only for evaluation mode!"
  fused_conv = conv if inplace else copy.deepcopy(conv)
  fused_conv.weight, fused_conv.bias = fuse_conv_bn_weight(
      fused_conv.weight, fused_conv.bias, bn.running_mean, bn.running_var,
      bn.weight, bn.bias, bn.eps, conv.transposed)
  return fused_conv
23,801 | import torch
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
class CmpFlag(Enum):
  """Comparison operators accepted by compare_torch_version."""
  EQUAL = 0          # ==
  LESS = 1           # <
  LESS_EQUAL = 2     # <=
  GREATER = 3        # >
  GREATER_EQUAL = 4  # >=
  NOT_EQUAL = 5      # !=
def compare_torch_version(compare_type:CmpFlag, version:str):
  """Compare the installed torch version against *version*.

  Args:
    compare_type: CmpFlag member selecting the comparison operator.
    version: Version string to compare against, e.g. "1.5.0".

  Returns:
    The boolean result, or None for an unrecognized flag (preserving the
    original fall-through behavior).
  """
  current = LooseVersion(torch.__version__)
  target = LooseVersion(version)
  outcomes = {
      CmpFlag.EQUAL: current == target,
      CmpFlag.LESS: current < target,
      CmpFlag.LESS_EQUAL: current <= target,
      CmpFlag.GREATER: current > target,
      CmpFlag.GREATER_EQUAL: current >= target,
      CmpFlag.NOT_EQUAL: current != target,
  }
  return outcomes.get(compare_type)
def support_onnx_export():
  """True when the installed torch is >= 1.7, the minimum for the ONNX
  export path used by this package."""
  return compare_torch_version(CmpFlag.GREATER_EQUAL, "1.7")
23,802 | import contextlib
import re
import itertools
import torch.jit
from torch.jit import _unique_state_dict
from torch.nn import ModuleList
from .schema import SchemaHelper, convert_type_str
from .torch_const import TorchGraphSymbol
from nndct_shared.utils import DeprecatedAPIError, NndctScreenLogger, NndctDebugLogger, NndctOption, GLOBAL_MAP, NNDCT_KEYS
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def remove_outter_model_debug_name(graph):
  """Strip the flatten-wrapper prefix from tensor debug names in *graph*.

  Tracing wraps the user model in FlattenInOutModelForTrace, which mangles
  tensor debug names; every input/output value that carries such a name is
  renamed back to its original form.
  """
  from pytorch_nndct.parse.rich_in_out_helper import FlattenInOutModelForTrace
  # Fix: dropped the unused `node_list` materialization the original built.
  for node in graph.nodes():
    for in_tensor in node.inputs():
      if FlattenInOutModelForTrace.check_need_recovery_name(unique_name(in_tensor)):
        set_unique_name(in_tensor, FlattenInOutModelForTrace.recovery_tensor_name(unique_name(in_tensor)))
    for out_tensor in node.outputs():
      if FlattenInOutModelForTrace.check_need_recovery_name(unique_name(out_tensor)):
        set_unique_name(out_tensor, FlattenInOutModelForTrace.recovery_tensor_name(unique_name(out_tensor)))
def split_scalar_const(g):
  """Give each shared scalar-const operand of add/mul/div/sub its own node.

  TorchScript may share one prim::Constant across several arithmetic ops;
  downstream parsing wants a private constant per consumer, so every shared
  float constant used as the second operand is cloned and inserted right
  before its user. Recurses into nested blocks.
  """
  def _split_scalar_const(blocks):
    for block in blocks:
      for node in block.nodes():
        _split_scalar_const(node.blocks())
        if node_type(node) in ["aten::add", "aten::mul", "aten::div", "aten::sub"]:
          if node_type(get_in_node_at(node, 1)) == "prim::Constant" and get_node_output_type(get_in_node_at(node, 1)) in ["FloatType"]:
            const = get_attr_value(get_in_node_at(node, 1), "value")
            # Skip constants that are consumed exclusively by this node.
            if all([node is use.user for use in node.inputsAt(1).uses()]):
              continue
            clone_const_value = g.insertConstant(const)
            clone_const_value.node().moveBefore(node)
            # set_attr_value(clone_const_value.node(), "value", const)
            node.replaceInput(1, clone_const_value)
  _split_scalar_const([g])
def split_shared_bias(g):
  """Give every linear/conv a bias owned by the same module as its weight.

  When a graph shares one bias constant across modules (weight-name prefix
  differs from bias-name prefix), a fresh all-zero bias named
  '<weight_prefix>.bias[...]' is substituted so each op's parameters
  resolve to a single owner module.
  NOTE(review): the substituted bias is zero, not a copy of the shared
  value — presumably this case only arises for zero biases; confirm.
  """
  def _split_shared_bias(blocks):
    for block in blocks:
      for node in block.nodes():
        _split_shared_bias(node.blocks())
        if node_type(node) in ["aten::linear", "aten::_convolution"]:
          # Only constant weight & bias can be re-owned.
          if node_type(get_in_node_at(node, 1)) != "prim::Constant" or node_type(get_in_node_at(node, 2)) != "prim::Constant":
            continue
          # Owner prefix of the weight; names may be 'a.b.weight' or the
          # de-duplicated 'a.b.weight.N'.
          if unique_name(node.inputsAt(1)).split(".")[-1] != "weight":
            weight_prefix = ".".join(unique_name(node.inputsAt(1)).split(".")[:-2])
            bias_suffix = ".".join(["bias", unique_name(node.inputsAt(1)).split(".")[-1]])
          else:
            weight_prefix = ".".join(unique_name(node.inputsAt(1)).split(".")[:-1])
            bias_suffix = "bias"
          # Owner prefix of the bias, with the same naming convention.
          if unique_name(node.inputsAt(2)).split(".")[-1] != "bias":
            bias_prefix = ".".join(unique_name(node.inputsAt(2)).split(".")[:-2])
          else:
            bias_prefix = ".".join(unique_name(node.inputsAt(2)).split(".")[:-1])
          if weight_prefix != bias_prefix:
            weight_value = get_attr_value(get_in_node_at(node, 1), "value")
            # Output-channel count is dim 0, or dim 1 for transposed conv.
            is_transpose = bool(get_attr_value(get_in_node_at(node, 6), "value"))
            shape = weight_value.size(0) if is_transpose is False else weight_value.size(1)
            bias_value = torch.zeros(shape, device=weight_value.device, requires_grad=weight_value.requires_grad)
            const_value = g.insertConstant(bias_value)
            const_value.node().moveBefore(node)
            set_attr_value(const_value.node(), "value", bias_value)
            set_unique_name(const_value, ".".join([weight_prefix, bias_suffix]))
            node.replaceInput(2, const_value)
  _split_shared_bias([g])
def split_shared_const_tensor(g):
  """Clone float tensor constants with multiple users so each user owns one.

  The first use keeps the original constant; every later use gets a fresh
  clone named 'nndct_<orig_name>.<i>' inserted right before its consumer.
  Recurses into nested blocks.
  """
  def _split_shared_const_tensor(blocks):
    for block in blocks:
      for node in block.nodes():
        _split_shared_const_tensor(node.blocks())
        if node_type(node) == "prim::Constant" and get_node_output_type(node) == _TENSOR and get_node_output_scalar_type(node) == _FLOAT:
          # A single user means nothing is shared — leave it alone.
          if len(get_users(node)) == 1:
            continue
          for id, use in enumerate(_get_uses(node)[1:], 1):
            value = get_attr_value(node, "value")
            new_value = torch.tensor(value.clone().detach(), device=value.device, requires_grad=value.requires_grad)
            new_const_value = g.insertConstant(new_value)
            new_const_value.node().moveBefore(use.user)
            set_attr_value(new_const_value.node(), "value", new_value)
            set_unique_name(new_const_value, "nndct_" + ".".join([unique_name(node.output()), str(id)]))
            use.user.replaceInput(use.offset, new_const_value)
  _split_shared_const_tensor([g])
def convert_scalar_to_const(g):
  """Promote scalar constants feeding tensor-valued arithmetic to tensors.

  For add/mul/div/sub whose output is a tensor but whose second operand is
  a scalar prim::Constant, the scalar is wrapped into a tensor constant
  (dtype mapped via const_type_converter, defaulting to float32) so all
  operands are tensors. Recurses into nested blocks.
  """
  def _convert_scalar_to_const(blocks):
    for block in blocks:
      for node in block.nodes():
        _convert_scalar_to_const(node.blocks())
        if node_type(node) in ["aten::add", "aten::mul", "aten::div", "aten::sub"]:
          if node_type(get_in_node_at(node, 1)) == "prim::Constant" and get_node_output_type(node) == "TensorType":
            const = get_attr_value(get_in_node_at(node, 1), "value")
            clone_const_tensor = g.insertConstant(torch.tensor(const, dtype=const_type_converter.get(get_node_output_type(get_in_node_at(node, 1)), torch.float32)))
            clone_const_tensor.node().moveBefore(node)
            node.replaceInput(1, clone_const_tensor)
  _convert_scalar_to_const([g])
def post_process_script_graph(graph):
  """Run the nndct normalization passes on a TorchScript graph, in order.

  Constants are de-shared first, biases are re-owned, scalars are split and
  then promoted to tensor constants, debug names are recovered, and finally
  torch's own DCE and lint passes clean up.
  """
  split_shared_const_tensor(graph)
  split_shared_bias(graph)
  split_scalar_const(graph)
  convert_scalar_to_const(graph)
  remove_outter_model_debug_name(graph)
  # remove_dce_node(graph)
  torch._C._jit_pass_dce(graph)
  torch._C._jit_pass_lint(graph)
23,803 | import contextlib
import re
import itertools
import torch.jit
from torch.jit import _unique_state_dict
from torch.nn import ModuleList
from .schema import SchemaHelper, convert_type_str
from .torch_const import TorchGraphSymbol
from nndct_shared.utils import DeprecatedAPIError, NndctScreenLogger, NndctDebugLogger, NndctOption, GLOBAL_MAP, NNDCT_KEYS
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def _node_destroy(node):
def _find_all_nodes(graph, node_kind, recurse=True):
def remove_ops(graph, kind, op_remove_handler, recurse=True):
  """Remove every node of *kind* from *graph*.

  Args:
    graph: TorchScript graph to mutate.
    kind: Node kind string to match, e.g. 'prim::GetAttr'.
    op_remove_handler: Called on each matched node before any destruction,
      so handlers can rewire uses while all matches still exist.
    recurse: Whether to search nested blocks as well.
  """
  # Fix: the original ignored `recurse`; forward it to the finder.
  remove_nodes = _find_all_nodes(graph, kind, recurse)
  for node in remove_nodes:
    op_remove_handler(node)
  for node in remove_nodes:
    _node_destroy(node)
23,804 | import contextlib
import re
import itertools
import torch.jit
from torch.jit import _unique_state_dict
from torch.nn import ModuleList
from .schema import SchemaHelper, convert_type_str
from .torch_const import TorchGraphSymbol
from nndct_shared.utils import DeprecatedAPIError, NndctScreenLogger, NndctDebugLogger, NndctOption, GLOBAL_MAP, NNDCT_KEYS
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def get_users(node):
  """Return the consumer nodes of *node*'s outputs (one entry per use)."""
  return [use.user for use in _get_uses(node)]
def get_fw_op_nodes(graph):
  """Collect (output_name, node) pairs for framework op nodes in *graph*.

  Nodes without outputs are skipped and prim::GetAttr nodes are excluded;
  a multi-output node is keyed by its joined output names.
  """
  ops = []
  for node in graph.nodes():
    if node.outputsSize() == 0:
      continue
    if node.outputsSize() > 1:
      name = "_".join(get_node_outputs_name(node))
    else:
      name = get_node_output_name(node)
    if node.kind() != "prim::GetAttr":
      ops.append((name, node))
  return ops
def node_type(node):
  """Kind string of a TorchScript node, e.g. 'aten::add'."""
  return node.kind()

def node_inputs(node):
  """Input values of *node*, materialized as a list."""
  return list(node.inputs())

def node_outputs(node):
  """Output values of *node*, materialized as a list."""
  return list(node.outputs())

def node_blocks(node):
  """Nested blocks of *node*, materialized as a list."""
  return list(node.blocks())

def has_block(node):
  """True when *node* owns at least one nested block."""
  return bool(list(node.blocks()))
def _collect_unpack_pack_pair(graph, remove_nodes=None):
  """Collect Tuple/ListUnpack nodes whose outputs are only re-packed.

  For each unpack whose users are Construct nodes, the constructs' outputs
  are rewired straight to the unpack's inputs and both ends are queued in
  *remove_nodes* for later deletion. Recurses into nested blocks.

  Returns:
    The accumulated list of removable nodes.
  """
  if remove_nodes is None:
    remove_nodes = []
  for _, node in get_fw_op_nodes(graph):
    if node_type(node) in ["prim::TupleUnpack", "prim::ListUnpack"]:
      use_by_other = False
      for user in get_users(node):
        if user in remove_nodes:
          continue
        if node_type(user) in ["prim::TupleConstruct", "prim::ListConstruct"]:
          remove_nodes.append(user)
          # Bypass the construct: its i-th output equals the unpack's i-th input.
          for index, out in enumerate(node_outputs(user)):
            out.replaceAllUsesWith(node_inputs(node)[index])
        else:
          use_by_other = True
      # The unpack itself is removable only when nothing else consumes it.
      if not use_by_other:
        remove_nodes.append(node)
    elif has_block(node):
      for block in node_blocks(node):
        _collect_unpack_pack_pair(block, remove_nodes)
  return remove_nodes
23,805 | import contextlib
import re
import itertools
import torch.jit
from torch.jit import _unique_state_dict
from torch.nn import ModuleList
from .schema import SchemaHelper, convert_type_str
from .torch_const import TorchGraphSymbol
from nndct_shared.utils import DeprecatedAPIError, NndctScreenLogger, NndctDebugLogger, NndctOption, GLOBAL_MAP, NNDCT_KEYS
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def get_users(node):
  """Return the consumer nodes of *node*'s outputs (one entry per use)."""
  return [use.user for use in _get_uses(node)]
def get_fw_op_nodes(graph):
  """Collect (name, node) pairs from the graph, skipping output-less nodes
  and prim::GetAttr nodes; multi-output nodes get an underscore-joined name."""
  ops = []
  for node in graph.nodes():
    if node.outputsSize() == 0:
      continue
    if node.outputsSize() > 1:
      node_name = "_".join(get_node_outputs_name(node))
    else:
      node_name = get_node_output_name(node)
    if node.kind() != "prim::GetAttr":
      ops.append((node_name, node))
  return ops
def node_type(node):
  # The node's JIT kind string, e.g. "prim::TupleConstruct".
  return node.kind()
def node_inputs(node):
  # The node's input values, materialized as a list.
  return [inp for inp in node.inputs()]
def node_outputs(node):
  # The node's output values, materialized as a list.
  return [out for out in node.outputs()]
def node_blocks(node):
  # The node's sub-blocks, materialized as a list.
  return [block for block in node.blocks()]
def has_block(node):
  # True when the node owns at least one sub-block.
  return False if not list(node.blocks()) else True
def _collect_pack_unpack_pair(graph, remove_nodes=None):
  # TupleConstruct + TupleUnpack
  """Collect redundant Construct->Unpack node pairs for removal.

  When a Tuple/ListConstruct's outputs only feed Tuple/ListUnpack nodes, the
  pair cancels out: each Unpack output is rewired to the Construct's matching
  input and both nodes are queued in `remove_nodes`.  Recurses into
  sub-blocks.  Mutates the graph in place and returns `remove_nodes`.
  NOTE(review): unlike _collect_unpack_pack_pair, users already present in
  `remove_nodes` are not skipped here — presumably intentional, but verify.
  """
  if remove_nodes is None:
    remove_nodes = []
  for _, node in get_fw_op_nodes(graph):
    if node_type(node) in ["prim::TupleConstruct", "prim::ListConstruct"]:
      use_by_other = False
      for user in get_users(node):
        if node_type(user) in ["prim::TupleUnpack", "prim::ListUnpack"]:
          remove_nodes.append(user)
          # Rewire each Unpack output to the Construct's corresponding input.
          for index, out in enumerate(node_outputs(user)):
            out.replaceAllUsesWith(node_inputs(node)[index])
        else:
          use_by_other = True
      # The Construct itself is removable only if nothing else uses it.
      if not use_by_other:
        remove_nodes.append(node)
    elif has_block(node):
      for block in node_blocks(node):
        _collect_pack_unpack_pair(block, remove_nodes)
  return remove_nodes
23,806 | import contextlib
import re
import itertools
import torch.jit
from torch.jit import _unique_state_dict
from torch.nn import ModuleList
from .schema import SchemaHelper, convert_type_str
from .torch_const import TorchGraphSymbol
from nndct_shared.utils import DeprecatedAPIError, NndctScreenLogger, NndctDebugLogger, NndctOption, GLOBAL_MAP, NNDCT_KEYS
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def get_attr_value(node: torch.Node, attr_name: str):
  """Fetch the value of attribute `attr_name` from a JIT node.

  Attributes stored as IValues are read off the node's output; otherwise the
  kind-specific accessor method (named by `kindOf`) is dispatched by name.
  """
  accessor_kind = node.kindOf(attr_name)
  if accessor_kind == 'ival':
    return node.output().toIValue()
  return getattr(node, accessor_kind)(attr_name)
def get_users(node):
  # All consumer nodes of `node`'s outputs, one entry per use.
  return [use.user for use in _get_uses(node)]
def get_fw_op_nodes(graph):
  """Collect (name, node) pairs from the graph, skipping output-less nodes
  and prim::GetAttr nodes; multi-output nodes get an underscore-joined name."""
  ops = []
  for node in graph.nodes():
    if node.outputsSize() == 0:
      continue
    if node.outputsSize() > 1:
      node_name = "_".join(get_node_outputs_name(node))
    else:
      node_name = get_node_output_name(node)
    if node.kind() != "prim::GetAttr":
      ops.append((node_name, node))
  return ops
def node_type(node):
  # The node's JIT kind string, e.g. "prim::TupleIndex".
  return node.kind()
def node_inputs(node):
  # The node's input values, materialized as a list.
  return [inp for inp in node.inputs()]
def node_outputs(node):
  # The node's output values, materialized as a list.
  return [out for out in node.outputs()]
def node_blocks(node):
  # The node's sub-blocks, materialized as a list.
  return [block for block in node.blocks()]
def has_block(node):
  # True when the node owns at least one sub-block.
  return False if not list(node.blocks()) else True
def _collect_tuple_index(graph, remove_nodes=None):
  # TupleConstruct + TupleIndex
  """Collect TupleConstruct->TupleIndex pairs for removal.

  When a TupleConstruct feeds a TupleIndex with a constant index, the
  TupleIndex output is rewired directly to the Construct's input at that
  index and the TupleIndex is queued in `remove_nodes`.  The Construct is
  queued only when no other kind of node uses it.  Recurses into
  sub-blocks; mutates the graph in place and returns `remove_nodes`.
  """
  if remove_nodes is None:
    remove_nodes = []
  for _, node in get_fw_op_nodes(graph):
    if node_type(node) == "prim::TupleConstruct":
      use_by_other = False
      for user in get_users(node):
        if node_type(user) == "prim::TupleIndex":
          # Last input of TupleIndex is the index value itself.
          index = node_inputs(user)[-1]
          if node_type(index.node()) == "prim::Constant":
            index_value = get_attr_value(index.node(), "value")
            node_outputs(user)[0].replaceAllUsesWith(node_inputs(node)[index_value])
            remove_nodes.append(user)
        else:
          use_by_other = True
      if not use_by_other:
        remove_nodes.append(node)
    elif has_block(node):
      for block in node_blocks(node):
        _collect_tuple_index(block, remove_nodes)
  return remove_nodes
23,807 | import contextlib
import re
import itertools
import torch.jit
from torch.jit import _unique_state_dict
from torch.nn import ModuleList
from .schema import SchemaHelper, convert_type_str
from .torch_const import TorchGraphSymbol
from nndct_shared.utils import DeprecatedAPIError, NndctScreenLogger, NndctDebugLogger, NndctOption, GLOBAL_MAP, NNDCT_KEYS
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def _optimize_graph_19(graph, is_jit_graph=False, module=None):
  """Run the JIT optimization pass pipeline for torch >= 1.9.

  Mutates `graph` in place through a sequence of torch._C JIT passes and
  returns the canonicalized graph.  Pass order is significant.
  `module` is currently unused here.
  """
  from torch.onnx.utils import _split_tensor_list_constants
  # Inline everything
  torch._C._jit_pass_inline(graph)
  # Remove fork/wait nodes
  torch._C._jit_pass_inline_fork_wait(graph)
  torch._C._jit_pass_lint(graph)
  if not is_jit_graph:
    torch._C._jit_pass_lower_all_tuples(graph)
  # we now record some ops like ones/zeros
  # into a trace where we previously recorded constants;
  # use constant prop to maintain our current level of onnx support
  # without implementing symbolics for all of them
  torch._C._jit_pass_constant_propagation(graph)
  _split_tensor_list_constants(graph, graph)
  # run dce to eliminate dead parts of the graph that might have been
  # left behind by things like symbolic_override
  torch._C._jit_pass_dce(graph)
  torch._C._jit_pass_lint(graph)
  torch._C._jit_pass_canonicalize_graph_fuser_ops(graph)
  torch._C._jit_pass_lint(graph)
  torch._C._jit_pass_peephole(graph, True)
  torch._C._jit_pass_fuse_addmm(graph)
  torch._C._jit_pass_lint(graph)
  if is_jit_graph:
    torch._C._jit_pass_peephole(graph, True)
  # torch._C._jit_pass_lower_all_tuples(graph)
  torch._C._jit_pass_lint(graph)
  torch._C._jit_pass_onnx_remove_print(graph)
  torch._C._jit_pass_lint(graph)
  graph = torch._C._jit_pass_canonicalize(graph)
  torch._C._jit_pass_lint(graph)
  return graph
def _optimize_graph_17(graph):
  """Run the JIT optimization pass pipeline for torch 1.6.x-1.8.x.

  Mutates `graph` in place through a sequence of torch._C JIT passes and
  returns the canonicalized graph.  Pass order is significant.
  """
  # Inline everything
  from torch.onnx.utils import _split_tensor_list_constants
  torch._C._jit_pass_inline(graph)
  # Remove fork/wait nodes
  torch._C._jit_pass_inline_fork_wait(graph)
  torch._C._jit_pass_lint(graph)
  torch._C._jit_pass_remove_inplace_ops(graph)
  # we now record some ops like ones/zeros
  # into a trace where we previously recorded constants;
  # use constant prop to maintain our current level of onnx support
  # without implementing symbolics for all of them
  torch._C._jit_pass_constant_propagation(graph)
  _split_tensor_list_constants(graph, graph)
  # run dce to eliminate dead parts of the graph that might have been
  # left behind by things like symbolic_override
  torch._C._jit_pass_dce(graph)
  torch._C._jit_pass_lint(graph)
  torch._C._jit_pass_canonicalize_graph_fuser_ops(graph)
  torch._C._jit_pass_lint(graph)
  torch._C._jit_pass_peephole(graph, True)
  torch._C._jit_pass_fuse_addmm(graph)
  torch._C._jit_pass_lint(graph)
  # graph is not a valid jit graph anymore because types have been replaced
  # (e.g. int with Tensor), so it now contains operators that don't actually
  # exist. We can't run normal dead code elimination because it'd fail trying
  # to look up if an operator has side effects, but we can run a dead code
  # elimination variant that doesn't need to look up if an op has side effects.
  torch._C._jit_pass_lint(graph)
  graph = torch._C._jit_pass_canonicalize(graph)
  torch._C._jit_pass_lint(graph)
  return graph
class CmpFlag(Enum):
  """Relational operators used by `compare_torch_version` to compare the
  installed torch version against a target version string."""
  EQUAL = 0
  LESS = 1
  LESS_EQUAL = 2
  GREATER = 3
  GREATER_EQUAL = 4
  NOT_EQUAL = 5
def compare_torch_version(compare_type: CmpFlag, version: str):
  """Compare the running `torch.__version__` against `version`.

  Args:
    compare_type: the relational operator to apply (a CmpFlag member).
    version: target version string, e.g. "1.9.0".

  Returns:
    bool: result of `torch.__version__ <op> version`.

  Raises:
    ValueError: for an unrecognized `compare_type` (previously the function
      silently returned None, which reads as False at call sites).
  """
  # Hoist the two LooseVersion constructions out of the comparison chain.
  current = LooseVersion(torch.__version__)
  target = LooseVersion(version)
  if compare_type == CmpFlag.EQUAL:
    return current == target
  if compare_type == CmpFlag.LESS:
    return current < target
  if compare_type == CmpFlag.LESS_EQUAL:
    return current <= target
  if compare_type == CmpFlag.GREATER:
    return current > target
  if compare_type == CmpFlag.GREATER_EQUAL:
    return current >= target
  if compare_type == CmpFlag.NOT_EQUAL:
    return current != target
  raise ValueError(f"Unsupported comparison flag: {compare_type!r}")
def optimize_graph(graph, is_jit_graph=False, module=None):
  """Dispatch to the version-appropriate JIT optimization pipeline.

  torch 1.6-1.8 -> _optimize_graph_17; torch >= 1.9 -> _optimize_graph_19;
  older versions fall through to the inline pass sequence below.
  Mutates `graph` in place and returns it.
  """
  if compare_torch_version(CmpFlag.GREATER, "1.5.9") and compare_torch_version(CmpFlag.LESS, "1.9.0"):
    _optimize_graph_17(graph)
    return graph
  if compare_torch_version(CmpFlag.GREATER_EQUAL, "1.9.0"):
    _optimize_graph_19(graph, is_jit_graph, module)
    return graph
  # Legacy (< 1.6) pipeline.
  if is_jit_graph:
    torch._C._jit_pass_inline(graph)
  # Remove fork/wait nodes
  torch._C._jit_pass_inline_fork_wait(graph)
  torch._C._jit_pass_dce(graph)
  torch._C._jit_pass_lint(graph)
  torch._C._jit_pass_remove_inplace_ops(graph)
  # we now record some ops like ones/zeros
  # into a trace where we previously recorded constants;
  # use constant prop to maintain our current level of onnx support
  # without implementing symbolics for all of them
  torch._C._jit_pass_constant_propagation(graph)
  # _split_tensor_list_constants(graph, graph)
  # run dce to eliminate dead parts of the graph that might have been
  # left behind by things like symbolic_override
  torch._C._jit_pass_dce(graph)
  torch._C._jit_pass_lint(graph)
  # torch._C._jit_pass_canonicalize_ops(graph)
  # torch._C._jit_pass_lint(graph)
  torch._C._jit_pass_peephole(graph, True)
  torch._C._jit_pass_lint(graph)
  torch._C._jit_pass_dce(graph)
  torch._C._jit_pass_lint(graph)
  if compare_torch_version(CmpFlag.LESS, "1.5.0"):
    torch._C._jit_pass_fixup_onnx_loops(graph)
  torch._C._jit_pass_lint(graph)
  graph = torch._C._jit_pass_canonicalize(graph)
  torch._C._jit_pass_lint(graph)
  return graph
23,808 | import contextlib
import re
import itertools
import torch.jit
from torch.jit import _unique_state_dict
from torch.nn import ModuleList
from .schema import SchemaHelper, convert_type_str
from .torch_const import TorchGraphSymbol
from nndct_shared.utils import DeprecatedAPIError, NndctScreenLogger, NndctDebugLogger, NndctOption, GLOBAL_MAP, NNDCT_KEYS
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
@contextlib.contextmanager
def set_training(model, mode):
  r"""
  A context manager to temporarily set the training mode of 'model'
  to 'mode', resetting it when we exit the with-block. A no-op if
  mode is None.

  FIX: the generator body was missing the @contextlib.contextmanager
  decorator (present in the upstream torch.onnx version), so
  `with set_training(...)` would fail; `contextlib` is already imported
  at module level.
  """
  if mode is None:
    yield
    return
  old_mode = model.training
  if old_mode != mode:
    model.train(mode)
  try:
    yield
  finally:
    # Restore the caller's original training mode even on exception.
    if old_mode != mode:
      model.train(old_mode)
def _get_trace_graph():
if hasattr(torch.jit, "_get_trace_graph"):
return torch.jit._get_trace_graph
else:
return torch.jit._trace._get_trace_graph
def _get_trace_map():
if hasattr(torch.jit, "_trace_module_map"):
return torch.jit._trace_module_map
else:
return torch.jit._trace._trace_module_map
def _init_trace_map(init_map):
if hasattr(torch.jit, "_trace_module_map"):
torch.jit._trace_module_map = init_map
else:
torch.jit._trace._trace_module_map = init_map
def _init_trace_state():
  # Reset tracing state: fresh empty module map and clear the module stack
  # (module-level _TRACED_STACK_MODULES).
  _init_trace_map({})
  _TRACED_STACK_MODULES.clear()
def _register_trace_fn(module):
  """Attach the pre/post tracing hooks to `module`, once each; handles are
  kept in the module-level _HOOKS list so _remove_trace_fn can detach them."""
  if _traced_module_pre not in module._forward_pre_hooks.values():
    _HOOKS.append(module.register_forward_pre_hook(_traced_module_pre))
  if _traced_module_post not in module._forward_hooks.values():
    _HOOKS.append(module.register_forward_hook(_traced_module_post))
def _remove_trace_fn():
  # Detach every hook registered by _register_trace_fn.  NOTE(review): the
  # _HOOKS list is not cleared afterwards — confirm whether repeated
  # register/remove cycles are expected.
  for handle in _HOOKS:
    handle.remove()
def trace_and_get_graph_from_model(model, args, training):
  """Trace `model` with `args` and return (graph, torch_out).

  Temporarily switches the model's training mode via set_training, uses the
  legacy torch.jit.get_trace_graph when available, and otherwise the private
  _get_trace_graph path with the module-map hooks installed.  Verifies that
  tracing did not alter the model's state_dict keys.
  """
  orig_state_dict_keys = _unique_state_dict(model).keys()
  # By default, training=False, which is good because running a model in
  # training mode could result in internal buffers getting updated, dropout
  # getting applied, etc. If you really know what you're doing, you
  # can turn training=True (or None, to preserve whatever the original
  # training mode was.)
  with set_training(model, training):
    if hasattr(torch.jit, "get_trace_graph"):
      trace, torch_out = torch.jit.get_trace_graph(model, args)
      graph = trace.graph()
    else:
      # Newer torch: save/restore the trace-module map around tracing.
      old_map = _get_trace_map()
      _init_trace_state()
      model.apply(_register_trace_fn)
      graph, torch_out = _get_trace_graph()(model, args)
      _remove_trace_fn()
      _init_trace_map(old_map)
      # torch.jit._trace_module_map = old_map
  if orig_state_dict_keys != _unique_state_dict(model).keys():
    raise RuntimeError("state_dict changed after running the tracer; "
                       "something weird is happening in your model!")
  return graph, torch_out
23,809 | import contextlib
import re
import itertools
import torch.jit
from torch.jit import _unique_state_dict
from torch.nn import ModuleList
from .schema import SchemaHelper, convert_type_str
from .torch_const import TorchGraphSymbol
from nndct_shared.utils import DeprecatedAPIError, NndctScreenLogger, NndctDebugLogger, NndctOption, GLOBAL_MAP, NNDCT_KEYS
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def set_unique_name(value: torch.Value, name: str):
  """Assign `name` to a JIT value, using whichever setter this torch
  version exposes (setUniqueName on 1.1, setDebugName on 1.2+)."""
  for api in ('setUniqueName', 'setDebugName'):
    if hasattr(value, api):
      getattr(value, api)(name)
      return
  raise DeprecatedAPIError('setDebugName', value.__class__.__name__)
def unique_name(value: torch.Value):
  """Read a JIT value's name via whichever accessor this torch version
  exposes (uniqueName on 1.1, debugName on 1.2+)."""
  for api in ('uniqueName', 'debugName'):
    if hasattr(value, api):
      return getattr(value, api)()
  raise DeprecatedAPIError('debugName', value.__class__.__name__)
def rename_graph_param_name(model, graph):
  """Rename the graph's inputs: user inputs become input_<i>, parameter
  inputs take the model's state_dict key names; returns the parameter names.

  Relies on the tracer convention that parameter inputs come after all user
  inputs, in state_dict order.
  """
  state_dict = _unique_state_dict(model)
  graph_inputs = list(graph.inputs())
  # Inputs beyond this count are parameters/buffers, not user data.
  user_input_num = len(graph_inputs) - len(state_dict)
  param_names = list(state_dict.keys())
  params = []
  for i, inp in enumerate(graph_inputs):
    if i >= user_input_num:
      set_unique_name(inp, param_names[i - user_input_num])
      params.append(unique_name(inp))
    else:
      set_unique_name(inp, 'input_' + str(i))
  return params
23,810 | import contextlib
import re
import itertools
import torch.jit
from torch.jit import _unique_state_dict
from torch.nn import ModuleList
from .schema import SchemaHelper, convert_type_str
from .torch_const import TorchGraphSymbol
from nndct_shared.utils import DeprecatedAPIError, NndctScreenLogger, NndctDebugLogger, NndctOption, GLOBAL_MAP, NNDCT_KEYS
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def get_use_chains(root_node, terminate=lambda _: False):
  """
  Track a chain of users of this node forward, returning a list of chains
  See get_attr_chains below for its usage
  """
  def concat_lists(lists):
    # Flatten one level of nesting lazily.
    return itertools.chain.from_iterable(lists)
  def inner(current, accum):
    # Depth-first walk; a chain ends when the node has no users or the
    # caller-supplied terminate() predicate fires on the user list.
    users = get_users(current)
    if not users or terminate(users):
      return [accum]
    return concat_lists([inner(nxt, accum + [nxt]) for nxt in users])
  return inner(root_node, [root_node])
The provided code snippet includes the necessary dependencies for implementing the `get_attr_chains` function. Write a Python function `def get_attr_chains(root_getattr_node)` to solve the following problem:
Return the chains of attribute access starting from root_getattr_node. For example, given the attribute "block", as in "self.block" where "self" points to the top-level torch.nn.Module, it returns lists of attribute "chains", e.g. ['block', '2'], ['block', '1'], ['block', '0', '_packed_params']. These sets of attributes form full attribute accessors: for example, "self.block.1" and "self.block.2" will return the second and third submodules, and "self.block.0._packed_params" will return the parameters of the first submodule.
Here is the function:
def get_attr_chains(root_getattr_node):
  """Return chains of attribute accesses rooted at `root_getattr_node`.

  Given attribute "block" (i.e. "self.block" with "self" the top-level
  torch.nn.Module), produces attribute chains such as ['block', '2'],
  ['block', '1'], ['block', '0', '_packed_params'].  Each chain is a full
  accessor: "self.block.1" and "self.block.2" reach the second and third
  submodules, and "self.block.0._packed_params" reaches the packed
  parameters of the first submodule.
  """
  def terminate(users):
    # Stop extending a chain once no user is a further prim::GetAttr.
    return not any(user.kind() == "prim::GetAttr" for user in users)
  return get_use_chains(root_getattr_node, terminate)
23,811 | import contextlib
import re
import itertools
import torch.jit
from torch.jit import _unique_state_dict
from torch.nn import ModuleList
from .schema import SchemaHelper, convert_type_str
from .torch_const import TorchGraphSymbol
from nndct_shared.utils import DeprecatedAPIError, NndctScreenLogger, NndctDebugLogger, NndctOption, GLOBAL_MAP, NNDCT_KEYS
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def getattr_attr_name(node):
  """Return the single attribute name carried by a prim::GetAttr node."""
  names = node.attributeNames()
  assert len(names) == 1
  return node.s(names[0])
def getattr_full_name(getattrs):
  """Join a GetAttr chain's attribute names into a dotted accessor string."""
  return ".".join(getattr_attr_name(fw_node) for fw_node in getattrs)
23,812 | import contextlib
import re
import itertools
import torch.jit
from torch.jit import _unique_state_dict
from torch.nn import ModuleList
from .schema import SchemaHelper, convert_type_str
from .torch_const import TorchGraphSymbol
from nndct_shared.utils import DeprecatedAPIError, NndctScreenLogger, NndctDebugLogger, NndctOption, GLOBAL_MAP, NNDCT_KEYS
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def unique_name(value: torch.Value):
def get_fw_graph_inputs(graph):
  """Yield the graph's inputs, skipping the implicit `self` input."""
  for graph_in in graph.inputs():
    if unique_name(graph_in) != "self":
      yield graph_in
23,813 | import contextlib
import re
import itertools
import torch.jit
from torch.jit import _unique_state_dict
from torch.nn import ModuleList
from .schema import SchemaHelper, convert_type_str
from .torch_const import TorchGraphSymbol
from nndct_shared.utils import DeprecatedAPIError, NndctScreenLogger, NndctDebugLogger, NndctOption, GLOBAL_MAP, NNDCT_KEYS
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def get_fw_graph_ret_node(graph):
  """Return the return node of a graph or block, bridging the Block/Graph
  accessor-name split (returnNode vs return_node)."""
  accessor = graph.returnNode if isinstance(graph, torch._C.Block) else graph.return_node
  return accessor()
23,814 | import contextlib
import re
import itertools
import torch.jit
from torch.jit import _unique_state_dict
from torch.nn import ModuleList
from .schema import SchemaHelper, convert_type_str
from .torch_const import TorchGraphSymbol
from nndct_shared.utils import DeprecatedAPIError, NndctScreenLogger, NndctDebugLogger, NndctOption, GLOBAL_MAP, NNDCT_KEYS
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def get_fw_graph_ret_value(graph):
  """Yield each input value feeding the return node of a graph or block."""
  if isinstance(graph, torch._C.Block):
    yield from graph.returnNode().inputs()
  else:
    yield from graph.return_node().inputs()
23,815 | import contextlib
import re
import itertools
import torch.jit
from torch.jit import _unique_state_dict
from torch.nn import ModuleList
from .schema import SchemaHelper, convert_type_str
from .torch_const import TorchGraphSymbol
from nndct_shared.utils import DeprecatedAPIError, NndctScreenLogger, NndctDebugLogger, NndctOption, GLOBAL_MAP, NNDCT_KEYS
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def node_type(node):
  # The node's JIT kind string, e.g. "prim::ListConstruct".
  return node.kind()
def should_construct_dynamic_list(list_construct_node):
  """True for an input-less prim::ListConstruct: such a list is
  element-accessed or modified at runtime, so a dynamic List ADT is needed."""
  if node_type(list_construct_node) != "prim::ListConstruct":
    return False
  return len(list(list_construct_node.inputs())) == 0
23,816 | import contextlib
import re
import itertools
import torch.jit
from torch.jit import _unique_state_dict
from torch.nn import ModuleList
from .schema import SchemaHelper, convert_type_str
from .torch_const import TorchGraphSymbol
from nndct_shared.utils import DeprecatedAPIError, NndctScreenLogger, NndctDebugLogger, NndctOption, GLOBAL_MAP, NNDCT_KEYS
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def get_node_outputs_name(node: torch.Node):
def node_type(node):
def parse_node_signature(node):
def is_schema_matching(node, schema):
class SchemaHelper(object):
def __init__(self, func_schema):
def get_arguments(self):
def arg_name(self, arg):
def arg_type(self, arg):
def toString(self):
def normalized_type(self, arg):
def _emit_arg(self, arg):
def _emit_args(self, arguments):
def _emit_ret(self, ret):
def _emit_rets(self, returns):
def op_name(self):
def get_node_schema(node):
  """Find the torch operator schema matching `node`'s signature.

  Returns the first matching schema, or None (implicitly) when nothing
  matches; warns for unmatched aten ops.  Emits debug traces when
  nndct_parse_debug >= 1.
  """
  schema_op = node_type(node)
  schemas = torch._C._jit_get_schemas_for_operator(schema_op)
  for schema in schemas:
    if is_schema_matching(node, schema):
      if NndctOption.nndct_parse_debug.value >= 1:
        NndctDebugLogger.write(f"%{get_node_outputs_name(node)[0]} signature: {parse_node_signature(node)}\n")
        schema_handler = SchemaHelper(schema)
        NndctDebugLogger.write(f"matched schema: {schema_handler.toString()}\n")
      return schema
  if schema_op.split("::")[0] == "aten":
    #assert False
    NndctScreenLogger().warning(f"Can't find schema for {node}.If you can get quantizable model successfully, please ignore it.\n")
23,817 | import contextlib
import re
import itertools
import torch.jit
from torch.jit import _unique_state_dict
from torch.nn import ModuleList
from .schema import SchemaHelper, convert_type_str
from .torch_const import TorchGraphSymbol
from nndct_shared.utils import DeprecatedAPIError, NndctScreenLogger, NndctDebugLogger, NndctOption, GLOBAL_MAP, NNDCT_KEYS
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def set_unique_name(value: torch.Value, name: str):
  # Version-portable setter for a JIT value's name.
  if hasattr(value, 'setUniqueName'): # torch1.1
    value.setUniqueName(name)
  elif hasattr(value, 'setDebugName'): # torch1.2
    value.setDebugName(name)
  else:
    raise DeprecatedAPIError('setDebugName', value.__class__.__name__)
def rename_graph_inputs(graph):
  """Rename every graph input after the implicit `self` to input_<i>."""
  non_self_inputs = list(graph.inputs())[1:]
  for idx, graph_in in enumerate(non_self_inputs):
    set_unique_name(graph_in, 'input_' + str(idx))
23,818 | import contextlib
import re
import itertools
import torch.jit
from torch.jit import _unique_state_dict
from torch.nn import ModuleList
from .schema import SchemaHelper, convert_type_str
from .torch_const import TorchGraphSymbol
from nndct_shared.utils import DeprecatedAPIError, NndctScreenLogger, NndctDebugLogger, NndctOption, GLOBAL_MAP, NNDCT_KEYS
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def get_users(node):
  # All consumer nodes of `node`'s outputs, one entry per use.
  return [use.user for use in _get_uses(node)]
def _remove_dce_node(blocks, redundant_nodes):
  """Recursively collect nodes with no users into `redundant_nodes`
  (dead-code candidates); sub-blocks are scanned before their owner."""
  for block in blocks:
    for node in block.nodes():
      _remove_dce_node(node.blocks(), redundant_nodes)
      if not get_users(node):
        redundant_nodes.append(node)
23,819 | import contextlib
import re
import itertools
import torch.jit
from torch.jit import _unique_state_dict
from torch.nn import ModuleList
from .schema import SchemaHelper, convert_type_str
from .torch_const import TorchGraphSymbol
from nndct_shared.utils import DeprecatedAPIError, NndctScreenLogger, NndctDebugLogger, NndctOption, GLOBAL_MAP, NNDCT_KEYS
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def get_jit_value_device(value):
  """Best-effort device lookup for a JIT value: a real device object for
  typed tensors, else "cpu"/"cuda"/"unknown" by substring match on the
  type's string form."""
  if str(value.type()) == "Tensor" and hasattr(value.type(), "device"):
    return value.type().device()
  elif value.type().str():
    # NOTE(review): str() is called three times here; presumably cheap,
    # but the repeated calls look accidental — confirm.
    if "cpu" in value.type().str():
      device = "cpu"
    elif "cuda" in value.type().str():
      device = "cuda"
    else:
      device = "unknown"
    return device
  else:
    return "unknown"
23,820 | import contextlib
import re
import itertools
import torch.jit
from torch.jit import _unique_state_dict
from torch.nn import ModuleList
from .schema import SchemaHelper, convert_type_str
from .torch_const import TorchGraphSymbol
from nndct_shared.utils import DeprecatedAPIError, NndctScreenLogger, NndctDebugLogger, NndctOption, GLOBAL_MAP, NNDCT_KEYS
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def freeze_graph_wo_opt(module, preserved_attrs=None):
  """Freeze a ScriptModule without the optimization passes torch.jit.freeze
  would run; `preserved_attrs` lists attribute names to keep mutable."""
  from torch.jit._script import RecursiveScriptModule
  if not isinstance(module, torch.jit.ScriptModule):
    raise RuntimeError(
        "Freezing expects a ScriptModule as input. "
        "Please use torch.jit.script or torch.jit.trace to script your 'nn.Module'."
    )
  if module.training:
    raise RuntimeError(
        "Freezing is currently only implemented for modules in eval mode. "
        "Please call .eval() on your module before freezing."
    )
  preserved_attrs = preserved_attrs if preserved_attrs is not None else []
  out = RecursiveScriptModule(torch._C._freeze_module(module._c, preserved_attrs))
  RecursiveScriptModule._finalize_scriptmodule(out)
  return out
23,821 | import contextlib
import re
import itertools
import torch.jit
from torch.jit import _unique_state_dict
from torch.nn import ModuleList
from .schema import SchemaHelper, convert_type_str
from .torch_const import TorchGraphSymbol
from nndct_shared.utils import DeprecatedAPIError, NndctScreenLogger, NndctDebugLogger, NndctOption, GLOBAL_MAP, NNDCT_KEYS
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def get_operation_caller_by_schema_name(schema_name):
  """Resolve the callable registered for a schema name via torch's JIT
  operator registry.

  Returns None when a sequence result contains no builtin caller; warns and
  uses the first entry when several are found; otherwise returns the result
  object itself.
  """
  caller_info = torch._C._jit_get_operation(schema_name)
  if isinstance(caller_info, (list, tuple)):
    # Keep only the C-implemented entry points.  A real comprehension
    # replaces the old append-inside-comprehension side-effect idiom.
    real_caller = [x for x in caller_info
                   if 'builtin_function_or_method' in str(x.__class__)]
    if len(real_caller) == 0:
      return None
    if len(real_caller) > 1:
      NndctScreenLogger().warning(f"schema_name has more than one caller, but our program only use the first caller!!")
    return real_caller[0]
  # Any non-sequence result is the caller itself.  The old trailing
  # `raise("unknow caller type")` was unreachable (everything passes
  # isinstance(x, object)) and would itself have raised TypeError for
  # raising a plain string; it has been removed.
  return caller_info
23,822 | import contextlib
import re
import itertools
import torch.jit
from torch.jit import _unique_state_dict
from torch.nn import ModuleList
from .schema import SchemaHelper, convert_type_str
from .torch_const import TorchGraphSymbol
from nndct_shared.utils import DeprecatedAPIError, NndctScreenLogger, NndctDebugLogger, NndctOption, GLOBAL_MAP, NNDCT_KEYS
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
class FlattenInOutModelForTrace(torch.nn.Module):
  """Wrapper module that flattens structured inputs/outputs for tracing.

  The inner model is stored under an attribute named
  "nndct_st_<inner name>_ed"; the class-level helpers strip that marker back
  out of tensor and scope names after tracing.
  NOTE(review): the cls-taking methods look like @classmethod definitions
  whose decorators were lost in extraction — TODO confirm against upstream.
  """
  def getModelName(cls):
    return 'FlattenInOutModelForTrace'
  def getOriginModelNameFormString(cls, data_str):
    # Recover the wrapped model's original name from a marker string.
    names = re.findall(r"nndct_st_([\w_]+)_ed", data_str)
    if len(names) > 0:
      return names[0]
    else:
      return None
  def check_need_recovery_name(cls, name):
    # True when `name` still carries wrapper markers to strip.
    return 'nndct_st_' in name or 'FlattenInOutModelForTrace' in name
  def recovery_tensor_name(cls, name):
    # Drop the "nndct_st_<...>_ed." prefix from a tensor name.
    return re.sub(r"nndct_st_[\w_]+_ed\.", '', name)
  def recovery_node_scope_name(cls, scope_name):
    # Strip the wrapper from both scope-name encodings torch emits.
    real_class_name = re.findall(r"FlattenInOutModelForTrace/(.*)?\[nndct_st_[\w_]+_ed\]", scope_name)
    if len(real_class_name) > 0:
      scope_name = re.sub(r"FlattenInOutModelForTrace/(.*)?\[nndct_st_[\w_]+_ed\]", real_class_name[0], scope_name)
    real_model_name = re.findall(r".*FlattenInOutModelForTrace::/(.*::)nndct_st_[\w_]+_ed", scope_name)
    if len(real_model_name) > 0:
      scope_name = re.sub(r".*FlattenInOutModelForTrace::/(.*::)nndct_st_[\w_]+_ed", real_model_name[0], scope_name)
    return scope_name
  def __init__(self, inner_model, input_schema) -> None:
    super().__init__()
    # Marker-wrapped attribute name lets the recovery helpers find it later.
    self.module_name = "nndct_st_" + inner_model._get_name() + "_ed"
    setattr(self, self.module_name, inner_model)
    self.input_schema = input_schema
    self.training = inner_model.training
  def forward(self, *flatten_input):
    # Rebuild structured args/kwargs, run the wrapped model, re-flatten.
    input = self.input_schema(flatten_input)
    output = getattr(self, self.module_name)(*input['args'], **input['kwargs'])
    flatten_output, _ = flatten_to_tuple(output)
    return flatten_output
def get_node_scope_name(fw_node):
  """Return a cleaned scope name for a traced node, with any
  FlattenInOutModelForTrace wrapper markers stripped; dunder-prefixed scope
  names are reduced to their last path segment."""
  from pytorch_nndct.parse.rich_in_out_helper import FlattenInOutModelForTrace
  scope_name = fw_node.scopeName()
  scope_name = FlattenInOutModelForTrace.recovery_node_scope_name(scope_name)
  if scope_name.startswith("__"):
    return scope_name.split("/")[-1]
  else:
    return scope_name
23,823 | import contextlib
import re
import itertools
import torch.jit
from torch.jit import _unique_state_dict
from torch.nn import ModuleList
from .schema import SchemaHelper, convert_type_str
from .torch_const import TorchGraphSymbol
from nndct_shared.utils import DeprecatedAPIError, NndctScreenLogger, NndctDebugLogger, NndctOption, GLOBAL_MAP, NNDCT_KEYS
from pytorch_nndct.utils.torch_utils import CmpFlag, compare_torch_version
def find_fw_nodes_by_type(g, type, filter=None):
def remove_node(fw_node):
def remove_quant_dequant_stub(g):
  """Delete QuantStubF/DeQuantStubF PythonOp nodes from graph `g`."""
  def pyop_filter(fw_node):
    # Idiomatic membership test replaces the old
    # `if ...: return True else: return False` branch.
    return fw_node.pyname() in ("QuantStubF", "DeQuantStubF")
  stubs = find_fw_nodes_by_type(g, "prim::PythonOp", pyop_filter)
  for node in stubs:
    remove_node(node)
23,824 | import functools
import inspect
import types
from typing import List, Optional
from nndct_shared.utils import GLOBAL_MAP, NNDCT_KEYS, NndctScreenLogger, NNDCT_OP, QError, QWarning, QNote
import torch
from .nndct2torch_op_map import add_mapping_item
from .torch_const import TorchSymbol
from .torch_op_attr import gen_attr
def add_mapping_item(nndct_op_name: str, torch_op_name: str):
  """Register the bidirectional nndct<->torch op-name mapping; existing
  entries are never overwritten (first registration wins)."""
  global _NNDCT_OP_2_TORCH_OP
  if nndct_op_name not in _NNDCT_OP_2_TORCH_OP:
    _NNDCT_OP_2_TORCH_OP[nndct_op_name] = torch_op_name
  global _TORCH_OP_2_NNDCT_OP
  if torch_op_name not in _TORCH_OP_2_NNDCT_OP:
    _TORCH_OP_2_NNDCT_OP[torch_op_name] = nndct_op_name
def gen_attr(torch_op: str, force_to_primitive: bool, schema: "Schema", class_type=None):
  """Build and cache the TorchOpAttr descriptor for `torch_op` in the
  module-level _TORCH_OP_ATTR_MAP (no-op when already present)."""
  global _TORCH_OP_ATTR_MAP
  if torch_op not in _TORCH_OP_ATTR_MAP:
    op_attr = TorchOpAttr(torch_op)
    op_attr.set_op_class_type(force_to_primitive, schema, class_type)
    op_attr.fill_in_attr_and_inputs_table(schema)
    _TORCH_OP_ATTR_MAP[torch_op] = op_attr
def op_register(nndct_op: str, torch_op: str, force_to_primitive=False, schema=None, class_type=None):
  """Convenience wrapper: map the nndct op name to the torch op name and
  generate/cache the torch op's attribute descriptor."""
  add_mapping_item(nndct_op, torch_op)
  gen_attr(torch_op, force_to_primitive=force_to_primitive, schema=schema, class_type=class_type)
23,825 | import functools
import inspect
import types
from typing import List, Optional
from nndct_shared.utils import GLOBAL_MAP, NNDCT_KEYS, NndctScreenLogger, NNDCT_OP, QError, QWarning, QNote
import torch
from .nndct2torch_op_map import add_mapping_item
from .torch_const import TorchSymbol
from .torch_op_attr import gen_attr
_QUANT_MODULES = []
def register_quant_op(func):
if not inspect.isfunction(func):
raise RuntimeError("Only decorate function")
global _QUANT_MODULES
_QUANT_MODULES.append(func.__name__)
@functools.wraps(func)
def innner(*args, **kwargs):
return func(*args, **kwargs)
return innner | null |
23,826 | import functools
import inspect
import types
from typing import List, Optional
from nndct_shared.utils import GLOBAL_MAP, NNDCT_KEYS, NndctScreenLogger, NNDCT_OP, QError, QWarning, QNote
import torch
from .nndct2torch_op_map import add_mapping_item
from .torch_const import TorchSymbol
from .torch_op_attr import gen_attr
_QUANT_MODULES = []
class TorchSymbol():
  """Naming constants used when emitting generated py_nndct modules."""
  MODULE_OUTPUT_PREFIX = "output"
  MODULE_PREFIX = "py_nndct.nn"
  MODULE_NAME_SEPERATOR = "_"
  MODULE_BASE_SYMBOL = "module"
  SCRIPT_SUFFIX = ".py"
def get_defined_quant_module(torch_op: str):
  """Case-insensitively look up `torch_op` among the registered quant
  modules and return its fully-qualified py_nndct.nn name; returns None
  (implicitly) when unregistered."""
  quant_modules_lower = [module.lower() for module in _QUANT_MODULES]
  if torch_op.lower() in quant_modules_lower:
    index = quant_modules_lower.index(torch_op.lower())
    return ".".join([TorchSymbol.MODULE_PREFIX, _QUANT_MODULES[index]])
23,827 | import functools
import inspect
import types
from typing import List, Optional
from nndct_shared.utils import GLOBAL_MAP, NNDCT_KEYS, NndctScreenLogger, NNDCT_OP, QError, QWarning, QNote
import torch
from .nndct2torch_op_map import add_mapping_item
from .torch_const import TorchSymbol
from .torch_op_attr import gen_attr
The provided code snippet includes the necessary dependencies for implementing the `register_custom_op` function. Write a Python function `def register_custom_op(op_type: str, attrs_list: Optional[List[str]] = None, mapping_to_xir=False)` to solve the following problem:
The decorator is used to register a function as a custom operation. Args: op_type (str): the operator type registered with the quantizer; the type must not conflict with pytorch_nndct. attrs_list (Optional[List[str]], optional): the list of attribute names that define the operation flavor. For example, a convolution operation has attributes such as padding, dilation, stride and groups. The order of the names in attrs_list should be consistent with that of the argument list. Default: None.
Here is the function:
def register_custom_op(op_type: str, attrs_list: Optional[List[str]] = None, mapping_to_xir=False):
  """The decorator is used to register the function as a custom operation.
  Args:
    op_type(str): The operator type registered into quantizer.
      The type should not conflict with pytorch_nndct
    attrs_list(Optional[List[str]], optional): the name list of attributes that define operation flavor.
      For example, Convolution operation has such attributes as padding, dilation, stride and groups.
      The order of name in attrs_list should be consistent with that of the arguments list.
      Default: None
  """
  def decorate(func):
    if op_type in NNDCT_OP.__dict__.values() and (not mapping_to_xir):
      NndctScreenLogger().error2user(QError.OP_REGIST, f"'{op_type}' has been defined in pytorch_nndct, please use other type name.")
      exit(1)
    if not inspect.isfunction(func):
      # FIX: the RuntimeError was previously constructed but never raised,
      # silently accepting non-function objects.
      raise RuntimeError("This api only decorate a function object")
    # Lazily create the global custom-op attribute registry.
    custom_op_attr_map = GLOBAL_MAP.get_ele(NNDCT_KEYS.CUSTOM_OP_ATTRS_MAP)
    if custom_op_attr_map is None:
      custom_op_attr_map = {}
      GLOBAL_MAP.set_map(NNDCT_KEYS.CUSTOM_OP_ATTRS_MAP, custom_op_attr_map)
    if op_type in custom_op_attr_map:
      NndctScreenLogger().error2user(QError.OP_REGIST, f"'{op_type}' can't be registered multiple times.")
    else:
      custom_op_attr_map[op_type] = attrs_list if attrs_list is not None else []
    if mapping_to_xir is True:
      NndctScreenLogger().info(f'`{op_type}` has been mapped to xir.')
      custom2xir = GLOBAL_MAP.get_ele(NNDCT_KEYS.CUSTOM_TO_XIR_LIST)
      if custom2xir is None:
        custom2xir = []
        GLOBAL_MAP.set_map(NNDCT_KEYS.CUSTOM_TO_XIR_LIST, custom2xir)
      if op_type not in custom2xir:
        custom2xir.append(op_type)
      else:
        raise RuntimeError(f"{op_type} has alrealy been mapped to XIR. Please use this op type instead of custom op.")
    @functools.wraps(func)
    def inner(*args, **kwargs):
      # Wrap the function in a fresh autograd.Function subclass per call.
      custom_op = types.new_class(op_type, (torch.autograd.Function,), {})
      custom_op.forward = staticmethod(func)
      return custom_op.apply(*args, **kwargs)
    return inner
  NndctScreenLogger().info(f'`{op_type}` has been registered as a custom op.')
  return decorate
23,828 | from typing import NamedTuple
from typing import Iterator, Iterable
import math
import numpy as np
PRECISIONS, EXPONENT_RANGES, DTYPES = {}, {}, {}
The provided code snippet includes necessary dependencies for implementing the `min_positive_subnormal` function. Write a Python function `def min_positive_subnormal(dtype_str: str)` to solve the following problem:
Returns the smallest positive subnormal number
Here is the function:
def min_positive_subnormal(dtype_str: str):
  """Returns the smallest positive subnormal number"""
  min_exponent = EXPONENT_RANGES[dtype_str][0]
  fraction_bits = PRECISIONS[dtype_str] - 1
  return np.exp2(min_exponent - fraction_bits)
23,829 | from typing import NamedTuple
from typing import Iterator, Iterable
import math
import numpy as np
def to_fp32(x):
  """Cast *x* to IEEE binary32 via `np.float32`."""
  return np.float32(x)
def to_fp16(x):
  """Cast *x* to IEEE binary16 via `np.float16`."""
  return np.float16(x)
def to_fp64(x):
  """Cast *x* to IEEE binary64 via `np.float64`."""
  return np.float64(x)
def to_bf16(x, mode='rn'):
  """Returns Google Brain Float16 (bfloat16) format by truncation.
  The `bfloat16` format is a 16-bit field with a 1-bit sign, an 8-bit exponent,
  and a 7-bit trailing significand field (a.k.a. mantissa in older
  specifications). Google's default casting routine converts an IEEE `binary32`
  (float32) to bfloat16 by copying the upper 16 bits.
  Since `numpy` does not natively support `bfloat16`, this routine returns
  a `bfloat16` number as a `numpy.float32` rounded according to `mode`.
  .. code-block:: c
    static inline tensorflow::bfloat16 FloatToBFloat16(float float_val) {
    #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
      return *reinterpret_cast<tensorflow::bfloat16*>(
        reinterpret_cast<uint16_t*>(&float_val));
    #else
      return *reinterpret_cast<tensorflow::bfloat16*>(
        &(reinterpret_cast<uint16_t*>(&float_val)[1]));
    #endif
    }
  Args:
    x (ArrayLike): Input array
    mode (str): Use `rn` for round-to-nearest and `rd` for round-down
      (truncation)
  Returns:
    Bfloat16 data
  """
  # NOTE(review): relies on round_msbs() and the PRECISIONS table defined
  # elsewhere in this module — confirm PRECISIONS['bf16'] is 8 (1+7 bits).
  return np.float32(round_msbs(x, PRECISIONS['bf16'], mode))
def to_tf32(x, mode='rn'):
  """Converts an input array to the TF32 format (1, 8, 10).
  Args:
    x (ArrayLike): Input array
    mode (str): Use `rn` for round-to-nearest and `rd` for round-down
      (truncation)
  Returns:
    TF32 data (carried in a `numpy.float32`, precision reduced to 11 bits)
  """
  # NOTE(review): relies on round_msbs() and the PRECISIONS table defined
  # elsewhere in this module.
  return np.float32(round_msbs(x, PRECISIONS['tf32'], mode))
The provided code snippet includes necessary dependencies for implementing the `to_dtype` function. Write a Python function `def to_dtype(x, dtype_str, mode='rn')` to solve the following problem:
Converts an input array to another data type. Args: x (ArrayLike): Input array dtype_str: Use `bf16` or `bfloat16` for the Google Brain 16-bit floating-point format, `tf32` for the Nvidia TF32 format, which has the precsion of fp16 and the dynamic range of fp32, `fp16` or `float16` for IEEE `binary16`, and `fp32` or `float32` for IEEE `binary32`. mode (str): Use `rn` for round-to-nearest and `rd` for round-down (truncation) Returns: Array convernd to the requested data type
Here is the function:
def to_dtype(x, dtype_str, mode='rn'):
  """Converts an input array to another data type.

  Args:
    x (ArrayLike): Input array
    dtype_str: Use `bf16` or `bfloat16` for the Google Brain 16-bit
      floating-point format, `tf32` for the Nvidia TF32 format (fp16
      precision with fp32 dynamic range), `fp16`/`float16` for IEEE
      `binary16`, `fp32`/`float32` for IEEE `binary32`, and
      `fp64`/`float64` for IEEE `binary64`.
    mode (str): Use `rn` for round-to-nearest and `rd` for round-down
      (truncation). Only `rn` is supported for the exact IEEE casts.
  Returns:
    Array converted to the requested data type; any unrecognized
    `dtype_str` passes `x` through unchanged.
  """
  # Exact IEEE casts first — these only support round-to-nearest.
  if dtype_str in ('fp16', 'float16'):
    assert mode == 'rn'
    return to_fp16(x)
  if dtype_str in ('fp32', 'float32'):
    assert mode == 'rn'
    return to_fp32(x)
  if dtype_str in ('fp64', 'float64'):
    assert mode == 'rn'
    return to_fp64(x)
  # Emulated reduced-precision formats honor the rounding mode.
  if dtype_str in ('bf16', 'bfloat16'):
    return to_bf16(x, mode=mode)
  if dtype_str == 'tf32':
    return to_tf32(x, mode=mode)
  # Unknown dtype strings: pass through untouched.
  return x
23,830 | from typing import NamedTuple
from typing import Iterator, Iterable
import math
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `float_generator` function. Write a Python function `def float_generator(precision: int, exponent_range: Iterable[int], subnormals=True) -> Iterator[np.float64]` to solve the following problem:
Generates all positive floating-point numbers in the exponent range according to the precision. Examples: The following examples enumerate all normal floating-point numbers for the IEEE `binary16` format in the `numpy` array `x_fp16` and for the Google Brain Float `bfloat16` format in `x_bf16`. >>> x_fp16 = np.array([i for i in float_generator(11, range(-14, 16))]) >>> x_fp16.size, x_fp16[0], x_fp16[-1] (31743, 5.960464477539063e-08, 65504.0) >>> x_bf16 = np.array([i for i in float_generator(8, range(-126, 128))]) >>> x_bf16.size, x_bf16[0], x_bf16[-1] (32639, 9.183549615799121e-41, 3.3895313892515355e+38) Args: precison: Number of bits of precision in the floating-point format of interest, which is the number of bits in the trailing signficand field plus one for the hidden leading bit. For instance, the IEEE `binary32` format has 24 bits of precision and the `binary16` format has 11. exponent_range: An iterable in the form `range(exp_min, exp_max+1)`. For instance, the IEEE `binary16` exponent range is `range(-14, 16)` and the `binary32` exponent range is `range(-126, 128)`. subnormals: Outputs subnormals iff `True` Yields: Every floating-point number of the requested precision and exponent range starting with the lowest number.
Here is the function:
def float_generator(precision: int,
                    exponent_range: Iterable[int],
                    subnormals=True) -> Iterator[np.float64]:
  """Generates all positive floating-point numbers of a given format.

  Enumerates, in increasing order, every positive value representable with
  `precision` bits of significand (hidden leading bit included) over the
  exponents in `exponent_range`. Subnormal values — those whose integer
  significand is below 2**(precision-1) at the minimum exponent — are
  yielded first when `subnormals` is True.

  Example:
    >>> xs = np.array([v for v in float_generator(11, range(-14, 16))])
    >>> xs.size, xs[0], xs[-1]
    (31743, 5.960464477539063e-08, 65504.0)

  Args:
    precision: Bits of precision (trailing significand bits plus the hidden
      leading bit), e.g. 24 for binary32, 11 for binary16.
    exponent_range: Iterable like `range(exp_min, exp_max + 1)`; binary16
      uses `range(-14, 16)`, binary32 uses `range(-126, 128)`.
    subnormals: Yield subnormal values iff True.

  Yields:
    Every positive float of the format, smallest first, as `np.float64`.
  """
  scale = 2**precision
  half = 2**(precision - 1)
  if subnormals:
    # Subnormals: significand below the hidden-bit threshold, minimum exponent.
    min_exp = exponent_range[0] + 1
    for significand in range(1, half):
      yield np.ldexp(significand / scale, min_exp)
  # Normals: hidden leading bit set, one pass per exponent.
  for exponent in exponent_range:
    for significand in range(half, scale):
      yield np.ldexp(significand / scale, exponent + 1)
23,831 | from typing import NamedTuple
from typing import Iterator, Iterable
import math
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `block_float` function. Write a Python function `def block_float(x, precision, block_size, block_axis)` to solve the following problem:
Forces a block of floating-point numbers to share an exponent, which is the maximum exponent of the numbers in the block. Numbers with non-maximum exponents lose precision and may become zero. Args: x (ArrayLike): Input array block_size (int): Every consecutive `block_size` numbers along `block_axis` share an exponent. block_axis (int): Use this axis in `x` for grouping adjacent numbers into blocks. Returns: An array of the same shape as `x` as block-floating point numbers. Blocks of `block_size` numbers use the same exponent.
Here is the function:
def block_float(x, precision, block_size, block_axis):
  """Forces each block of floats to share the block's maximum exponent.

  Values whose exponent is below the block maximum lose low-order significand
  bits and may flush to zero entirely.

  Args:
    x (ArrayLike): Input array
    precision (int): Significand bits kept for the max-exponent values.
    block_size (int): Every consecutive `block_size` numbers along
      `block_axis` share one exponent.
    block_axis (int): Axis of `x` along which adjacent numbers are grouped
      into blocks.

  Returns:
    Array shaped like `x`, quantized to block-floating-point.
  """
  # Split block_axis into (n_blocks, block_size); -1 infers n_blocks.
  blocked_shape = list(x.shape) + [block_size]
  blocked_shape[block_axis] = -1
  mantissa, exponent = np.frexp(x.reshape(blocked_shape))
  # Shared exponent per block = max over the trailing block dimension.
  shared_exp = exponent.max(axis=-1, keepdims=True)
  shift = shared_exp - exponent  # extra right-shift for non-max elements
  quantized = (
      np.rint(mantissa * np.power(2.0, precision - shift)) *
      np.power(2.0, shift - precision))
  return np.ldexp(quantized, exponent).reshape(x.shape)
23,832 | import torch
import numpy as np
from enum import Enum, unique
def sqrt(input):
  """Bfloat16-emulated sqrt(x) built on the fast inverse-square-root trick.

  An initial 1/sqrt(x) estimate is produced by integer manipulation of the
  float32 bit pattern (magic constant 0x5f37 applied to the top 15 bits),
  refined with a single Newton step, then multiplied by x to recover
  sqrt(x). Every arithmetic result is rounded through bfloat16 to model
  reduced-precision hardware. Statement order is significant: each rounding
  step feeds the next.
  """
  x = input.to(torch.float32) # input tensor, float32
  x2 = x*0.5 # float32
  magic = 0x5f37
  y = np.float32(x.cpu().detach().numpy()) # float32
  i = y.view(np.int32) # int32, reinterpret float bits (no conversion)
  i = (magic - np.int32(i >> 17)) << 16 # int32, magic initial guess on upper bits
  y = i.view(np.float32) # int32 to float32
  y = torch.from_numpy(y).to(x.device).to(x.dtype) # float32, initial value of Newton iteration
  # one step Newton iteration: y = y*(1.5 - (x2*y*y)) = 1.5*y - x2*y*y*y
  y3h = 1.5*y # float32
  y3h = y3h.to(torch.bfloat16).to(torch.float32) # float32 to bfloat16 to float32
  out = y*x2 # float32
  out = out.to(torch.bfloat16).to(torch.float32) # float32 to bfloat16 to float32
  out = out*y
  out = out.to(torch.bfloat16).to(torch.float32) # float32 to bfloat16 to float32
  out = out*y
  out = out.to(torch.bfloat16).to(torch.float32) # float32 to bfloat16 to float32
  out = y3h - out
  out = out.to(torch.bfloat16).to(torch.float32) # float32 to bfloat16 to float32
  # sqrt(x) = x*(1/sqrt(x))
  out = x*out
  out = out.to(torch.bfloat16).to(torch.float32) # float32 to bfloat16 to float32
  return out
23,833 | import torch
import numpy as np
from enum import Enum, unique
def isqrt(input):
  """Bfloat16 fast inverse square root (1/sqrt(x)) with 4 Newton steps.

  Works on the 16-bit bfloat16 bit pattern directly: the magic constant
  0x5f37 minus a round-to-nearest halved exponent field gives the initial
  estimate, which four Newton iterations refine. All intermediates are
  rounded through bfloat16 between operations.

  NOTE(review): depends on the third-party `bfloat16` package (imported
  lazily below), not on torch.bfloat16.
  """
  from bfloat16 import bfloat16
  def downshift_onebit(i): # input: int16, numpy ndarray
    # Shift right by one with round-to-nearest: if the kept LSB is odd,
    # add back the bit that was shifted out.
    x = (i >> 1).reshape(-1)
    b = (i&1).reshape(-1)
    y = x
    for j in range(len(x)):
      if x[j]&1 == 1: # odd
        y[j] = y[j] + b[j]
    y = y.reshape(i.shape)
    return y
  input_np = input.detach().cpu().numpy()
  x = input_np.astype(bfloat16).astype(np.float32) # input tensor, bfloat16
  x2 = x*0.5
  x2 = x2.astype(bfloat16).astype(np.float32)
  y = input_np.astype(bfloat16) # bfloat16
  i = y.view(np.int16) # int16, reinterpret bfloat16 bits
  i = 0x5f37 - downshift_onebit(i)
  y = i.view(bfloat16)
  y = y.astype(np.float32)
  # 4-steps-Newton iteration: y = y*(1.5 - (x2*y*y))
  for i in range(4):
    y2 = y*y
    y2 = y2.astype(bfloat16).astype(np.float32)
    mul2 = x2*y2
    mul2 = mul2.astype(bfloat16).astype(np.float32)
    sub = 1.5 - mul2
    sub = sub.astype(bfloat16).astype(np.float32)
    mul = y*sub
    y = mul.astype(bfloat16).astype(np.float32)
  return torch.from_numpy(y).to(input.device)
23,834 | import torch
import numpy as np
from enum import Enum, unique
def invsqrt(number):
  """Quake-style fast inverse square root (1/sqrt(x)) on a numpy array.

  Builds an initial estimate by integer manipulation of the float32 bit
  pattern (magic constant 0x5f3759df), then refines it with four Newton
  iterations. Input must be a numpy array; result is float32.
  """
  half = number.astype(np.float32) * 0.5
  estimate = number.astype(np.float32)
  bits = estimate.view(np.int32)       # reinterpret float32 bits as int32
  bits = 0x5f3759df - (bits >> 1)      # magic-constant initial guess
  estimate = bits.view(np.float32)
  # Four Newton refinement steps: y <- y * (1.5 - x/2 * y^2)
  for _ in range(4):
    estimate = estimate * (1.5 - (half * estimate * estimate))
  return estimate
23,835 | import torch
import numpy as np
from enum import Enum, unique
def aie_add_v8(v8): # input: tensor (L, 8)
  """Pairwise tree-reduce eight lanes to one, mirroring AIE vector adds."""
  halves = v8[:, :4] + v8[:, 4:]
  quarters = halves[:, :2] + halves[:, 2:]
  return quarters[:, 0] + quarters[:, 1] # output: tensor (L,)
23,836 | import torch
import numpy as np
from enum import Enum, unique
def aie_add_v16(v16): # input: tensor (L, 16)
  """Pairwise tree-reduce sixteen lanes to one, mirroring AIE vector adds."""
  eighths = v16[:, :8] + v16[:, 8:]
  quarters = eighths[:, :4] + eighths[:, 4:]
  pairs = quarters[:, :2] + quarters[:, 2:]
  return pairs[:, 0] + pairs[:, 1] # output: tensor (L,)
23,837 | import inspect
from typing import Callable, Dict, Tuple
import torch
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP
from .torch_const import TorchOpClassType
from .jit_utils import find_builtin, modules_containing_builtins, builtin_ops
from .schema import SchemaHelper
def get_torch_op_attr_map():
  """Return the torch_op_type -> attributes map, failing fast if unbuilt."""
  global _TORCH_OP_ATTR_MAP
  if not _TORCH_OP_ATTR_MAP:
    raise Exception(
        'please build the torch_op_type - > torch_op_attributes map')
  return _TORCH_OP_ATTR_MAP
from functools import namedtuple
def get_torch_op_attr(torch_op_type):
  """Look up the attribute record for *torch_op_type*; raise if unknown."""
  attr_map = get_torch_op_attr_map()
  if attr_map.get(torch_op_type, None) is None:
    raise Exception(
        'pleas check torch op attribute :"{}"'.format(torch_op_type))
  return attr_map[torch_op_type]
23,838 | import inspect
from typing import Callable, Dict, Tuple
import torch
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP
from .torch_const import TorchOpClassType
from .jit_utils import find_builtin, modules_containing_builtins, builtin_ops
from .schema import SchemaHelper
from functools import namedtuple
# (name, bound callable or None, TorchOpClassType) — one record per op schema.
TorchOp = namedtuple("TorchOp", ["name", "caller", "op_class_type"])
def _get_tensor_ops():
  """Register every public torch.Tensor method schema as a TENSOR-class op.

  Uses _hidden and _make_pair, both defined elsewhere in this module.
  """
  def is_tensor_method(schema):
    # A tensor method's first argument must be a Tensor named 'self'.
    if len(schema.arguments) == 0:
      return False
    self = schema.arguments[0]
    if self.name != 'self':
      return False
    if not self.type.isSubtypeOf(torch._C.TensorType.get()):
      return False
    return True
  # discover methods
  for elem in dir(torch.Tensor):
    if not _hidden(elem):
      schemas = torch._C._jit_get_schemas_for_operator("aten::" + elem)
      for schema in schemas:
        if is_tensor_method(schema):
          # aten_schema = SchemaWrapper(f"aten::{elem}", schema)
          torchop = TorchOp(name=elem, caller=None, op_class_type=TorchOpClassType.TENSOR)
          _make_pair(schema, torchop)
def _get_nn_functional_ops():
  """Register schemas from torch.nn.functional and builtin-heavy modules.

  Scripts each torch.nn.functional function to obtain its schema; functions
  that cannot be scripted (e.g. interpolate, boolean-dispatched wrappers)
  are silently skipped. Then registers builtins found in
  modules_containing_builtins(), classing torch._C._nn entries as
  NN_CORE_FUNCTION and the rest as TORCH_FUNCTION.
  """
  # Iterate over torch.nn.functional
  mod = torch.nn.functional
  name = mod.__name__
  for elem in dir(torch.nn.functional):
    attr = getattr(mod, elem)
    if not inspect.isfunction(attr) or _hidden(elem[0]):
      # Ignore non-functions and internal methods
      continue
    attr_module = inspect.getmodule(attr)
    if not attr_module:
      raise RuntimeError(f'Module for {attr} not found')
    if 'torch.nn.functional' not in attr_module.__name__:
      # Ignore functions from outside torch.nn.functional
      continue
    try:
      # compile fn, get schema
      scripted = torch.jit.script(attr)
      schema = scripted.schema
      torchop = TorchOp(name=elem, caller=attr, op_class_type=TorchOpClassType.NN_FUNCTION)
      _make_pair(schema, torchop)
    except: # noqa
      # Skip interpolate / boolean dispatched things
      pass
  # Iterate over modules that we know contain a lot of builtins
  for mod in modules_containing_builtins():
    name = mod.__name__
    for elem in dir(mod):
      builtin = find_builtin(getattr(mod, elem))
      if builtin is not None:
        schemas = torch._C._jit_get_schemas_for_operator(builtin)
        for schema in schemas:
          # remove _tan but not __and__
          if not _hidden(elem):
            # aten_schema = SchemaWrapper(f"{builtin}", schema)
            if name == "torch._C._nn":
              torchop = TorchOp(name=elem, caller=getattr(mod, elem), op_class_type=TorchOpClassType.NN_CORE_FUNCTION)
            else:
              torchop = TorchOp(name=elem, caller=getattr(mod, elem), op_class_type=TorchOpClassType.TORCH_FUNCTION)
            _make_pair(schema, torchop)
def _get_torchscript_builtins():
  """Register the non-math TorchScript builtin functions.

  Filters _get_builtins_helper() (defined elsewhere in this module) down to
  non-math entries and registers each resolvable builtin's schemas as
  TORCH_SCRIPT_BUILTIN_FUNCTION ops.
  """
  builtins = filter(lambda fn: not _is_math_fn(fn[0]), _get_builtins_helper())
  builtins_list = list(builtins)
  # Iterate over the specially added builtins
  for fn, _builtin_name in builtins_list:
    mod = inspect.getmodule(fn)
    if not mod:
      raise RuntimeError(f'Module for {fn} not found')
    builtin = find_builtin(fn)
    if builtin is not None:
      schemas = torch._C._jit_get_schemas_for_operator(builtin)
      for schema in schemas:
        # aten_schema = SchemaWrapper(f"{builtin}", schema)
        torchop = TorchOp(name=builtin, caller=fn, op_class_type=TorchOpClassType.TORCH_SCRIPT_BUILTIN_FUNCTION)
        _make_pair(schema, torchop)
def _get_math_builtins():
  """Register math-module TorchScript builtins as MATH_BUILTIN_FUNCTION ops.

  Tensor-typed schemas are skipped here because the same names are already
  captured by the tensor-method pass.
  """
  builtins = filter(lambda fn: _is_math_fn(fn[0]), _get_builtins_helper())
  builtins_list = list(builtins)
  # Iterate over the specially added builtins
  for fn, _builtin_name in builtins_list:
    mod = inspect.getmodule(fn)
    if not mod:
      raise RuntimeError(f'Module for {fn} not found')
    builtin = find_builtin(fn)
    if builtin is not None:
      schemas = torch._C._jit_get_schemas_for_operator(builtin)
      for schema in schemas:
        # aten_schema = SchemaWrapper(f"{builtin}", schema)
        schema_str = SchemaHelper(schema).toString()
        if 'Tensor' in schema_str:
          # Skip Tensor ops that have the same name as math functions
          # (they will show up in the tensor methods section)
          continue
        torchop = TorchOp(name=builtin, caller=fn, op_class_type=TorchOpClassType.MATH_BUILTIN_FUNCTION)
        _make_pair(schema, torchop)
def _get_global_builtins():
  """Register Python global builtins supported by the TorchScript frontend.

  Each supported builtin is mapped to its aten/prim operator name (with a
  few special renames) and registered as a GLOBAL_BUILTIN_FUNCTION op.
  """
  # Taken from the 'globals' map in torch/csrc/jit/frontend/ir_emitter.cpp
  supported_builtins = [
    'print',
    'tuple',
    'float',
    'int',
    'bool',
    'str',
    'getattr',
    'hasattr',
    'isinstance',
    'len',
    'hex',
    'oct',
    'round',
    'hash',
    'min',
    'max',
    'abs',
    'all',
    'divmod',
    'list',
    'ord',
    'chr',
    'bin',
    'range',
    'zip',
    'enumerate',
    'sorted',
  ]
  # Builtins whose operator name does not follow the default aten::<name>.
  op_renames = {
    'bool': 'aten::Bool',
    'int': 'aten::Int',
    'float': 'aten::Float',
    'abs': 'prim::abs',
    'max': 'prim::max',
    'min': 'prim::min',
    'range': 'fake::does_not_exist',
  }
  for fn in supported_builtins:
    op_name = 'aten::{}'.format(fn)
    if fn in op_renames:
      op_name = op_renames[fn]
    schemas = torch._C._jit_get_schemas_for_operator(op_name)
    for s in schemas:
      # aten_schema = SchemaWrapper(f"{op_name}", s)
      torchop = TorchOp(name=op_name, caller=__builtins__[fn], op_class_type=TorchOpClassType.GLOBAL_BUILTIN_FUNCTION)
      _make_pair(s, torchop)
# NOTE(review): this rebinds the GLOBAL_MAP imported above to a fresh
# GlobalMap instance; GlobalMap itself is defined elsewhere — confirm this
# shadowing is intentional.
GLOBAL_MAP = GlobalMap()
def build_aten_torch_ops_table():
  """Build (once) the global schema -> TorchOp lookup table.

  Runs all the op-gathering passes above, which populate the table stored
  under NNDCT_KEYS.TORCH_SCHEMA_OP_TABLE in GLOBAL_MAP. If the table
  already exists, the passes extend it in place.
  """
  op_gathering_fns = (_get_tensor_ops,
                      _get_nn_functional_ops,
                      _get_torchscript_builtins,
                      _get_global_builtins,
                      _get_math_builtins,
                      )
  schema2torchop = GLOBAL_MAP.get_ele(NNDCT_KEYS.TORCH_SCHEMA_OP_TABLE)
  # schema_lut = GLOBAL_MAP.get_ele(NNDCT_KEYS.SCHEMA_LUT)
  if not schema2torchop:
    schema2torchop: Dict[str, TorchOp] = {}
    GLOBAL_MAP.set_map(NNDCT_KEYS.TORCH_SCHEMA_OP_TABLE, schema2torchop)
    # schema_lut: Dict[Tuple(str, int), "Schema"] = {}
  for fn in op_gathering_fns:
    fn()
  # for key, value in schema2torchop.items():
  #   schema_str = SchemaHelper(key).toString()
  #   print(key.name, schema_str)
  #   parsed_schema = torch._C.parse_schema(schema_str)
  #   if SchemaHelper(parsed_schema).toString() != schema_str:
  #     print(f"parsed_schema:{SchemaHelper(parsed_schema).toString()}")
  #     print(f"schema_str:{schema_str}")
  #     assert False
23,839 | import re
def convert_type_str(typing_str):
if '[' not in typing_str:
return typing_str
special_type = typing_str.split('[')[0]
pattern, rlp_fn = patterns_rlp_pair[special_type]
return pattern.sub(rlp_fn, typing_str)
def _match_optional(match):
typing_str = match[1]
return convert_type_str(typing_str) + '?' | null |
23,840 | import re
def convert_type_str(typing_str):
if '[' not in typing_str:
return typing_str
special_type = typing_str.split('[')[0]
pattern, rlp_fn = patterns_rlp_pair[special_type]
return pattern.sub(rlp_fn, typing_str)
def _match_list(match):
typing_str = match[1]
return convert_type_str(typing_str) + '[]' | null |
23,841 | import re
def convert_type_str(typing_str):
if '[' not in typing_str:
return typing_str
special_type = typing_str.split('[')[0]
pattern, rlp_fn = patterns_rlp_pair[special_type]
return pattern.sub(rlp_fn, typing_str)
def _match_tuple(match):
x_typing_str = match[1]
y_typing_str = match[2]
return '(' + convert_type_str(x_typing_str) + ', ' + convert_type_str(y_typing_str) + ')' | null |
23,842 | import re
def convert_type_str(typing_str):
if '[' not in typing_str:
return typing_str
special_type = typing_str.split('[')[0]
pattern, rlp_fn = patterns_rlp_pair[special_type]
return pattern.sub(rlp_fn, typing_str)
def _match_dict(match):
x_typing_str = match[1]
y_typing_str = match[2]
return 'Dict(' + convert_type_str(x_typing_str) + ', ' + convert_type_str(y_typing_str) + ')' | null |
23,843 | import re
def convert_type_str(typing_str):
  # NOTE(review): this variant looks truncated — only the nested regex
  # callback is visible and the outer function has no return statement
  # here; confirm against the original module before relying on it.
  def _match_brackets(match):
    # Regex callback: Outer[Inner] -> Outer(Inner), recursing into Inner.
    outer_type_str = match[1]
    inner_type_str = match[2]
    return outer_type_str + '(' + convert_type_str(inner_type_str) + ')'
23,844 | from enum import Enum, unique
def get_enum_val(en):
  """Unwrap an enum member (or any object exposing ``.value``)."""
  return getattr(en, "value")
23,845 | from glob import glob
from nndct_shared.base import NNDCT_OP
from nndct_shared.utils import NndctScreenLogger
def get_nndct_op_2_torch_op_map():
  """Return the nndct_op -> torch_op map, failing fast if unbuilt."""
  global _NNDCT_OP_2_TORCH_OP
  if not _NNDCT_OP_2_TORCH_OP:
    raise Exception('please build the nndct_op -> torch_op map')
  return _NNDCT_OP_2_TORCH_OP
def get_torch_op_type(nndct_op_type):
  """Translate an NNDCT op type into its registered torch op type."""
  # if nndct_op_type in TORCH_UNSUPPORTED_NNDCTOPS:
  #   return nndct_op_type
  op_map = get_nndct_op_2_torch_op_map()
  if op_map.get(nndct_op_type, None) is None:
    raise Exception('please register the operator:"{}"'.format(nndct_op_type))
  return op_map[nndct_op_type]
23,846 | from glob import glob
from nndct_shared.base import NNDCT_OP
from nndct_shared.utils import NndctScreenLogger
def get_torch_op_2_nndct_op_map():
  """Return the torch_op -> nndct_op map, failing fast if unbuilt."""
  global _TORCH_OP_2_NNDCT_OP
  if len(_TORCH_OP_2_NNDCT_OP) == 0:
    raise Exception('please build the torch_op -> nndct_op map')
  return _TORCH_OP_2_NNDCT_OP
def get_nndct_op_type(torch_op_type):
  """Translate a torch op type to NNDCT; warn and return None if unmapped."""
  if get_torch_op_2_nndct_op_map().get(torch_op_type, None) is None:
    #raise Exception('please register the operator:"{}"'.format(torch_op_type))
    # Unknown types are non-fatal: warn the user and fall through to None.
    NndctScreenLogger().warning('There is no "{}" layer in this model, please remove "{}" \
    configuration in config file'.format(torch_op_type, torch_op_type))
    return
  else:
    return get_torch_op_2_nndct_op_map()[torch_op_type]
23,847 | import time
import torch
from torch import nn
from nndct_shared.utils import common
from pytorch_nndct.utils import logging
from pytorch_nndct.utils import torch_utils
class MetricName(object):
  """String keys under which per-module cost metrics are accumulated."""
  MACs = 'MACs'
  FLOPs = 'FLOPs'
  TrainableParams = 'trainable'
  NonTrainableParams = 'non-trainable'
def _accumulate_metric_value(module, metric_name, value):
  """Add *value* onto ``module.<metric_name>`` iff the attribute exists."""
  if not hasattr(module, metric_name):
    return
  setattr(module, metric_name, getattr(module, metric_name) + value)
def count_convNd(module, input, output):
  """Accumulate MAC/FLOP counts for one N-d convolution forward pass."""
  # kw x kh — number of spatial elements per kernel
  kernel_elems = (module.weight.size()[2:]).numel()
  has_bias = 1 if module.bias is not None else 0
  out_single = output[0]  # drop the batch dimension
  # c_out x w x h x (c_in/groups x kw x kh + bias)
  macs = out_single.numel() * (
      module.in_channels // module.groups * kernel_elems)
  flops = 2 * macs + has_bias * out_single.numel()
  _accumulate_metric_value(module, MetricName.MACs, macs)
  _accumulate_metric_value(module, MetricName.FLOPs, flops)
23,848 | import time
import torch
from torch import nn
from nndct_shared.utils import common
from pytorch_nndct.utils import logging
from pytorch_nndct.utils import torch_utils
class MetricName(object):
  """String keys under which per-module cost metrics are accumulated."""
  MACs = 'MACs'
  FLOPs = 'FLOPs'
  TrainableParams = 'trainable'
  NonTrainableParams = 'non-trainable'
def _accumulate_metric_value(module, metric_name, value):
  """Add *value* onto ``module.<metric_name>`` iff the attribute exists."""
  if not hasattr(module, metric_name):
    return
  setattr(module, metric_name, getattr(module, metric_name) + value)
def count_linear(module, input, output):
  """Accumulate MAC/FLOP counts for one Linear pass: (N,*,Hin) -> (N,*,Hout)."""
  has_bias = 1 if module.bias is not None else 0
  macs = module.in_features * output[0].numel()
  flops = 2 * macs + has_bias * output.shape[-1]
  _accumulate_metric_value(module, MetricName.MACs, macs)
  _accumulate_metric_value(module, MetricName.FLOPs, flops)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.