| id (int64, 0–190k) | prompt (string, length 21–13.4M) | docstring (string, length 1–12k, nullable) |
|---|---|---|
23,126 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import List, Mapping, Any, Union, Tuple
import collections
from nndct_shared.base.key_names import NNDCT_OP as OpTypes
from nndct_shared.nndct_graph.base_node import Node
from nndct_shared.metaclass import Singleton
from nndct_shared.nndct_graph.base_graph import Graph
from nndct_shared.pruning import errors
from nndct_shared.pruning import logging
from nndct_shared.pruning import node_group as node_group_lib
from nndct_shared.utils import registry
def find_prunable_ancestor(graph, node, target_ops=CONV_OPS):
def modify_concat(graph, node, pruning_res):
out_dim_missing = False
for tensor in node.in_tensors:
input_node = tensor.node
input_pruning = pruning_res[input_node.name]
if not input_pruning.has_out_dim():
out_dim_missing = True
break
node_pruning = pruning_res[node.name]
cur_offset = 0
out_dim = 0
removed_outputs = []
for tensor in node.in_tensors:
input_node = tensor.node
input_pruning = pruning_res[input_node.name]
if input_pruning.removed_outputs and out_dim_missing:
upstream_conv = find_prunable_ancestor(graph, input_node)
raise errors.OptimizerNotExcludeNodeError(
'Must exclude node from pruning: {}.'.format(upstream_conv.name))
if not out_dim_missing:
for ro in input_pruning.removed_outputs:
removed_outputs.append(ro + cur_offset)
out_dim += input_pruning.out_dim
cur_offset += (len(input_pruning.removed_outputs) + input_pruning.out_dim)
node_pruning.removed_outputs = removed_outputs
node_pruning.out_dim = out_dim
# update removed_inputs & in_dim
node_pruning.removed_inputs = node_pruning.removed_outputs
node_pruning.in_dim = node_pruning.out_dim | null |
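As a hedged illustration of the offset bookkeeping in `modify_concat` above, here is a minimal, self-contained sketch; `InputPruning` and `concat_pruning` are hypothetical stand-ins for the real nndct_shared pruning record, not the library's API:

```python
from dataclasses import dataclass, field
from typing import List

@dataclass
class InputPruning:
  """Stand-in for a per-node pruning record: kept channels plus removed channel indices."""
  out_dim: int
  removed_outputs: List[int] = field(default_factory=list)

def concat_pruning(inputs: List[InputPruning]):
  """Merge per-input pruning records into one record for the concat output."""
  removed_outputs, out_dim, cur_offset = [], 0, 0
  for inp in inputs:
    # Shift removed indices by the channels already consumed by earlier inputs.
    removed_outputs.extend(ro + cur_offset for ro in inp.removed_outputs)
    out_dim += inp.out_dim
    # The original width of this input is kept channels + removed channels.
    cur_offset += len(inp.removed_outputs) + inp.out_dim
  return removed_outputs, out_dim

# First input: 4 original channels, channel 1 removed; second: 3 channels, 0 and 2 removed.
print(concat_pruning([InputPruning(3, [1]), InputPruning(1, [0, 2])]))
# -> ([1, 4, 6], 4): the second input's indices are shifted by the first input's full width (4).
```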
23,127 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import List, Mapping, Any, Union, Tuple
import collections
from nndct_shared.base.key_names import NNDCT_OP as OpTypes
from nndct_shared.nndct_graph.base_node import Node
from nndct_shared.metaclass import Singleton
from nndct_shared.nndct_graph.base_graph import Graph
from nndct_shared.pruning import errors
from nndct_shared.pruning import logging
from nndct_shared.pruning import node_group as node_group_lib
from nndct_shared.utils import registry
CONV_OPS = [
OpTypes.CONV2D, OpTypes.CONVTRANSPOSE2D, OpTypes.CONV3D,
OpTypes.CONVTRANSPOSE3D, OpTypes.SEPARABLECONV2D
]
def find_prunable_ancestor(graph, node, target_ops=CONV_OPS):
return find_ancestor(graph, node, target_ops, [OpTypes.CONCAT])
def raise_if_has_pruned_input(graph, node, pruning_res):
for node_name in node.in_nodes:
input_pruning = pruning_res[node_name]
if input_pruning.removed_outputs:
input_node = graph.node(node_name)
if input_node.op.type in CONV_OPS:
prunable_node = input_node
else:
prunable_node = find_prunable_ancestor(graph, input_node)
raise errors.OptimizerNotExcludeNodeError(
('Must exclude node from pruning: {}. '
'Operation "{}" cannot take pruned tensor as input.').format(
prunable_node.name, node.op.type)) | null |
23,128 | from collections import deque
def graph_search_handler(start_node,
generator,
frontier,
handler=None,
gen_params={}):
class FIFOQueue(Queue):
def __init__(self):
def append(self, item):
def __len__(self):
def pop(self):
def __contains__(self, item):
def breadth_first_search_handler(start_node,
generator,
handler=None,
gen_params={}):
return graph_search_handler(
start_node,
generator,
frontier=FIFOQueue(),
handler=handler,
gen_params=gen_params) | null |
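The row above keeps only the signatures of `graph_search_handler` and `FIFOQueue`; the following self-contained sketch shows the same pattern under assumed semantics (the bodies here are guesses, not the nndct_shared implementation):

```python
from collections import deque

class FIFOQueue:
  """Frontier with queue semantics, so graph_search_handler walks breadth-first."""
  def __init__(self):
    self._items = deque()
  def append(self, item):
    self._items.append(item)
  def pop(self):
    return self._items.popleft()  # FIFO: oldest discovered node first
  def __len__(self):
    return len(self._items)
  def __contains__(self, item):
    return item in self._items

def graph_search_handler(start_node, generator, frontier, handler=None, gen_params=None):
  """Visit every node reachable from start_node; generator(node) yields its successors."""
  gen_params = gen_params or {}
  visited = set()
  frontier.append(start_node)
  while len(frontier):
    node = frontier.pop()
    if node in visited:
      continue
    visited.add(node)
    if handler is not None:
      handler(node)
    for child in generator(node, **gen_params):
      if child not in visited and child not in frontier:
        frontier.append(child)

def breadth_first_search_handler(start_node, generator, handler=None, gen_params=None):
  return graph_search_handler(start_node, generator, FIFOQueue(), handler, gen_params)

# Example over a tiny adjacency dict: prints a, b, c, d in BFS order.
adj = {"a": ["b", "c"], "b": ["d"], "c": [], "d": []}
breadth_first_search_handler("a", lambda n: adj[n], handler=print)
```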
23,129 | import sys
from typing import List, Optional
from nndct_shared.base import NNDCT_OP
from nndct_shared.nndct_graph import Graph, Operation, Node, Block
from nndct_shared.nndct_graph import operator_definition as base_op
from nndct_shared.utils import NndctOption, NndctScreenLogger
def reset_group_members(graph, groups, host, servent):
if servent not in groups[host]:
members = sorted(groups[host] + [servent], key=lambda n: graph.node(n).idx)
groups[host] = members
groups[servent] = members
return groups | null |
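A hedged usage sketch of `reset_group_members`: the `_FakeNode`/`_FakeGraph` classes below are illustrative stand-ins for the real nndct graph types, chosen only to show the sort-by-index and list-sharing behavior:

```python
class _FakeNode:
  def __init__(self, idx):
    self.idx = idx

class _FakeGraph:
  def __init__(self, indices):
    self._nodes = {name: _FakeNode(i) for name, i in indices.items()}
  def node(self, name):
    return self._nodes[name]

graph = _FakeGraph({"conv1": 0, "bn1": 1, "relu1": 2})
groups = {name: [name] for name in ("conv1", "bn1", "relu1")}

groups = reset_group_members(graph, groups, host="conv1", servent="bn1")
groups = reset_group_members(graph, groups, host="conv1", servent="relu1")
print(groups["conv1"])                      # ['conv1', 'bn1', 'relu1'], sorted by node idx
print(groups["relu1"] is groups["conv1"])   # True: host and servent share one member list
```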
23,130 | import sys
from typing import List, Optional
from nndct_shared.base import NNDCT_OP
from nndct_shared.nndct_graph import Graph, Operation, Node, Block
from nndct_shared.nndct_graph import operator_definition as base_op
from nndct_shared.utils import NndctOption, NndctScreenLogger
def glue_group_members(graph, groups, start_node, c_node):
assert groups[c_node][0] == c_node
name_lst = groups[start_node]
for g in groups[c_node]:
if g not in name_lst:
name_lst.append(g)
for n in name_lst:
groups[n] = name_lst
return groups
def group_up(graph, groups, OpType=None, POpType=None):
def __is_valid_parent(node):
if len(graph.children(node)) > 1 or node.op.is_custom_op:
return False
if POpType is None:
return True
elif node.op.type == POpType:
return True
return False
for n in graph.all_nodes():
if not n.in_quant_part or n.blocks:
continue
if NndctOption.nndct_stat.value > 2:
print('node name: {} parent number: {}'.format(n.name, len(graph.parents(n.name))))
if groups[n.name][0] == n.name and n.op.type == OpType and \
len(graph.parents(n.name)) == 1 and __is_valid_parent(graph.parents(n.name)[0]):
start_node = groups[graph.parents(n.name)[0].name][0]
groups = glue_group_members(graph, groups, start_node, n.name)
if NndctOption.nndct_stat.value > 2:
print('---- Grouping node %s and %s' % (start_node, n.name))
return groups | null |
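A small usage sketch of `glue_group_members` as defined above, with an illustrative groups dict; the graph argument is unused inside that helper, so a placeholder suffices here:

```python
groups = {
    "conv1": ["conv1"],
    "bn1":   ["bn1"],
    "relu1": ["relu1", "add1"],
    "add1":  ["relu1", "add1"],
}
groups = glue_group_members(None, groups, start_node="conv1", c_node="relu1")
print(groups["conv1"])                     # ['conv1', 'relu1', 'add1']
print(groups["add1"] is groups["conv1"])   # True: every member now points at the same list
```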
23,131 | import sys
from typing import List, Optional
from nndct_shared.base import NNDCT_OP
from nndct_shared.nndct_graph import Graph, Operation, Node, Block
from nndct_shared.nndct_graph import operator_definition as base_op
from nndct_shared.utils import NndctOption, NndctScreenLogger
def reorder_multi_subgraph_nodes(graphs: List[Graph]) -> None:
node_index = 0
for graph in graphs:
graph.clear_node_id_map()
for node in graph.nodes:
node.idx = node_index
node_index += 1 | null |
23,132 | import sys
from typing import List, Optional
from nndct_shared.base import NNDCT_OP
from nndct_shared.nndct_graph import Graph, Operation, Node, Block
from nndct_shared.nndct_graph import operator_definition as base_op
from nndct_shared.utils import NndctOption, NndctScreenLogger
def merge_multi_subgraphs(graphs: List[Graph],
graph_name="Nndctgraph") -> Graph:
top_graph = Graph(graph_name)
for graph in graphs:
for node in graph.nodes:
top_graph.add_node(node)
return top_graph | null |
23,133 | import sys
from typing import List, Optional
from nndct_shared.base import NNDCT_OP
from nndct_shared.nndct_graph import Graph, Operation, Node, Block
from nndct_shared.nndct_graph import operator_definition as base_op
from nndct_shared.utils import NndctOption, NndctScreenLogger
def convert_graph_to_block_node(top_graph, graph):
def merge_multi_graphs_to_single_graph(graphs, graph_name="Nndctgraph"):
top_graph = Graph(graph_name)
op = base_op.CustomOp(NNDCT_OP.PLACEHOLDER)
input_node = Node(name="input_placeholder", op=op, in_quant_part=False)
input_node.owning_graph = top_graph
op = base_op.CustomOp(NNDCT_OP.PLACEHOLDER)
return_node = Node(name="return_placeholder", op=op, in_quant_part=False)
return_node.owning_graph = top_graph
top_block = Block(top_graph, None, input_node, return_node)
top_graph.set_top_block(top_block)
for graph in graphs:
block_node = convert_graph_to_block_node(top_graph, graph)
if not block_node.in_node_list():
top_graph.append_node(block_node)
return top_graph | null |
23,134 | import sys
from typing import List, Optional
from nndct_shared.base import NNDCT_OP
from nndct_shared.nndct_graph import Graph, Operation, Node, Block
from nndct_shared.nndct_graph import operator_definition as base_op
from nndct_shared.utils import NndctOption, NndctScreenLogger
def convert_block_node_to_graph(block_node):
top_graph = Graph(block_node.name)
top_block = block_node.blocks[0]
top_graph.add_block(top_block)
top_graph.set_top_block(top_block)
top_block.owning_graph = top_graph
def node_visitor(node, fn):
if node.blocks:
for block in node.blocks:
for b_n in block:
node_visitor(b_n, fn)
else:
fn(node)
def visit_fn(n):
n.owning_graph = top_graph
for i_t in n.in_tensors:
top_graph.add_tensor(i_t)
for o_t in n.out_tensors:
top_graph.add_tensor(o_t)
for _, p_t in n.op.params.items():
top_graph.add_param_name(p_t.name)
for node in top_block.nodes:
node_visitor(node, visit_fn)
return top_graph | null |
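A hedged sketch of the nested-block traversal that `convert_block_node_to_graph` relies on; `_MockNode` and the plain-list block are assumptions made only to show the recursion, not the real nndct Node/Block classes:

```python
class _MockNode:
  def __init__(self, name, blocks=None):
    self.name = name
    self.blocks = blocks or []   # non-empty only for control-flow nodes (If/Loop)

def node_visitor(node, fn):
  if node.blocks:
    for block in node.blocks:
      for b_n in block:
        node_visitor(b_n, fn)    # recurse into the child node, not the parent
  else:
    fn(node)

leaves = [_MockNode("conv"), _MockNode("relu")]
loop = _MockNode("loop", blocks=[leaves])    # a block is modeled as a plain list of nodes
node_visitor(loop, lambda n: print(n.name))  # prints: conv, relu
```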
23,135 | import json
import numpy as np
from collections import OrderedDict
from enum import Enum, auto
from functools import partial
from typing import Dict, List, Callable, Optional, Union, Any, Set
from nndct_shared.nndct_graph.base_tensor import Tensor
from nndct_shared.utils.common import AutoName
The provided code snippet includes necessary dependencies for implementing the `_default_read_and_write_value` function. Write a Python function `def _default_read_and_write_value(value_mem: List[Any], in_out: int, attr_value: Optional[Any] = None)` to solve the following problem:
r""" if in_out == 0 stamp value in memory, otherwise get memory_value
Here is the function:
def _default_read_and_write_value(value_mem: List[Any],
in_out: int,
attr_value: Optional[Any] = None):
r""" if in_out == 0 stamp value in memory, otherwise get memory_value"""
if in_out == 0:
if isinstance(attr_value, (list, tuple, set)):
value_mem[:] = list(attr_value)
else:
value_mem[:] = [attr_value]
else:
#if len(value_mem) == 1:
if len(value_mem) == 1 and (not isinstance(value_mem[0], (tuple, list))):
return value_mem[0]
else:
return value_mem[:] | r""" if in_out == 0 stamp value in memory, otherwise get memory_value |
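A quick usage sketch of `_default_read_and_write_value` as defined above:

```python
mem = []  # the same list object serves as the "memory" cell

# in_out == 0: write the attribute value into memory.
_default_read_and_write_value(mem, 0, attr_value=(3, 3))
print(mem)                                    # [3, 3]

# in_out != 0: read the memorized value back.
print(_default_read_and_write_value(mem, 1))  # [3, 3]

_default_read_and_write_value(mem, 0, attr_value=7)
print(_default_read_and_write_value(mem, 1))  # 7 -- a single scalar is unwrapped
```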
23,136 | import copy
from collections import defaultdict, deque, namedtuple
from typing import List
from nndct_shared.quantization import BaseQuantizer
from nndct_shared.base import NNDCT_OP
from nndct_shared.nndct_graph import Graph, Node, Tensor, GraphSearcher
from nndct_shared.nndct_graph import operator_definition as base_op
from nndct_shared.utils import (GLOBAL_MAP, NNDCT_KEYS, QError, QWarning,
NndctOption, NndctScreenLogger)
from nndct_shared.utils import PatternType, permute_data
from .fuse_trans_matmul import TransMatmulActvHandler
from .fuse_trans_reduction_op import TransReductionOpActvHandler
from nndct_shared.optimization.fuse_pad import PadFuseHandler
from nndct_shared.optimization.merge_permute_in_linear import PermuteMergeHandler
from nndct_shared.optimization.merge_reshape import ReshapeMergeHandler
from .attr_transform import *
from .op_evaluator import Evaluator
import numpy as np
def get_deploy_graph_infos(quantizer: BaseQuantizer, deploy_graphs: List[Graph]) -> List[DeployGraphInfo]:
graph_quant_info_list = []
quant_groups = copy.deepcopy(quantizer.configer.quant_groups)
quant_config = {"param": {}, "output": {}, "input": {}}
if not NndctOption.nndct_quant_off.value:
quant_config["param"].update(quantizer.quant_config["param"])
quant_config["input"].update(quantizer.quant_config["input"])
for blob_name, quant_info in quantizer.quant_config["output"].items():
if any([blob_name in dev_graph for dev_graph in deploy_graphs]):
quant_config["output"][blob_name] = copy.deepcopy(quant_info)
else:
def find_possible_quant_pn(node):
if len(node.in_nodes) == 1 and len(node.out_tensors) == 1:
pn = quantizer.Nndctgraph.parents(node)[0]
if any([pn.name in dev_graph for dev_graph in deploy_graphs]):
if len(pn.out_tensors) == 1:
return pn
else:
return find_possible_quant_pn(pn)
node = quantizer.Nndctgraph.node(blob_name)
pn = find_possible_quant_pn(node)
if pn is not None and pn.name not in quant_config["output"]:
quant_config['output'][pn.name] = copy.deepcopy(quant_info)
for dev_graph in deploy_graphs:
graph_quant_info_list.append(DeployGraphInfo(dev_graph=dev_graph, quant_info=quant_config))
return graph_quant_info_list
def get_xmodel_and_dump_infos(quantizer: BaseQuantizer, deploy_graphs_list: List[List[Graph]]):
if len(deploy_graphs_list) == 1:
graph_quant_info = get_deploy_graph_infos(quantizer, deploy_graphs_list[0])
return graph_quant_info, graph_quant_info
elif len(deploy_graphs_list) == 2:
xmodel_quant_info = get_deploy_graph_infos(quantizer, deploy_graphs_list[0])
dump_quant_info = get_deploy_graph_infos(quantizer, deploy_graphs_list[1])
return xmodel_quant_info, dump_quant_info
else:
raise RuntimeError("Length of graphs list to deploy should be 1 or 2") | null |
23,137 | import math
import itertools
from typing import List, Dict, Any, NoReturn, Tuple
import numpy as np
from functools import partial
from nndct_shared.base import NNDCT_OP, NNDCT_KEYS
from nndct_shared.nndct_graph import Tensor, Node
from .xgraph import XGraph
from nndct_shared.utils import calculate_op_scale, DataXopError
NndctQuantInfo = Dict[str, Dict[str, List[int]]]
class _Converter:
def to_xir_dtype(cls, numpy_dtype):
def to_xir_dtype_by_string(cls, dtype):
def to_xir_attr_value(cls, node_op_type, nndct_attr_name: str, nndct_attr_value: Any):
def to_numpy_dtype(cls, nndct_dtype):
def shape(xgraph: XGraph, node: Node, quant_config: NndctQuantInfo) -> NoReturn:
def zeros(xgraph: XGraph, node: Node, quant_config: NndctQuantInfo) -> NoReturn:
class XGraph(object):
def __init__(self, name: str):
def _check_inputs(self, input_ops):
def create_const_op(self, name: str, data: Optional[np.ndarray]) -> NoReturn:
def create_input_transpose_ops(self, input_list: List[Op], input_tensors: 'List[base_tensor::Tensor]'):
def create_normal_op(self,
name: str,
kind: str,
tensor: Optional[np.ndarray] = None,
attrs: Optional[Dict[str, Any]] = None,
input_ops: Optional[List[Op]] = None) -> Op:
def create_fixed_const_op(self, name: str, data: np.ndarray,
quant_info: NndctQuantInfo) -> Op:
def create_fixed_normal_op(self,
name: str,
kind: str,
quant_info: NndctQuantInfo,
tensor: Optional[np.ndarray] = None,
attrs: Optional[Dict[str, Any]] = None,
input_ops: Optional[List[Op]] = None) -> Op:
def create_input_fix_ops(self, input_list: List[Op], key_name: str, quant_info: NndctQuantInfo):
def create_fix_op(self, input: Op, key_name: str,
quant_info: NndctQuantInfo, id: Optional[int] = None, post_fix: bool = True) -> Optional[Op]:
def _get_fix_info(name: str, quant_info: NndctQuantInfo) -> Sequence[int]:
def get_op_by_name(self, name: str) -> Op:
def get_op_output_shape(self, name: str) -> List[int]:
def export_to_xmodel(self, fname: str) -> NoReturn:
def export_to_img(self, fname: str) -> NoReturn:
def graph(self):
def data_xop(xgraph: XGraph, node: Node,
quant_config: NndctQuantInfo) -> NoReturn:
shape = node.out_tensors[0].shape
if not shape:
shape = [1]
if shape[0] == 0:
raise DataXopError("data", shape)
# shape = permute_axes(shape, node.transpose_out_order)
try:
out_tensor = np.zeros(shape, dtype=np.float32)
attrs: Dict[str, Any] = {}
attrs["shape"] = shape
attrs["data_type"] = _Converter.to_xir_dtype(out_tensor.dtype.type)
xgraph.create_fixed_normal_op(
node.name, "data", quant_config, tensor=out_tensor, attrs=attrs)
except Exception:
raise DataXopError("data", shape) | null |
23,138 | import math
import itertools
from typing import List, Dict, Any, NoReturn, Tuple
import numpy as np
from functools import partial
from nndct_shared.base import NNDCT_OP, NNDCT_KEYS
from nndct_shared.nndct_graph import Tensor, Node
from .xgraph import XGraph
from nndct_shared.utils import calculate_op_scale, DataXopError
NndctQuantInfo = Dict[str, Dict[str, List[int]]]
class XGraph(object):
def __init__(self, name: str):
self._graph = Graph(name)
self._const_ops: Dict[str, Op] = {}
self._ops: Dict[str, Op] = {}
def _check_inputs(self, input_ops):
if any([ip is None for ip in input_ops]):
raise RuntimeError('The input op is `None`, please check graph.')
def create_const_op(self, name: str, data: Optional[np.ndarray]) -> NoReturn:
if data is not None and data.ndim == 0:
data = np.array([data], data.dtype)
const_op = self._graph.create_const_op(name, data)
if name in self._const_ops:
raise RuntimeError('The const op {} has already in graph'.format(name))
return const_op
def create_input_transpose_ops(self, input_list: List[Op], input_tensors: 'List[base_tensor::Tensor]'):
t_ops = []
for i, (input, tensor) in enumerate(zip(input_list, input_tensors)):
if tensor.ndim in [4, 5]:
attrs: Dict[str, Any] = {}
attrs['order'] = [0, 3, 1, 2] if tensor.ndim == 4 else [0, 4, 3, 1, 2]
input_ops: Dict[str, List[Op]] = {}
input_ops['input'] = [input]
op_name = input.get_name() + NNDCT_KEYS.TRANSPOSE_OP_SUFFIX
t_op = self.get_op_by_name(op_name)
if t_op is None:
t_op = self.create_normal_op(
op_name,
'transpose',
attrs=attrs,
input_ops=input_ops)
t_ops.append(t_op)
else:
t_ops.append(input)
return t_ops
def create_normal_op(self,
name: str,
kind: str,
tensor: Optional[np.ndarray] = None,
attrs: Optional[Dict[str, Any]] = None,
input_ops: Optional[List[Op]] = None) -> Op:
if input_ops is not None:
self._check_inputs(input_ops['input'])
op = self._graph.create_op(name, kind, attrs, input_ops)
return op
def create_fixed_const_op(self, name: str, data: np.ndarray,
quant_info: NndctQuantInfo) -> Op:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
const_op = self.create_const_op(formal_name, data)
# print(const_op.get_name(), "const", const_op.get_output_tensor().dims)
fixed_const_op = self.create_fix_op(const_op, name, quant_info)
return fixed_const_op if fixed_const_op else const_op
def create_fixed_normal_op(self,
name: str,
kind: str,
quant_info: NndctQuantInfo,
tensor: Optional[np.ndarray] = None,
attrs: Optional[Dict[str, Any]] = None,
input_ops: Optional[List[Op]] = None) -> Op:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
op = self.create_normal_op(
name=formal_name,
kind=kind,
tensor=tensor,
attrs=attrs,
input_ops=input_ops)
# print(op.get_name(), kind, op.get_output_tensor().dims)
post_fixed_op = self.create_fix_op(op, name, quant_info)
return post_fixed_op if post_fixed_op else op
def create_input_fix_ops(self, input_list: List[Op], key_name: str, quant_info: NndctQuantInfo):
pre_fix_ops = []
for i, op in enumerate(input_list):
pre_fix_op = self.create_fix_op(op, key_name, quant_info, id=i, post_fix=False)
if pre_fix_op:
pre_fix_ops.append(pre_fix_op)
else:
pre_fix_ops.append(op)
return pre_fix_ops
def create_fix_op(self, input: Op, key_name: str,
quant_info: NndctQuantInfo, id: Optional[int] = None, post_fix: bool = True) -> Optional[Op]:
def _get_fix_info(name: str, quant_info: NndctQuantInfo) -> Sequence[int]:
if post_fix:
combinded_fix_infos = ChainMap(dict(quant_info['param']), dict(quant_info['output']))
else:
combinded_fix_infos = quant_info['input']
if name in combinded_fix_infos.keys():
return combinded_fix_infos[name]
else:
return None
# if NNDCT_KEYS.FIX_OP_SUFFIX in input.get_name():
# raise RuntimeError("The consecutive fix ops in graph is forbidden!")
if not isinstance(quant_info, dict):
return None
# bit_width, fix_point = _get_fix_info(key_name, quant_info)[0]
# if bit_width is None or fix_point is None:
# return None
bit_info = _get_fix_info(key_name, quant_info)
if bit_info is None or bit_info[0] is None:
return None
bit_width, fix_point = bit_info[0]
if bit_width is None or fix_point is None:
return None
attrs: Dict[str, Any] = {}
attrs['fix_point'] = fix_point
attrs['bit_width'] = bit_width
attrs['round_mode'] = "DPU_ROUND"
attrs['if_signed'] = True
input_ops: Dict[str, List[Op]] = {}
input_ops['input'] = [input]
if post_fix:
op_name = input.get_name() + NNDCT_KEYS.FIX_OP_SUFFIX
else:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", key_name)
if id is not None:
op_name = formal_name + NNDCT_KEYS.PRE_FIX_OP_SUFFIX + f"_i{id}"
else:
op_name = formal_name + NNDCT_KEYS.PRE_FIX_OP_SUFFIX
fix_op = self.create_normal_op(
op_name,
'fix',
attrs=attrs,
input_ops=input_ops)
return fix_op
def get_op_by_name(self, name: str) -> Op:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
op = self._graph.get_op(formal_name + NNDCT_KEYS.FIX_OP_SUFFIX)
if op is None:
op = self._graph.get_op(formal_name)
return op
def get_op_output_shape(self, name: str) -> List[int]:
op = self.get_op_by_name(name)
if op:
return op.get_output_tensor().dims
else:
NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, f"{name} is not in xmodel. Please check it.")
def export_to_xmodel(self, fname: str) -> NoReturn:
fname += NNDCT_KEYS.XMODEL_SUFFIX
try:
self._graph.serialize(fname)
except Exception:
raise ExportXmodelError(self._graph.get_name())
else:
NndctScreenLogger().info(f"=>Successfully convert '{self._graph.get_name()}' to xmodel.({fname})")
def export_to_img(self, fname: str) -> NoReturn:
fname += NNDCT_KEYS.XMODEL_IMAGE_SUFFIX
try:
shell_command = "which dot"
proc = subprocess.Popen(shell_command, stdout=subprocess.PIPE, shell=True)
try:
outs, errs = proc.communicate(timeout=2)
except subprocess.TimeoutExpired:
proc.kill()
outs, errs = proc.communicate()
NndctScreenLogger().error2user(QError.PROC_TIMEOUT, f"{errs}")
raise
if outs:
self._graph.save_as_dot(fname)
else:
NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, f"Can't find dot command in the system, please install it." \
" Otherwise, the xmodel image will not be generated.")
except Exception as e:
NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, f"Failed to generate xmodel image!({str(e)})")
def graph(self):
return self._graph
def const_xop(xgraph: XGraph, node: Node, quant_config: NndctQuantInfo) -> NoReturn:
data = node.node_attr(node.op.AttrName.DATA)
data_type = np.dtype(node.out_tensors[0].dtype)
data_type = np.float32 if data_type == np.float64 else data_type
if not isinstance(data, list) and (not isinstance(data, np.ndarray)):
data = [data]
data = np.array(data, dtype=data_type)
data = np.transpose(data, node.transpose_out_order) if node.transpose_out_order else data
xgraph.create_fixed_const_op(name=node.name,
data=data,
quant_info=quant_config) | null |
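The `create_fix_op` helper in the class above decides whether to wrap an op with a `fix` node by looking its name up in the quantization config. Below is a minimal sketch of that lookup; the exact `quant_info` layout is inferred from the code and should be treated as an assumption:

```python
from collections import ChainMap
from typing import Optional, Tuple

# Hypothetical quant_info layout, inferred from create_fix_op: each name maps to a
# list whose first element is a [bit_width, fix_point] pair.
quant_info = {
    "param":  {"conv1.weight": [[8, 7]]},
    "output": {"conv1":        [[8, 4]]},
    "input":  {"conv1":        [[8, 5]]},
}

def lookup_fix_info(name: str, post_fix: bool) -> Optional[Tuple[int, int]]:
  if post_fix:
    # Post-fix ops: parameter and activation tables share one lookup.
    table = ChainMap(dict(quant_info["param"]), dict(quant_info["output"]))
  else:
    # Pre-fix ops only consult the input table.
    table = quant_info["input"]
  info = table.get(name)
  if info is None or info[0] is None:
    return None
  bit_width, fix_point = info[0]
  return bit_width, fix_point

print(lookup_fix_info("conv1", post_fix=True))   # (8, 4): fix op appended after conv1's output
print(lookup_fix_info("conv1", post_fix=False))  # (8, 5): fix op inserted before conv1's input
print(lookup_fix_info("fc1", post_fix=True))     # None: no fix op is created
```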
23,139 | import math
import itertools
from typing import List, Dict, Any, NoReturn, Tuple
import numpy as np
from functools import partial
from nndct_shared.base import NNDCT_OP, NNDCT_KEYS
from nndct_shared.nndct_graph import Tensor, Node
from .xgraph import XGraph
from nndct_shared.utils import calculate_op_scale, DataXopError
NndctQuantInfo = Dict[str, Dict[str, List[int]]]
def _get_xir_attr_from_node(node: Node):
attrs = None
if len(node.op.attrs) > 0:
attrs: Dict[str, Any] = {}
for attr_name, attr_value in node.op.attrs.items():
if node.op.is_xir_attr(attr_name):
attrs[attr_name.value] = _Converter.to_xir_attr_value(node.op.type, attr_name.value, attr_value.value)
return attrs
def shape(xgraph: XGraph, node: Node, quant_config: NndctQuantInfo) -> NoReturn:
r""" nndct shape is a macro operator, including shape, stridedslice
"""
# raise NotImplementedError("shape")
input_list = []
shape_input_ops: Dict[str, List["xir.Op"]] = {}
for input in node.in_nodes:
input_op = xgraph.get_op_by_name(input)
input_list.append(input_op)
shape_input_ops["input"] = input_list
sub_op_shape = xgraph.create_fixed_normal_op(
node.name + "_i0", "shape", quant_config, input_ops=shape_input_ops)
attrs: Dict[str, Any] = {}
strided_slice_input_ops: Dict[str, List["xir.Op"]] = {}
strided_slice_input_ops["input"] = [sub_op_shape]
dim = node.node_attr(node.op.AttrName.AXIS)
attrs["begin"] = [dim]
attrs["end"] = [dim + 1]
xgraph.create_fixed_normal_op(
node.name,
"strided_slice",
quant_config,
attrs=attrs,
input_ops=strided_slice_input_ops)
def scale(xgraph, node, quant_config):
attrs: Dict[str, Any] = {}
input_ops: Dict[str, List["xir.Op"]] = {}
if node.has_bound_params():
for param_name, param_tensor in node.op.params.items():
if param_name == node.op.ParamName.GAMMA:
input_ops['scale'] = [xgraph.get_op_by_name(param_tensor.name)]
if param_name == node.op.ParamName.BETA:
input_ops['bias'] = [xgraph.get_op_by_name(param_tensor.name)]
input_list = []
for input in node.in_tensors:
if node.has_bound_params() and input.is_param_tensor():
continue
elif input.is_param_tensor():
input_op = xgraph.get_op_by_name(input.name)
else:
input_op = xgraph.get_op_by_name(input.node.name)
input_list.append(input_op)
input_ops["input"] = xgraph.create_input_fix_ops(input_list, node.name, quant_config)
xgraph.create_fixed_normal_op(node.name, "scale", quant_config, attrs=attrs, input_ops=input_ops)
class XGraph(object):
def __init__(self, name: str):
self._graph = Graph(name)
self._const_ops: Dict[str, Op] = {}
self._ops: Dict[str, Op] = {}
def _check_inputs(self, input_ops):
if any([ip is None for ip in input_ops]):
raise RuntimeError('The input op is `None`, please check graph.')
def create_const_op(self, name: str, data: Optional[np.ndarray]) -> NoReturn:
if data is not None and data.ndim == 0:
data = np.array([data], data.dtype)
const_op = self._graph.create_const_op(name, data)
if name in self._const_ops:
raise RuntimeError('The const op {} has already in graph'.format(name))
return const_op
def create_input_transpose_ops(self, input_list: List[Op], input_tensors: 'List[base_tensor::Tensor]'):
t_ops = []
for i, (input, tensor) in enumerate(zip(input_list, input_tensors)):
if tensor.ndim in [4, 5]:
attrs: Dict[str, Any] = {}
attrs['order'] = [0, 3, 1, 2] if tensor.ndim == 4 else [0, 4, 3, 1, 2]
input_ops: Dict[str, List[Op]] = {}
input_ops['input'] = [input]
op_name = input.get_name() + NNDCT_KEYS.TRANSPOSE_OP_SUFFIX
t_op = self.get_op_by_name(op_name)
if t_op is None:
t_op = self.create_normal_op(
op_name,
'transpose',
attrs=attrs,
input_ops=input_ops)
t_ops.append(t_op)
else:
t_ops.append(input)
return t_ops
def create_normal_op(self,
name: str,
kind: str,
tensor: Optional[np.ndarray] = None,
attrs: Optional[Dict[str, Any]] = None,
input_ops: Optional[List[Op]] = None) -> Op:
if input_ops is not None:
self._check_inputs(input_ops['input'])
op = self._graph.create_op(name, kind, attrs, input_ops)
return op
def create_fixed_const_op(self, name: str, data: np.ndarray,
quant_info: NndctQuantInfo) -> Op:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
const_op = self.create_const_op(formal_name, data)
# print(const_op.get_name(), "const", const_op.get_output_tensor().dims)
fixed_const_op = self.create_fix_op(const_op, name, quant_info)
return fixed_const_op if fixed_const_op else const_op
def create_fixed_normal_op(self,
name: str,
kind: str,
quant_info: NndctQuantInfo,
tensor: Optional[np.ndarray] = None,
attrs: Optional[Dict[str, Any]] = None,
input_ops: Optional[List[Op]] = None) -> Op:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
op = self.create_normal_op(
name=formal_name,
kind=kind,
tensor=tensor,
attrs=attrs,
input_ops=input_ops)
# print(op.get_name(), kind, op.get_output_tensor().dims)
post_fixed_op = self.create_fix_op(op, name, quant_info)
return post_fixed_op if post_fixed_op else op
def create_input_fix_ops(self, input_list: List[Op], key_name: str, quant_info: NndctQuantInfo):
pre_fix_ops = []
for i, op in enumerate(input_list):
pre_fix_op = self.create_fix_op(op, key_name, quant_info, id=i, post_fix=False)
if pre_fix_op:
pre_fix_ops.append(pre_fix_op)
else:
pre_fix_ops.append(op)
return pre_fix_ops
def create_fix_op(self, input: Op, key_name: str,
quant_info: NndctQuantInfo, id: Optional[int] = None, post_fix: bool = True) -> Optional[Op]:
def _get_fix_info(name: str, quant_info: NndctQuantInfo) -> Sequence[int]:
if post_fix:
combinded_fix_infos = ChainMap(dict(quant_info['param']), dict(quant_info['output']))
else:
combinded_fix_infos = quant_info['input']
if name in combinded_fix_infos.keys():
return combinded_fix_infos[name]
else:
return None
# if NNDCT_KEYS.FIX_OP_SUFFIX in input.get_name():
# raise RuntimeError("The consecutive fix ops in graph is forbidden!")
if not isinstance(quant_info, dict):
return None
# bit_width, fix_point = _get_fix_info(key_name, quant_info)[0]
# if bit_width is None or fix_point is None:
# return None
bit_info = _get_fix_info(key_name, quant_info)
if bit_info is None or bit_info[0] is None:
return None
bit_width, fix_point = bit_info[0]
if bit_width is None or fix_point is None:
return None
attrs: Dict[str, Any] = {}
attrs['fix_point'] = fix_point
attrs['bit_width'] = bit_width
attrs['round_mode'] = "DPU_ROUND"
attrs['if_signed'] = True
input_ops: Dict[str, List[Op]] = {}
input_ops['input'] = [input]
if post_fix:
op_name = input.get_name() + NNDCT_KEYS.FIX_OP_SUFFIX
else:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", key_name)
if id is not None:
op_name = formal_name + NNDCT_KEYS.PRE_FIX_OP_SUFFIX + f"_i{id}"
else:
op_name = formal_name + NNDCT_KEYS.PRE_FIX_OP_SUFFIX
fix_op = self.create_normal_op(
op_name,
'fix',
attrs=attrs,
input_ops=input_ops)
return fix_op
def get_op_by_name(self, name: str) -> Op:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
op = self._graph.get_op(formal_name + NNDCT_KEYS.FIX_OP_SUFFIX)
if op is None:
op = self._graph.get_op(formal_name)
return op
def get_op_output_shape(self, name: str) -> List[int]:
op = self.get_op_by_name(name)
if op:
return op.get_output_tensor().dims
else:
NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, f"{name} is not in xmodel. Please check it.")
def export_to_xmodel(self, fname: str) -> NoReturn:
fname += NNDCT_KEYS.XMODEL_SUFFIX
try:
self._graph.serialize(fname)
except Exception:
raise ExportXmodelError(self._graph.get_name())
else:
NndctScreenLogger().info(f"=>Successfully convert '{self._graph.get_name()}' to xmodel.({fname})")
def export_to_img(self, fname: str) -> NoReturn:
fname += NNDCT_KEYS.XMODEL_IMAGE_SUFFIX
try:
shell_command = "which dot"
proc = subprocess.Popen(shell_command, stdout=subprocess.PIPE, shell=True)
try:
outs, errs = proc.communicate(timeout=2)
except subprocess.TimeoutExpired:
proc.kill()
outs, errs = proc.communicate()
NndctScreenLogger().error2user(QError.PROC_TIMEOUT, f"{errs}")
raise
if outs:
self._graph.save_as_dot(fname)
else:
NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, f"Can't find dot command in the system, please install it." \
" Otherwise, the xmodel image will not be generated.")
except Exception as e:
NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, f"Failed to generate xmodel image!({str(e)})")
def graph(self):
return self._graph
def reduction_mean(xgraph: XGraph, node: Node, quant_config: NndctQuantInfo) -> NoReturn:
attrs = _get_xir_attr_from_node(node)
input_ops: Dict[str, List[Op]] = {}
input_ops["input"] = [xgraph.get_op_by_name(node.in_nodes[0])]
input_ops["input"] = xgraph.create_input_fix_ops(input_ops["input"], node.name, quant_config)
in_tensor_shape = node.in_tensors[0].shape
if node.node_attr(node.op.AttrName.DIMS) == [None]:
dim_list = [i for i in range(len(in_tensor_shape))]
else:
dim_list = node.node_attr(node.op.AttrName.DIMS)
rec = 1
for i in dim_list:
rec = rec * in_tensor_shape[i]
if (rec & (rec - 1)) != 0:
xgraph.create_fixed_normal_op(
node.name + "_i0", "reduction_mean", quant_config, attrs=attrs, input_ops=input_ops)
scale = calculate_op_scale(rec, node)
scale = [scale]
xgraph.create_fixed_const_op(name=node.name + "_i1",
data=np.array(scale, dtype=np.float32),
quant_info=quant_config)
input_ops: Dict[str, List[Op]] = {}
input_ops["input"] = [xgraph.get_op_by_name(node.name + "_i0"), xgraph.get_op_by_name(node.name + "_i1")]
xgraph.create_fixed_normal_op(
node.name, "mul", quant_config, input_ops=input_ops)
else:
xgraph.create_fixed_normal_op(
node.name, "reduction_mean", quant_config, attrs=attrs, input_ops=input_ops) | null |
23,140 | import math
import itertools
from typing import List, Dict, Any, NoReturn, Tuple
import numpy as np
from functools import partial
from nndct_shared.base import NNDCT_OP, NNDCT_KEYS
from nndct_shared.nndct_graph import Tensor, Node
from .xgraph import XGraph
from nndct_shared.utils import calculate_op_scale, DataXopError
NndctQuantInfo = Dict[str, Dict[str, List[int]]]
def _pack(xgraph: XGraph, node: Node, pack_name: str, packed_item: List[Any],
quant_config: NndctQuantInfo) -> Tuple["xir.Op", List["xir.Op"]]:
"""
pack items into stack op
"""
pack_list = []
pack_input_ops: Dict[str, List["xir.Op"]] = {}
for i, item in enumerate(packed_item):
if isinstance(item, Tensor):
pack_list.append(xgraph.get_op_by_name(item.node.name))
else:
# dtype = np.int64 if isinstance(item, int) else np.float64
dtype = np.float32
const_op = xgraph.create_fixed_const_op(
name=node.name + f"_{pack_name}_attr[{i}]",
data=np.array([item], dtype=dtype),
quant_info=quant_config)
pack_list.append(const_op)
pack_input_ops["input"] = pack_list
attrs: Dict[str, Any] = {}
attrs["axis"] = 0
sub_op_pack = xgraph.create_fixed_normal_op(
node.name + f"_{pack_name}_i0",
"stack",
quant_config,
attrs=attrs,
input_ops=pack_input_ops)
return sub_op_pack, pack_list
def shape(xgraph: XGraph, node: Node, quant_config: NndctQuantInfo) -> NoReturn:
r""" nndct shape is a macro operator, including shape, stridedslice
"""
# raise NotImplementedError("shape")
input_list = []
shape_input_ops: Dict[str, List["xir.Op"]] = {}
for input in node.in_nodes:
input_op = xgraph.get_op_by_name(input)
input_list.append(input_op)
shape_input_ops["input"] = input_list
sub_op_shape = xgraph.create_fixed_normal_op(
node.name + "_i0", "shape", quant_config, input_ops=shape_input_ops)
attrs: Dict[str, Any] = {}
strided_slice_input_ops: Dict[str, List["xir.Op"]] = {}
strided_slice_input_ops["input"] = [sub_op_shape]
dim = node.node_attr(node.op.AttrName.AXIS)
attrs["begin"] = [dim]
attrs["end"] = [dim + 1]
xgraph.create_fixed_normal_op(
node.name,
"strided_slice",
quant_config,
attrs=attrs,
input_ops=strided_slice_input_ops)
class XGraph(object):
def __init__(self, name: str):
self._graph = Graph(name)
self._const_ops: Dict[str, Op] = {}
self._ops: Dict[str, Op] = {}
def _check_inputs(self, input_ops):
if any([ip is None for ip in input_ops]):
raise RuntimeError('The input op is `None`, please check graph.')
def create_const_op(self, name: str, data: Optional[np.ndarray]) -> NoReturn:
if data is not None and data.ndim == 0:
data = np.array([data], data.dtype)
const_op = self._graph.create_const_op(name, data)
if name in self._const_ops:
raise RuntimeError('The const op {} has already in graph'.format(name))
return const_op
def create_input_transpose_ops(self, input_list: List[Op], input_tensors: 'List[base_tensor::Tensor]'):
t_ops = []
for i, (input, tensor) in enumerate(zip(input_list, input_tensors)):
if tensor.ndim in [4, 5]:
attrs: Dict[str, Any] = {}
attrs['order'] = [0, 3, 1, 2] if tensor.ndim == 4 else [0, 4, 3, 1, 2]
input_ops: Dict[str, List[Op]] = {}
input_ops['input'] = [input]
op_name = input.get_name() + NNDCT_KEYS.TRANSPOSE_OP_SUFFIX
t_op = self.get_op_by_name(op_name)
if t_op is None:
t_op = self.create_normal_op(
op_name,
'transpose',
attrs=attrs,
input_ops=input_ops)
t_ops.append(t_op)
else:
t_ops.append(input)
return t_ops
def create_normal_op(self,
name: str,
kind: str,
tensor: Optional[np.ndarray] = None,
attrs: Optional[Dict[str, Any]] = None,
input_ops: Optional[List[Op]] = None) -> Op:
if input_ops is not None:
self._check_inputs(input_ops['input'])
op = self._graph.create_op(name, kind, attrs, input_ops)
return op
def create_fixed_const_op(self, name: str, data: np.ndarray,
quant_info: NndctQuantInfo) -> Op:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
const_op = self.create_const_op(formal_name, data)
# print(const_op.get_name(), "const", const_op.get_output_tensor().dims)
fixed_const_op = self.create_fix_op(const_op, name, quant_info)
return fixed_const_op if fixed_const_op else const_op
def create_fixed_normal_op(self,
name: str,
kind: str,
quant_info: NndctQuantInfo,
tensor: Optional[np.ndarray] = None,
attrs: Optional[Dict[str, Any]] = None,
input_ops: Optional[List[Op]] = None) -> Op:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
op = self.create_normal_op(
name=formal_name,
kind=kind,
tensor=tensor,
attrs=attrs,
input_ops=input_ops)
# print(op.get_name(), kind, op.get_output_tensor().dims)
post_fixed_op = self.create_fix_op(op, name, quant_info)
return post_fixed_op if post_fixed_op else op
def create_input_fix_ops(self, input_list: List[Op], key_name: str, quant_info: NndctQuantInfo):
pre_fix_ops = []
for i, op in enumerate(input_list):
pre_fix_op = self.create_fix_op(op, key_name, quant_info, id=i, post_fix=False)
if pre_fix_op:
pre_fix_ops.append(pre_fix_op)
else:
pre_fix_ops.append(op)
return pre_fix_ops
def create_fix_op(self, input: Op, key_name: str,
quant_info: NndctQuantInfo, id: Optional[int] = None, post_fix: bool = True) -> Optional[Op]:
def _get_fix_info(name: str, quant_info: NndctQuantInfo) -> Sequence[int]:
if post_fix:
combinded_fix_infos = ChainMap(dict(quant_info['param']), dict(quant_info['output']))
else:
combinded_fix_infos = quant_info['input']
if name in combinded_fix_infos.keys():
return combinded_fix_infos[name]
else:
return None
# if NNDCT_KEYS.FIX_OP_SUFFIX in input.get_name():
# raise RuntimeError("The consecutive fix ops in graph is forbidden!")
if not isinstance(quant_info, dict):
return None
# bit_width, fix_point = _get_fix_info(key_name, quant_info)[0]
# if bit_width is None or fix_point is None:
# return None
bit_info = _get_fix_info(key_name, quant_info)
if bit_info is None or bit_info[0] is None:
return None
bit_width, fix_point = bit_info[0]
if bit_width is None or fix_point is None:
return None
attrs: Dict[str, Any] = {}
attrs['fix_point'] = fix_point
attrs['bit_width'] = bit_width
attrs['round_mode'] = "DPU_ROUND"
attrs['if_signed'] = True
input_ops: Dict[str, List[Op]] = {}
input_ops['input'] = [input]
if post_fix:
op_name = input.get_name() + NNDCT_KEYS.FIX_OP_SUFFIX
else:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", key_name)
if id is not None:
op_name = formal_name + NNDCT_KEYS.PRE_FIX_OP_SUFFIX + f"_i{id}"
else:
op_name = formal_name + NNDCT_KEYS.PRE_FIX_OP_SUFFIX
fix_op = self.create_normal_op(
op_name,
'fix',
attrs=attrs,
input_ops=input_ops)
return fix_op
def get_op_by_name(self, name: str) -> Op:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
op = self._graph.get_op(formal_name + NNDCT_KEYS.FIX_OP_SUFFIX)
if op is None:
op = self._graph.get_op(formal_name)
return op
def get_op_output_shape(self, name: str) -> List[int]:
op = self.get_op_by_name(name)
if op:
return op.get_output_tensor().dims
else:
NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, f"{name} is not in xmodel. Please check it.")
def export_to_xmodel(self, fname: str) -> NoReturn:
fname += NNDCT_KEYS.XMODEL_SUFFIX
try:
self._graph.serialize(fname)
except Exception:
raise ExportXmodelError(self._graph.get_name())
else:
NndctScreenLogger().info(f"=>Successfully convert '{self._graph.get_name()}' to xmodel.({fname})")
def export_to_img(self, fname: str) -> NoReturn:
fname += NNDCT_KEYS.XMODEL_IMAGE_SUFFIX
try:
shell_command = "which dot"
proc = subprocess.Popen(shell_command, stdout=subprocess.PIPE, shell=True)
try:
outs, errs = proc.communicate(timeout=2)
except subprocess.TimeoutExpired:
proc.kill()
outs, errs = proc.communicate()
NndctScreenLogger().error2user(QError.PROC_TIMEOUT, f"{errs}")
raise
if outs:
self._graph.save_as_dot(fname)
else:
NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, f"Can't find dot command in the system, please install it." \
" Otherwise, the xmodel image will not be generated.")
except Exception as e:
NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, f"Failed to generate xmodel image!({str(e)})")
def graph(self):
return self._graph
The provided code snippet includes necessary dependencies for implementing the `reshape` function. Write a Python function `def reshape(xgraph: XGraph, node: Node, quant_config: NndctQuantInfo) -> NoReturn` to solve the following problem:
r""" nndct reshape is a macro operator, including pack, reshape
Here is the function:
def reshape(xgraph: XGraph, node: Node,
quant_config: NndctQuantInfo) -> NoReturn:
r""" nndct reshape is a macro operator, including pack, reshape
"""
shape = node.node_attr(node.op.AttrName.SHAPE)
sub_op_pack, pack_list = _pack(xgraph, node, "shape", shape, quant_config)
input_ops: Dict[str, List["xir.Op"]] = {}
input_ops["shape"] = [sub_op_pack]
input_ops["input"] = [xgraph.get_op_by_name(node.in_nodes[0])]
xgraph.create_fixed_normal_op(
node.name, "reshape", quant_config, input_ops=input_ops) | r""" nndct reshape is a macro operator, including pack, reshape |
23,141 | import math
import itertools
from typing import List, Dict, Any, NoReturn, Tuple
import numpy as np
from functools import partial
from nndct_shared.base import NNDCT_OP, NNDCT_KEYS
from nndct_shared.nndct_graph import Tensor, Node
from .xgraph import XGraph
from nndct_shared.utils import calculate_op_scale, DataXopError
NndctQuantInfo = Dict[str, Dict[str, List[int]]]
def _pack(xgraph: XGraph, node: Node, pack_name: str, packed_item: List[Any],
quant_config: NndctQuantInfo) -> Tuple["xir.Op", List["xir.Op"]]:
"""
pack items into stack op
"""
pack_list = []
pack_input_ops: Dict[str, List["xir.Op"]] = {}
for i, item in enumerate(packed_item):
if isinstance(item, Tensor):
pack_list.append(xgraph.get_op_by_name(item.node.name))
else:
# dtype = np.int64 if isinstance(item, int) else np.float64
dtype = np.float32
const_op = xgraph.create_fixed_const_op(
name=node.name + f"_{pack_name}_attr[{i}]",
data=np.array([item], dtype=dtype),
quant_info=quant_config)
pack_list.append(const_op)
pack_input_ops["input"] = pack_list
attrs: Dict[str, Any] = {}
attrs["axis"] = 0
sub_op_pack = xgraph.create_fixed_normal_op(
node.name + f"_{pack_name}_i0",
"stack",
quant_config,
attrs=attrs,
input_ops=pack_input_ops)
return sub_op_pack, pack_list
def scale(xgraph, node, quant_config):
attrs: Dict[str, Any] = {}
input_ops: Dict[str, List["xir.Op"]] = {}
if node.has_bound_params():
for param_name, param_tensor in node.op.params.items():
if param_name == node.op.ParamName.GAMMA:
input_ops['scale'] = [xgraph.get_op_by_name(param_tensor.name)]
if param_name == node.op.ParamName.BETA:
input_ops['bias'] = [xgraph.get_op_by_name(param_tensor.name)]
input_list = []
for input in node.in_tensors:
if node.has_bound_params() and input.is_param_tensor():
continue
elif input.is_param_tensor():
input_op = xgraph.get_op_by_name(input.name)
else:
input_op = xgraph.get_op_by_name(input.node.name)
input_list.append(input_op)
input_ops["input"] = xgraph.create_input_fix_ops(input_list, node.name, quant_config)
xgraph.create_fixed_normal_op(node.name, "scale", quant_config, attrs=attrs, input_ops=input_ops)
class XGraph(object):
def __init__(self, name: str):
self._graph = Graph(name)
self._const_ops: Dict[str, Op] = {}
self._ops: Dict[str, Op] = {}
def _check_inputs(self, input_ops):
if any([ip is None for ip in input_ops]):
raise RuntimeError('The input op is `None`, please check graph.')
def create_const_op(self, name: str, data: Optional[np.ndarray]) -> NoReturn:
if data is not None and data.ndim == 0:
data = np.array([data], data.dtype)
const_op = self._graph.create_const_op(name, data)
if name in self._const_ops:
raise RuntimeError('The const op {} has already in graph'.format(name))
return const_op
def create_input_transpose_ops(self, input_list: List[Op], input_tensors: 'List[base_tensor::Tensor]'):
t_ops = []
for i, (input, tensor) in enumerate(zip(input_list, input_tensors)):
if tensor.ndim in [4, 5]:
attrs: Dict[str, Any] = {}
attrs['order'] = [0, 3, 1, 2] if tensor.ndim == 4 else [0, 4, 3, 1, 2]
input_ops: Dict[str, List[Op]] = {}
input_ops['input'] = [input]
op_name = input.get_name() + NNDCT_KEYS.TRANSPOSE_OP_SUFFIX
t_op = self.get_op_by_name(op_name)
if t_op is None:
t_op = self.create_normal_op(
op_name,
'transpose',
attrs=attrs,
input_ops=input_ops)
t_ops.append(t_op)
else:
t_ops.append(input)
return t_ops
def create_normal_op(self,
name: str,
kind: str,
tensor: Optional[np.ndarray] = None,
attrs: Optional[Dict[str, Any]] = None,
input_ops: Optional[List[Op]] = None) -> Op:
if input_ops is not None:
self._check_inputs(input_ops['input'])
op = self._graph.create_op(name, kind, attrs, input_ops)
return op
def create_fixed_const_op(self, name: str, data: np.ndarray,
quant_info: NndctQuantInfo) -> Op:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
const_op = self.create_const_op(formal_name, data)
# print(const_op.get_name(), "const", const_op.get_output_tensor().dims)
fixed_const_op = self.create_fix_op(const_op, name, quant_info)
return fixed_const_op if fixed_const_op else const_op
def create_fixed_normal_op(self,
name: str,
kind: str,
quant_info: NndctQuantInfo,
tensor: Optional[np.ndarray] = None,
attrs: Optional[Dict[str, Any]] = None,
input_ops: Optional[List[Op]] = None) -> Op:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
op = self.create_normal_op(
name=formal_name,
kind=kind,
tensor=tensor,
attrs=attrs,
input_ops=input_ops)
# print(op.get_name(), kind, op.get_output_tensor().dims)
post_fixed_op = self.create_fix_op(op, name, quant_info)
return post_fixed_op if post_fixed_op else op
def create_input_fix_ops(self, input_list: List[Op], key_name: str, quant_info: NndctQuantInfo):
pre_fix_ops = []
for i, op in enumerate(input_list):
pre_fix_op = self.create_fix_op(op, key_name, quant_info, id=i, post_fix=False)
if pre_fix_op:
pre_fix_ops.append(pre_fix_op)
else:
pre_fix_ops.append(op)
return pre_fix_ops
def create_fix_op(self, input: Op, key_name: str,
quant_info: NndctQuantInfo, id: Optional[int] = None, post_fix: bool = True) -> Optional[Op]:
def _get_fix_info(name: str, quant_info: NndctQuantInfo) -> Sequence[int]:
if post_fix:
combinded_fix_infos = ChainMap(dict(quant_info['param']), dict(quant_info['output']))
else:
combinded_fix_infos = quant_info['input']
if name in combinded_fix_infos.keys():
return combinded_fix_infos[name]
else:
return None
# if NNDCT_KEYS.FIX_OP_SUFFIX in input.get_name():
# raise RuntimeError("The consecutive fix ops in graph is forbidden!")
if not isinstance(quant_info, dict):
return None
# bit_width, fix_point = _get_fix_info(key_name, quant_info)[0]
# if bit_width is None or fix_point is None:
# return None
bit_info = _get_fix_info(key_name, quant_info)
if bit_info is None or bit_info[0] is None:
return None
bit_width, fix_point = bit_info[0]
if bit_width is None or fix_point is None:
return None
attrs: Dict[str, Any] = {}
attrs['fix_point'] = fix_point
attrs['bit_width'] = bit_width
attrs['round_mode'] = "DPU_ROUND"
attrs['if_signed'] = True
input_ops: Dict[str, List[Op]] = {}
input_ops['input'] = [input]
if post_fix:
op_name = input.get_name() + NNDCT_KEYS.FIX_OP_SUFFIX
else:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", key_name)
if id is not None:
op_name = formal_name + NNDCT_KEYS.PRE_FIX_OP_SUFFIX + f"_i{id}"
else:
op_name = formal_name + NNDCT_KEYS.PRE_FIX_OP_SUFFIX
fix_op = self.create_normal_op(
op_name,
'fix',
attrs=attrs,
input_ops=input_ops)
return fix_op
def get_op_by_name(self, name: str) -> Op:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
op = self._graph.get_op(formal_name + NNDCT_KEYS.FIX_OP_SUFFIX)
if op is None:
op = self._graph.get_op(formal_name)
return op
def get_op_output_shape(self, name: str) -> List[int]:
op = self.get_op_by_name(name)
if op:
return op.get_output_tensor().dims
else:
NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, f"{name} is not in xmodel. Please check it.")
def export_to_xmodel(self, fname: str) -> NoReturn:
fname += NNDCT_KEYS.XMODEL_SUFFIX
try:
self._graph.serialize(fname)
except Exception:
raise ExportXmodelError(self._graph.get_name())
else:
NndctScreenLogger().info(f"=>Successfully convert '{self._graph.get_name()}' to xmodel.({fname})")
def export_to_img(self, fname: str) -> NoReturn:
fname += NNDCT_KEYS.XMODEL_IMAGE_SUFFIX
try:
shell_command = "which dot"
proc = subprocess.Popen(shell_command, stdout=subprocess.PIPE, shell=True)
try:
outs, errs = proc.communicate(timeout=2)
except subprocess.TimeoutExpired:
proc.kill()
outs, errs = proc.communicate()
NndctScreenLogger().error2user(QError.PROC_TIMEOUT, f"{errs}")
raise
if outs:
self._graph.save_as_dot(fname)
else:
NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, f"Can't find dot command in the system, please install it." \
" Otherwise, the xmodel image will not be generated.")
except Exception as e:
NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, f"Failed to generate xmodel image!({str(e)})")
def graph(self):
return self._graph
The provided code snippet includes necessary dependencies for implementing the `resize` function. Write a Python function `def resize(xgraph: XGraph, node: Node, quant_config: NndctQuantInfo) -> NoReturn` to solve the following problem:
resize is a macro operator, including concat and resize.
Here is the function:
def resize(xgraph: XGraph, node: Node,
quant_config: NndctQuantInfo) -> NoReturn:
"""
resize is a macro operator, including concat , resize
"""
attrs: Dict[str, Any] = {}
# attrs["scale"] = node.node_attr(node.op.AttrName.SCALE)
attrs["align_corners"] = node.node_attr(node.op.AttrName.ALIGN_CORNERS)
attrs["half_pixel_centers"] = node.node_attr(
node.op.AttrName.HALF_PIXEL_CENTERS)
attrs["mode"] = node.node_attr(node.op.AttrName.MODE)
# attrs["mode"] = {0: "NEAREST", 3: "BILINEAR"}.get(attrs["mode"])
size = node.node_attr(node.op.AttrName.SIZE)
scale = node.node_attr(node.op.AttrName.SCALE)
# if size[0] == 0 and size[1] == 0:
if all([s == 0 for s in size]):
attrs["scale"] = scale
input_ops: Dict[str, List["xir.Op"]] = {}
input_list = []
for input in node.in_nodes:
input_op = xgraph.get_op_by_name(input)
input_list.append(input_op)
input_ops["input"] = xgraph.create_input_fix_ops(input_list, node.name, quant_config)
xgraph.create_fixed_normal_op(
node.name, "resize", quant_config, attrs=attrs, input_ops=input_ops)
else:
sub_pack_op, pack_list = _pack(xgraph, node, "size", size, quant_config)
input_ops: Dict[str, List["xir.Op"]] = {}
input_ops["size"] = [sub_pack_op]
input_list = []
for input in node.in_nodes:
input_op = xgraph.get_op_by_name(input)
input_list.append(input_op)
input_ops["input"] = input_list
input_ops["input"] = [
op for op in input_ops["input"]
if op.get_name() not in [i.get_name() for i in pack_list]
]
input_ops["input"] = xgraph.create_input_fix_ops(input_ops["input"], node.name, quant_config)
xgraph.create_fixed_normal_op(
node.name, "resize", quant_config, attrs=attrs, input_ops=input_ops) | resize is a macro operator, including concat , resize |
23,142 | import math
import itertools
from typing import List, Dict, Any, NoReturn, Tuple
import numpy as np
from functools import partial
from nndct_shared.base import NNDCT_OP, NNDCT_KEYS
from nndct_shared.nndct_graph import Tensor, Node
from .xgraph import XGraph
from nndct_shared.utils import calculate_op_scale, DataXopError
NndctQuantInfo = Dict[str, Dict[str, List[int]]]
class XGraph(object):
def __init__(self, name: str):
self._graph = Graph(name)
self._const_ops: Dict[str, Op] = {}
self._ops: Dict[str, Op] = {}
def _check_inputs(self, input_ops):
if any([ip is None for ip in input_ops]):
raise RuntimeError('The input op is `None`, please check graph.')
def create_const_op(self, name: str, data: Optional[np.ndarray]) -> NoReturn:
if data is not None and data.ndim == 0:
data = np.array([data], data.dtype)
const_op = self._graph.create_const_op(name, data)
if name in self._const_ops:
raise RuntimeError('The const op {} has already in graph'.format(name))
return const_op
def create_input_transpose_ops(self, input_list: List[Op], input_tensors: 'List[base_tensor::Tensor]'):
t_ops = []
for i, (input, tensor) in enumerate(zip(input_list, input_tensors)):
if tensor.ndim in [4, 5]:
attrs: Dict[str, Any] = {}
attrs['order'] = [0, 3, 1, 2] if tensor.ndim == 4 else [0, 4, 3, 1, 2]
input_ops: Dict[str, List[Op]] = {}
input_ops['input'] = [input]
op_name = input.get_name() + NNDCT_KEYS.TRANSPOSE_OP_SUFFIX
t_op = self.get_op_by_name(op_name)
if t_op is None:
t_op = self.create_normal_op(
op_name,
'transpose',
attrs=attrs,
input_ops=input_ops)
t_ops.append(t_op)
else:
t_ops.append(input)
return t_ops
def create_normal_op(self,
name: str,
kind: str,
tensor: Optional[np.ndarray] = None,
attrs: Optional[Dict[str, Any]] = None,
input_ops: Optional[List[Op]] = None) -> Op:
if input_ops is not None:
self._check_inputs(input_ops['input'])
op = self._graph.create_op(name, kind, attrs, input_ops)
return op
def create_fixed_const_op(self, name: str, data: np.ndarray,
quant_info: NndctQuantInfo) -> Op:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
const_op = self.create_const_op(formal_name, data)
# print(const_op.get_name(), "const", const_op.get_output_tensor().dims)
fixed_const_op = self.create_fix_op(const_op, name, quant_info)
return fixed_const_op if fixed_const_op else const_op
def create_fixed_normal_op(self,
name: str,
kind: str,
quant_info: NndctQuantInfo,
tensor: Optional[np.ndarray] = None,
attrs: Optional[Dict[str, Any]] = None,
input_ops: Optional[List[Op]] = None) -> Op:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
op = self.create_normal_op(
name=formal_name,
kind=kind,
tensor=tensor,
attrs=attrs,
input_ops=input_ops)
# print(op.get_name(), kind, op.get_output_tensor().dims)
post_fixed_op = self.create_fix_op(op, name, quant_info)
return post_fixed_op if post_fixed_op else op
def create_input_fix_ops(self, input_list: List[Op], key_name: str, quant_info: NndctQuantInfo):
pre_fix_ops = []
for i, op in enumerate(input_list):
pre_fix_op = self.create_fix_op(op, key_name, quant_info, id=i, post_fix=False)
if pre_fix_op:
pre_fix_ops.append(pre_fix_op)
else:
pre_fix_ops.append(op)
return pre_fix_ops
def create_fix_op(self, input: Op, key_name: str,
quant_info: NndctQuantInfo, id: Optional[int] = None, post_fix: bool = True) -> Optional[Op]:
def _get_fix_info(name: str, quant_info: NndctQuantInfo) -> Sequence[int]:
if post_fix:
combinded_fix_infos = ChainMap(dict(quant_info['param']), dict(quant_info['output']))
else:
combinded_fix_infos = quant_info['input']
if name in combinded_fix_infos.keys():
return combinded_fix_infos[name]
else:
return None
# if NNDCT_KEYS.FIX_OP_SUFFIX in input.get_name():
# raise RuntimeError("The consecutive fix ops in graph is forbidden!")
if not isinstance(quant_info, dict):
return None
# bit_width, fix_point = _get_fix_info(key_name, quant_info)[0]
# if bit_width is None or fix_point is None:
# return None
bit_info = _get_fix_info(key_name, quant_info)
if bit_info is None or bit_info[0] is None:
return None
bit_width, fix_point = bit_info[0]
if bit_width is None or fix_point is None:
return None
attrs: Dict[str, Any] = {}
attrs['fix_point'] = fix_point
attrs['bit_width'] = bit_width
attrs['round_mode'] = "DPU_ROUND"
attrs['if_signed'] = True
input_ops: Dict[str, List[Op]] = {}
input_ops['input'] = [input]
if post_fix:
op_name = input.get_name() + NNDCT_KEYS.FIX_OP_SUFFIX
else:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", key_name)
if id is not None:
op_name = formal_name + NNDCT_KEYS.PRE_FIX_OP_SUFFIX + f"_i{id}"
else:
op_name = formal_name + NNDCT_KEYS.PRE_FIX_OP_SUFFIX
fix_op = self.create_normal_op(
op_name,
'fix',
attrs=attrs,
input_ops=input_ops)
return fix_op
def get_op_by_name(self, name: str) -> Op:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
op = self._graph.get_op(formal_name + NNDCT_KEYS.FIX_OP_SUFFIX)
if op is None:
op = self._graph.get_op(formal_name)
return op
def get_op_output_shape(self, name: str) -> List[int]:
op = self.get_op_by_name(name)
if op:
return op.get_output_tensor().dims
else:
NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, f"{name} is not in xmodel. Please check it.")
def export_to_xmodel(self, fname: str) -> NoReturn:
fname += NNDCT_KEYS.XMODEL_SUFFIX
try:
self._graph.serialize(fname)
except Exception:
raise ExportXmodelError(self._graph.get_name())
else:
NndctScreenLogger().info(f"=>Successfully convert '{self._graph.get_name()}' to xmodel.({fname})")
def export_to_img(self, fname: str) -> NoReturn:
fname += NNDCT_KEYS.XMODEL_IMAGE_SUFFIX
try:
shell_command = "which dot"
proc = subprocess.Popen(shell_command, stdout=subprocess.PIPE, shell=True)
try:
outs, errs = proc.communicate(timeout=2)
except subprocess.TimeoutExpired:
proc.kill()
outs, errs = proc.communicate()
NndctScreenLogger().error2user(QError.PROC_TIMEOUT, f"{errs}")
raise
if outs:
self._graph.save_as_dot(fname)
else:
NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, f"Can't find dot command in the system, please install it." \
" Otherwise, the xmodel image will not be generated.")
except Exception as e:
NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, f"Failed to generate xmodel image!({str(e)})")
def graph(self):
return self._graph
def dense(xgraph: XGraph, node: Node, quant_config: NndctQuantInfo) -> NoReturn:
input_ops: Dict[str, List["xir.Op"]] = {}
for param_name, param_tensor in node.op.params.items():
if param_name == node.op.ParamName.WEIGHTS:
weights = xgraph.get_op_by_name(param_tensor.name)
else:
bias = xgraph.get_op_by_name(param_tensor.name)
input_ops["bias"] = [bias]
input_list = []
for input in node.in_nodes:
input_op = xgraph.get_op_by_name(input)
input_list.append(input_op)
input_ops["input"] = xgraph.create_input_fix_ops(input_list, node.name, quant_config)
input_ops["input"].append(weights)
attrs: Dict[str, Any] = {}
attrs["transpose_a"] = False
attrs["transpose_b"] = True
xgraph.create_fixed_normal_op(
node.name, "matmul", quant_config, attrs=attrs, input_ops=input_ops) | null |
23,143 | import math
import itertools
import re
import subprocess
from collections import ChainMap
from typing import List, Dict, Any, NoReturn, Tuple, Optional, Sequence
import numpy as np
from functools import partial
from nndct_shared.base import NNDCT_OP, NNDCT_KEYS
from nndct_shared.nndct_graph import Tensor, Node
from .xgraph import XGraph
from nndct_shared.utils import calculate_op_scale, DataXopError
NndctQuantInfo = Dict[str, Dict[str, List[int]]]
def _get_xir_attr_from_node(node: Node):
attrs = None
if len(node.op.attrs) > 0:
attrs: Dict[str, Any] = {}
for attr_name, attr_value in node.op.attrs.items():
if node.op.is_xir_attr(attr_name):
attrs[attr_name.value] = _Converter.to_xir_attr_value(node.op.type, attr_name.value, attr_value.value)
return attrs
def scale(xgraph, node, quant_config):
attrs: Dict[str, Any] = {}
input_ops: Dict[str, List["xir.Op"]] = {}
if node.has_bound_params():
for param_name, param_tensor in node.op.params.items():
if param_name == node.op.ParamName.GAMMA:
input_ops['scale'] = [xgraph.get_op_by_name(param_tensor.name)]
if param_name == node.op.ParamName.BETA:
input_ops['bias'] = [xgraph.get_op_by_name(param_tensor.name)]
input_list = []
for input in node.in_tensors:
if node.has_bound_params() and input.is_param_tensor():
continue
elif input.is_param_tensor():
input_op = xgraph.get_op_by_name(input.name)
else:
input_op = xgraph.get_op_by_name(input.node.name)
input_list.append(input_op)
input_ops["input"] = xgraph.create_input_fix_ops(input_list, node.name, quant_config)
xgraph.create_fixed_normal_op(node.name, "scale", quant_config, attrs=attrs, input_ops=input_ops)
class XGraph(object):
def __init__(self, name: str):
self._graph = Graph(name)
self._const_ops: Dict[str, Op] = {}
self._ops: Dict[str, Op] = {}
def _check_inputs(self, input_ops):
if any([ip is None for ip in input_ops]):
raise RuntimeError('The input op is `None`, please check graph.')
def create_const_op(self, name: str, data: Optional[np.ndarray]) -> NoReturn:
if data is not None and data.ndim == 0:
data = np.array([data], data.dtype)
const_op = self._graph.create_const_op(name, data)
if name in self._const_ops:
      raise RuntimeError('The const op {} is already in the graph.'.format(name))
return const_op
def create_input_transpose_ops(self, input_list: List[Op], input_tensors: 'List[base_tensor::Tensor]'):
t_ops = []
for i, (input, tensor) in enumerate(zip(input_list, input_tensors)):
if tensor.ndim in [4, 5]:
attrs: Dict[str, Any] = {}
attrs['order'] = [0, 3, 1, 2] if tensor.ndim == 4 else [0, 4, 3, 1, 2]
input_ops: Dict[str, List[Op]] = {}
input_ops['input'] = [input]
op_name = input.get_name() + NNDCT_KEYS.TRANSPOSE_OP_SUFFIX
t_op = self.get_op_by_name(op_name)
if t_op is None:
t_op = self.create_normal_op(
op_name,
'transpose',
attrs=attrs,
input_ops=input_ops)
t_ops.append(t_op)
else:
t_ops.append(input)
return t_ops
def create_normal_op(self,
name: str,
kind: str,
tensor: Optional[np.ndarray] = None,
attrs: Optional[Dict[str, Any]] = None,
input_ops: Optional[List[Op]] = None) -> Op:
if input_ops is not None:
self._check_inputs(input_ops['input'])
op = self._graph.create_op(name, kind, attrs, input_ops)
return op
def create_fixed_const_op(self, name: str, data: np.ndarray,
quant_info: NndctQuantInfo) -> Op:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
const_op = self.create_const_op(formal_name, data)
# print(const_op.get_name(), "const", const_op.get_output_tensor().dims)
fixed_const_op = self.create_fix_op(const_op, name, quant_info)
return fixed_const_op if fixed_const_op else const_op
def create_fixed_normal_op(self,
name: str,
kind: str,
quant_info: NndctQuantInfo,
tensor: Optional[np.ndarray] = None,
attrs: Optional[Dict[str, Any]] = None,
input_ops: Optional[List[Op]] = None) -> Op:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
op = self.create_normal_op(
name=formal_name,
kind=kind,
tensor=tensor,
attrs=attrs,
input_ops=input_ops)
# print(op.get_name(), kind, op.get_output_tensor().dims)
post_fixed_op = self.create_fix_op(op, name, quant_info)
return post_fixed_op if post_fixed_op else op
def create_input_fix_ops(self, input_list: List[Op], key_name: str, quant_info: NndctQuantInfo):
pre_fix_ops = []
for i, op in enumerate(input_list):
pre_fix_op = self.create_fix_op(op, key_name, quant_info, id=i, post_fix=False)
if pre_fix_op:
pre_fix_ops.append(pre_fix_op)
else:
pre_fix_ops.append(op)
return pre_fix_ops
def create_fix_op(self, input: Op, key_name: str,
quant_info: NndctQuantInfo, id: Optional[int] = None, post_fix: bool = True) -> Optional[Op]:
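    # With post_fix=True (the default) a fix op named "<producer>" + FIX_OP_SUFFIX is
    # appended after `input`, looking up key_name in the param/output quantization tables;
    # with post_fix=False a per-input pre-fix op is created instead, named from key_name
    # (plus "_i<id>" when an input index is given) and driven by the 'input' table.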
def _get_fix_info(name: str, quant_info: NndctQuantInfo) -> Sequence[int]:
if post_fix:
combinded_fix_infos = ChainMap(dict(quant_info['param']), dict(quant_info['output']))
else:
combinded_fix_infos = quant_info['input']
if name in combinded_fix_infos.keys():
return combinded_fix_infos[name]
else:
return None
# if NNDCT_KEYS.FIX_OP_SUFFIX in input.get_name():
# raise RuntimeError("The consecutive fix ops in graph is forbidden!")
if not isinstance(quant_info, dict):
return None
# bit_width, fix_point = _get_fix_info(key_name, quant_info)[0]
# if bit_width is None or fix_point is None:
# return None
bit_info = _get_fix_info(key_name, quant_info)
if bit_info is None or bit_info[0] is None:
return None
bit_width, fix_point = bit_info[0]
if bit_width is None or fix_point is None:
return None
attrs: Dict[str, Any] = {}
attrs['fix_point'] = fix_point
attrs['bit_width'] = bit_width
attrs['round_mode'] = "DPU_ROUND"
attrs['if_signed'] = True
input_ops: Dict[str, List[Op]] = {}
input_ops['input'] = [input]
if post_fix:
op_name = input.get_name() + NNDCT_KEYS.FIX_OP_SUFFIX
else:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", key_name)
if id is not None:
op_name = formal_name + NNDCT_KEYS.PRE_FIX_OP_SUFFIX + f"_i{id}"
else:
op_name = formal_name + NNDCT_KEYS.PRE_FIX_OP_SUFFIX
fix_op = self.create_normal_op(
op_name,
'fix',
attrs=attrs,
input_ops=input_ops)
return fix_op
def get_op_by_name(self, name: str) -> Op:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
op = self._graph.get_op(formal_name + NNDCT_KEYS.FIX_OP_SUFFIX)
if op is None:
op = self._graph.get_op(formal_name)
return op
def get_op_output_shape(self, name: str) -> List[int]:
op = self.get_op_by_name(name)
if op:
return op.get_output_tensor().dims
else:
NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, f"{name} is not in xmodel. Please check it.")
def export_to_xmodel(self, fname: str) -> NoReturn:
fname += NNDCT_KEYS.XMODEL_SUFFIX
try:
self._graph.serialize(fname)
except Exception:
raise ExportXmodelError(self._graph.get_name())
else:
NndctScreenLogger().info(f"=>Successfully convert '{self._graph.get_name()}' to xmodel.({fname})")
def export_to_img(self, fname: str) -> NoReturn:
fname += NNDCT_KEYS.XMODEL_IMAGE_SUFFIX
try:
shell_command = "which dot"
proc = subprocess.Popen(shell_command, stdout=subprocess.PIPE, shell=True)
try:
outs, errs = proc.communicate(timeout=2)
except subprocess.TimeoutExpired:
proc.kill()
outs, errs = proc.communicate()
NndctScreenLogger().error2user(QError.PROC_TIMEOUT, f"{errs}")
raise
if outs:
self._graph.save_as_dot(fname)
else:
        NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, "Can't find the 'dot' command on this system, please install it." \
          " Otherwise, the xmodel image will not be generated.")
except Exception as e:
NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, f"Failed to generate xmodel image!({str(e)})")
def graph(self):
return self._graph
def avgpool(xgraph: XGraph, node: Node, quant_config: NndctQuantInfo) -> NoReturn:
scale = 1.0
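  # The hardware averages by summing and multiplying with a fixed-point approximation of
  # 1/(kh*kw); the kernel-size specific constants below (e.g. 9 * 7/64 for a 3x3 kernel)
  # compensate for that approximation so the result stays close to a true average.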
if node.node_attr(node.op.AttrName.KERNEL) == [3, 3]:
scale = 9.0 * 7.0 / 64.0
elif node.node_attr(node.op.AttrName.KERNEL) == [5, 5]:
scale = 25.0 * 10.0 / 256.0
elif node.node_attr(node.op.AttrName.KERNEL) in [[6, 6], [3, 6], [6, 3]]:
scale = 36.0 * 7.0 / 256.0
elif node.node_attr(node.op.AttrName.KERNEL) == [7, 7]:
scale = 49.0 * 21.0 / 1024.0
elif node.node_attr(node.op.AttrName.KERNEL) == [14, 14]:
scale = 196.0 * 21.0 / 4096.0
else:
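    # For other kernel sizes, search for a (multiplier, shift) pair such that
    # multiplier / 2**shift best approximates 1/(kh*kw); the compensating scale is then
    # rec * multiplier / 2**shift, which is close to 1.0.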
rec = node.node_attr(node.op.AttrName.KERNEL)[0] * node.node_attr(node.op.AttrName.KERNEL)[1]
max_factor = math.ceil(math.log(rec * 128,2))
diff = 1.0
multi_factor = 0.0
shift_factor = 0.0
for shift_factor_ in range(max_factor):
factor = round((2 ** shift_factor_)/rec)
diff_ = abs(factor / (2 ** shift_factor_) - 1/rec)
if diff_ < diff:
multi_factor = factor
diff = diff_
shift_factor = shift_factor_
scale = rec * multi_factor / (2 ** shift_factor)
attrs = _get_xir_attr_from_node(node)
# attrs: Dict[str, Any] = {}
# for attr_name, attr_value in node.op.attrs.items():
# attrs[attr_name.value] = _Converter.to_xir_attr_value(attr_name.value, attr_value.value)
input_ops: Dict[str, List["xir.Op"]] = {}
input_ops["input"] = [xgraph.get_op_by_name(node.in_nodes[0])]
input_ops["input"] = xgraph.create_input_fix_ops(input_ops["input"], node.name, quant_config)
xgraph.create_fixed_normal_op(
node.name + "_i0", "avgpool2d", quant_config, attrs=attrs, input_ops=input_ops)
scale = [scale]
xgraph.create_fixed_const_op(name=node.name + "_i1",
data=np.array(scale, dtype=np.float32),
quant_info=quant_config)
input_ops: Dict[str, List["xir.Op"]] = {}
input_ops["input"] = [xgraph.get_op_by_name(node.name + "_i0"), xgraph.get_op_by_name(node.name + "_i1")]
xgraph.create_fixed_normal_op(
node.name, "mul", quant_config, input_ops=input_ops) | null |
23,144 | import math
import itertools
import re
import subprocess
from collections import ChainMap
from typing import List, Dict, Any, NoReturn, Tuple, Optional, Sequence
import numpy as np
from functools import partial
from nndct_shared.base import NNDCT_OP, NNDCT_KEYS
from nndct_shared.nndct_graph import Tensor, Node
from .xgraph import XGraph
from nndct_shared.utils import calculate_op_scale, DataXopError
NndctQuantInfo = Dict[str, Dict[str, List[int]]]
def _get_xir_attr_from_node(node: Node):
attrs = None
if len(node.op.attrs) > 0:
attrs: Dict[str, Any] = {}
for attr_name, attr_value in node.op.attrs.items():
if node.op.is_xir_attr(attr_name):
attrs[attr_name.value] = _Converter.to_xir_attr_value(node.op.type, attr_name.value, attr_value.value)
return attrs
class XGraph(object):
def __init__(self, name: str):
self._graph = Graph(name)
self._const_ops: Dict[str, Op] = {}
self._ops: Dict[str, Op] = {}
def _check_inputs(self, input_ops):
if any([ip is None for ip in input_ops]):
raise RuntimeError('The input op is `None`, please check graph.')
def create_const_op(self, name: str, data: Optional[np.ndarray]) -> NoReturn:
if data is not None and data.ndim == 0:
data = np.array([data], data.dtype)
const_op = self._graph.create_const_op(name, data)
if name in self._const_ops:
      raise RuntimeError('The const op {} is already in the graph.'.format(name))
return const_op
def create_input_transpose_ops(self, input_list: List[Op], input_tensors: 'List[base_tensor::Tensor]'):
t_ops = []
for i, (input, tensor) in enumerate(zip(input_list, input_tensors)):
if tensor.ndim in [4, 5]:
attrs: Dict[str, Any] = {}
attrs['order'] = [0, 3, 1, 2] if tensor.ndim == 4 else [0, 4, 3, 1, 2]
input_ops: Dict[str, List[Op]] = {}
input_ops['input'] = [input]
op_name = input.get_name() + NNDCT_KEYS.TRANSPOSE_OP_SUFFIX
t_op = self.get_op_by_name(op_name)
if t_op is None:
t_op = self.create_normal_op(
op_name,
'transpose',
attrs=attrs,
input_ops=input_ops)
t_ops.append(t_op)
else:
t_ops.append(input)
return t_ops
def create_normal_op(self,
name: str,
kind: str,
tensor: Optional[np.ndarray] = None,
attrs: Optional[Dict[str, Any]] = None,
input_ops: Optional[List[Op]] = None) -> Op:
if input_ops is not None:
self._check_inputs(input_ops['input'])
op = self._graph.create_op(name, kind, attrs, input_ops)
return op
def create_fixed_const_op(self, name: str, data: np.ndarray,
quant_info: NndctQuantInfo) -> Op:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
const_op = self.create_const_op(formal_name, data)
# print(const_op.get_name(), "const", const_op.get_output_tensor().dims)
fixed_const_op = self.create_fix_op(const_op, name, quant_info)
return fixed_const_op if fixed_const_op else const_op
def create_fixed_normal_op(self,
name: str,
kind: str,
quant_info: NndctQuantInfo,
tensor: Optional[np.ndarray] = None,
attrs: Optional[Dict[str, Any]] = None,
input_ops: Optional[List[Op]] = None) -> Op:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
op = self.create_normal_op(
name=formal_name,
kind=kind,
tensor=tensor,
attrs=attrs,
input_ops=input_ops)
# print(op.get_name(), kind, op.get_output_tensor().dims)
post_fixed_op = self.create_fix_op(op, name, quant_info)
return post_fixed_op if post_fixed_op else op
def create_input_fix_ops(self, input_list: List[Op], key_name: str, quant_info: NndctQuantInfo):
pre_fix_ops = []
for i, op in enumerate(input_list):
pre_fix_op = self.create_fix_op(op, key_name, quant_info, id=i, post_fix=False)
if pre_fix_op:
pre_fix_ops.append(pre_fix_op)
else:
pre_fix_ops.append(op)
return pre_fix_ops
def create_fix_op(self, input: Op, key_name: str,
quant_info: NndctQuantInfo, id: Optional[int] = None, post_fix: bool = True) -> Optional[Op]:
def _get_fix_info(name: str, quant_info: NndctQuantInfo) -> Sequence[int]:
if post_fix:
combinded_fix_infos = ChainMap(dict(quant_info['param']), dict(quant_info['output']))
else:
combinded_fix_infos = quant_info['input']
if name in combinded_fix_infos.keys():
return combinded_fix_infos[name]
else:
return None
# if NNDCT_KEYS.FIX_OP_SUFFIX in input.get_name():
# raise RuntimeError("The consecutive fix ops in graph is forbidden!")
if not isinstance(quant_info, dict):
return None
# bit_width, fix_point = _get_fix_info(key_name, quant_info)[0]
# if bit_width is None or fix_point is None:
# return None
bit_info = _get_fix_info(key_name, quant_info)
if bit_info is None or bit_info[0] is None:
return None
bit_width, fix_point = bit_info[0]
if bit_width is None or fix_point is None:
return None
attrs: Dict[str, Any] = {}
attrs['fix_point'] = fix_point
attrs['bit_width'] = bit_width
attrs['round_mode'] = "DPU_ROUND"
attrs['if_signed'] = True
input_ops: Dict[str, List[Op]] = {}
input_ops['input'] = [input]
if post_fix:
op_name = input.get_name() + NNDCT_KEYS.FIX_OP_SUFFIX
else:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", key_name)
if id is not None:
op_name = formal_name + NNDCT_KEYS.PRE_FIX_OP_SUFFIX + f"_i{id}"
else:
op_name = formal_name + NNDCT_KEYS.PRE_FIX_OP_SUFFIX
fix_op = self.create_normal_op(
op_name,
'fix',
attrs=attrs,
input_ops=input_ops)
return fix_op
def get_op_by_name(self, name: str) -> Op:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
op = self._graph.get_op(formal_name + NNDCT_KEYS.FIX_OP_SUFFIX)
if op is None:
op = self._graph.get_op(formal_name)
return op
def get_op_output_shape(self, name: str) -> List[int]:
op = self.get_op_by_name(name)
if op:
return op.get_output_tensor().dims
else:
NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, f"{name} is not in xmodel. Please check it.")
def export_to_xmodel(self, fname: str) -> NoReturn:
fname += NNDCT_KEYS.XMODEL_SUFFIX
try:
self._graph.serialize(fname)
except Exception:
raise ExportXmodelError(self._graph.get_name())
else:
NndctScreenLogger().info(f"=>Successfully convert '{self._graph.get_name()}' to xmodel.({fname})")
def export_to_img(self, fname: str) -> NoReturn:
fname += NNDCT_KEYS.XMODEL_IMAGE_SUFFIX
try:
shell_command = "which dot"
proc = subprocess.Popen(shell_command, stdout=subprocess.PIPE, shell=True)
try:
outs, errs = proc.communicate(timeout=2)
except subprocess.TimeoutExpired:
proc.kill()
outs, errs = proc.communicate()
NndctScreenLogger().error2user(QError.PROC_TIMEOUT, f"{errs}")
raise
if outs:
self._graph.save_as_dot(fname)
else:
        NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, "Can't find the 'dot' command on this system, please install it." \
          " Otherwise, the xmodel image will not be generated.")
except Exception as e:
NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, f"Failed to generate xmodel image!({str(e)})")
def graph(self):
return self._graph
def conv3d(xgraph: XGraph, node: Node, quant_config: NndctQuantInfo) -> NoReturn:
attrs = _get_xir_attr_from_node(node)
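  # Reverse the spatial ordering of the kernel/stride/dilation attributes and expand each
  # pad value into a (before, after) pair before handing them to the XIR conv3d op.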
attrs['kernel'] = attrs['kernel'][::-1]
attrs['stride'] = attrs['stride'][::-1]
attrs['dilation'] = attrs['dilation'][::-1]
attrs['pad'] = list(itertools.chain.from_iterable([[pad]*2 for pad in attrs['pad'][::-1]]))
  # print(attrs)  # debug output, disabled
input_ops: Dict[str, List["xir.Op"]] = {}
if node.has_bound_params():
for param_name, param_tensor in node.op.params.items():
param = xgraph.get_op_by_name(param_tensor.name)
input_ops[param_name.name.lower()] = [param]
input_list = []
for input in node.in_tensors:
if node.has_bound_params() and input.is_param_tensor():
continue
elif input.is_param_tensor():
input_op = xgraph.get_op_by_name(input.name)
else:
input_op = xgraph.get_op_by_name(input.node.name)
input_list.append(input_op)
input_ops["input"] = xgraph.create_input_fix_ops(input_list, node.name, quant_config)
xgraph.create_fixed_normal_op(
node.name, "conv3d", quant_config, attrs=attrs, input_ops=input_ops) | null |
23,145 | import math
import itertools
import re
import subprocess
from collections import ChainMap
from typing import List, Dict, Any, NoReturn, Tuple, Optional, Sequence
import numpy as np
from functools import partial
from nndct_shared.base import NNDCT_OP, NNDCT_KEYS
from nndct_shared.nndct_graph import Tensor, Node
from .xgraph import XGraph
from nndct_shared.utils import calculate_op_scale, DataXopError
NndctQuantInfo = Dict[str, Dict[str, List[int]]]
def _get_xir_attr_from_node(node: Node):
attrs = None
if len(node.op.attrs) > 0:
attrs: Dict[str, Any] = {}
for attr_name, attr_value in node.op.attrs.items():
if node.op.is_xir_attr(attr_name):
attrs[attr_name.value] = _Converter.to_xir_attr_value(node.op.type, attr_name.value, attr_value.value)
return attrs
class XGraph(object):
def __init__(self, name: str):
self._graph = Graph(name)
self._const_ops: Dict[str, Op] = {}
self._ops: Dict[str, Op] = {}
def _check_inputs(self, input_ops):
if any([ip is None for ip in input_ops]):
raise RuntimeError('The input op is `None`, please check graph.')
def create_const_op(self, name: str, data: Optional[np.ndarray]) -> NoReturn:
if data is not None and data.ndim == 0:
data = np.array([data], data.dtype)
const_op = self._graph.create_const_op(name, data)
if name in self._const_ops:
      raise RuntimeError('The const op {} is already in the graph.'.format(name))
return const_op
def create_input_transpose_ops(self, input_list: List[Op], input_tensors: 'List[base_tensor::Tensor]'):
t_ops = []
for i, (input, tensor) in enumerate(zip(input_list, input_tensors)):
if tensor.ndim in [4, 5]:
attrs: Dict[str, Any] = {}
attrs['order'] = [0, 3, 1, 2] if tensor.ndim == 4 else [0, 4, 3, 1, 2]
input_ops: Dict[str, List[Op]] = {}
input_ops['input'] = [input]
op_name = input.get_name() + NNDCT_KEYS.TRANSPOSE_OP_SUFFIX
t_op = self.get_op_by_name(op_name)
if t_op is None:
t_op = self.create_normal_op(
op_name,
'transpose',
attrs=attrs,
input_ops=input_ops)
t_ops.append(t_op)
else:
t_ops.append(input)
return t_ops
def create_normal_op(self,
name: str,
kind: str,
tensor: Optional[np.ndarray] = None,
attrs: Optional[Dict[str, Any]] = None,
input_ops: Optional[List[Op]] = None) -> Op:
if input_ops is not None:
self._check_inputs(input_ops['input'])
op = self._graph.create_op(name, kind, attrs, input_ops)
return op
def create_fixed_const_op(self, name: str, data: np.ndarray,
quant_info: NndctQuantInfo) -> Op:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
const_op = self.create_const_op(formal_name, data)
# print(const_op.get_name(), "const", const_op.get_output_tensor().dims)
fixed_const_op = self.create_fix_op(const_op, name, quant_info)
return fixed_const_op if fixed_const_op else const_op
def create_fixed_normal_op(self,
name: str,
kind: str,
quant_info: NndctQuantInfo,
tensor: Optional[np.ndarray] = None,
attrs: Optional[Dict[str, Any]] = None,
input_ops: Optional[List[Op]] = None) -> Op:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
op = self.create_normal_op(
name=formal_name,
kind=kind,
tensor=tensor,
attrs=attrs,
input_ops=input_ops)
# print(op.get_name(), kind, op.get_output_tensor().dims)
post_fixed_op = self.create_fix_op(op, name, quant_info)
return post_fixed_op if post_fixed_op else op
def create_input_fix_ops(self, input_list: List[Op], key_name: str, quant_info: NndctQuantInfo):
pre_fix_ops = []
for i, op in enumerate(input_list):
pre_fix_op = self.create_fix_op(op, key_name, quant_info, id=i, post_fix=False)
if pre_fix_op:
pre_fix_ops.append(pre_fix_op)
else:
pre_fix_ops.append(op)
return pre_fix_ops
def create_fix_op(self, input: Op, key_name: str,
quant_info: NndctQuantInfo, id: Optional[int] = None, post_fix: bool = True) -> Optional[Op]:
def _get_fix_info(name: str, quant_info: NndctQuantInfo) -> Sequence[int]:
if post_fix:
combinded_fix_infos = ChainMap(dict(quant_info['param']), dict(quant_info['output']))
else:
combinded_fix_infos = quant_info['input']
if name in combinded_fix_infos.keys():
return combinded_fix_infos[name]
else:
return None
# if NNDCT_KEYS.FIX_OP_SUFFIX in input.get_name():
# raise RuntimeError("The consecutive fix ops in graph is forbidden!")
if not isinstance(quant_info, dict):
return None
# bit_width, fix_point = _get_fix_info(key_name, quant_info)[0]
# if bit_width is None or fix_point is None:
# return None
bit_info = _get_fix_info(key_name, quant_info)
if bit_info is None or bit_info[0] is None:
return None
bit_width, fix_point = bit_info[0]
if bit_width is None or fix_point is None:
return None
attrs: Dict[str, Any] = {}
attrs['fix_point'] = fix_point
attrs['bit_width'] = bit_width
attrs['round_mode'] = "DPU_ROUND"
attrs['if_signed'] = True
input_ops: Dict[str, List[Op]] = {}
input_ops['input'] = [input]
if post_fix:
op_name = input.get_name() + NNDCT_KEYS.FIX_OP_SUFFIX
else:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", key_name)
if id is not None:
op_name = formal_name + NNDCT_KEYS.PRE_FIX_OP_SUFFIX + f"_i{id}"
else:
op_name = formal_name + NNDCT_KEYS.PRE_FIX_OP_SUFFIX
fix_op = self.create_normal_op(
op_name,
'fix',
attrs=attrs,
input_ops=input_ops)
return fix_op
def get_op_by_name(self, name: str) -> Op:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
op = self._graph.get_op(formal_name + NNDCT_KEYS.FIX_OP_SUFFIX)
if op is None:
op = self._graph.get_op(formal_name)
return op
def get_op_output_shape(self, name: str) -> List[int]:
op = self.get_op_by_name(name)
if op:
return op.get_output_tensor().dims
else:
NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, f"{name} is not in xmodel. Please check it.")
def export_to_xmodel(self, fname: str) -> NoReturn:
fname += NNDCT_KEYS.XMODEL_SUFFIX
try:
self._graph.serialize(fname)
except Exception:
raise ExportXmodelError(self._graph.get_name())
else:
NndctScreenLogger().info(f"=>Successfully convert '{self._graph.get_name()}' to xmodel.({fname})")
def export_to_img(self, fname: str) -> NoReturn:
fname += NNDCT_KEYS.XMODEL_IMAGE_SUFFIX
try:
shell_command = "which dot"
proc = subprocess.Popen(shell_command, stdout=subprocess.PIPE, shell=True)
try:
outs, errs = proc.communicate(timeout=2)
except subprocess.TimeoutExpired:
proc.kill()
outs, errs = proc.communicate()
NndctScreenLogger().error2user(QError.PROC_TIMEOUT, f"{errs}")
raise
if outs:
self._graph.save_as_dot(fname)
else:
        NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, "Can't find the 'dot' command on this system, please install it." \
          " Otherwise, the xmodel image will not be generated.")
except Exception as e:
NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, f"Failed to generate xmodel image!({str(e)})")
def graph(self):
return self._graph
def conv_transpose_3d(xgraph: XGraph, node: Node, quant_config: NndctQuantInfo) -> NoReturn:
attrs = _get_xir_attr_from_node(node)
attrs['kernel'] = attrs['kernel'][::-1]
attrs['stride'] = attrs['stride'][::-1]
attrs['dilation'] = attrs['dilation'][::-1]
attrs['pad'] = list(itertools.chain.from_iterable([[pad]*2 for pad in attrs['pad'][::-1]]))
  # print(attrs)  # debug output, disabled
input_ops: Dict[str, List["xir.Op"]] = {}
if node.has_bound_params():
for param_name, param_tensor in node.op.params.items():
param = xgraph.get_op_by_name(param_tensor.name)
input_ops[param_name.name.lower()] = [param]
input_list = []
for input in node.in_tensors:
if node.has_bound_params() and input.is_param_tensor():
continue
elif input.is_param_tensor():
input_op = xgraph.get_op_by_name(input.name)
else:
input_op = xgraph.get_op_by_name(input.node.name)
input_list.append(input_op)
input_ops["input"] = xgraph.create_input_fix_ops(input_list, node.name, quant_config)
xgraph.create_fixed_normal_op(
node.name, "transposed_conv3d", quant_config, attrs=attrs, input_ops=input_ops) | null |
23,146 | import math
import itertools
import re
import subprocess
from collections import ChainMap
from typing import List, Dict, Any, NoReturn, Tuple, Optional, Sequence
import numpy as np
from functools import partial
from nndct_shared.base import NNDCT_OP, NNDCT_KEYS
from nndct_shared.nndct_graph import Tensor, Node
from .xgraph import XGraph
from nndct_shared.utils import calculate_op_scale, DataXopError
NndctQuantInfo = Dict[str, Dict[str, List[int]]]
def _get_xir_attr_from_node(node: Node):
attrs = None
if len(node.op.attrs) > 0:
attrs: Dict[str, Any] = {}
for attr_name, attr_value in node.op.attrs.items():
if node.op.is_xir_attr(attr_name):
attrs[attr_name.value] = _Converter.to_xir_attr_value(node.op.type, attr_name.value, attr_value.value)
return attrs
def scale(xgraph, node, quant_config):
attrs: Dict[str, Any] = {}
input_ops: Dict[str, List["xir.Op"]] = {}
if node.has_bound_params():
for param_name, param_tensor in node.op.params.items():
if param_name == node.op.ParamName.GAMMA:
input_ops['scale'] = [xgraph.get_op_by_name(param_tensor.name)]
if param_name == node.op.ParamName.BETA:
input_ops['bias'] = [xgraph.get_op_by_name(param_tensor.name)]
input_list = []
for input in node.in_tensors:
if node.has_bound_params() and input.is_param_tensor():
continue
elif input.is_param_tensor():
input_op = xgraph.get_op_by_name(input.name)
else:
input_op = xgraph.get_op_by_name(input.node.name)
input_list.append(input_op)
input_ops["input"] = xgraph.create_input_fix_ops(input_list, node.name, quant_config)
xgraph.create_fixed_normal_op(node.name, "scale", quant_config, attrs=attrs, input_ops=input_ops)
class XGraph(object):
def __init__(self, name: str):
self._graph = Graph(name)
self._const_ops: Dict[str, Op] = {}
self._ops: Dict[str, Op] = {}
def _check_inputs(self, input_ops):
if any([ip is None for ip in input_ops]):
raise RuntimeError('The input op is `None`, please check graph.')
def create_const_op(self, name: str, data: Optional[np.ndarray]) -> NoReturn:
if data is not None and data.ndim == 0:
data = np.array([data], data.dtype)
const_op = self._graph.create_const_op(name, data)
if name in self._const_ops:
      raise RuntimeError('The const op {} is already in the graph.'.format(name))
return const_op
def create_input_transpose_ops(self, input_list: List[Op], input_tensors: 'List[base_tensor::Tensor]'):
t_ops = []
for i, (input, tensor) in enumerate(zip(input_list, input_tensors)):
if tensor.ndim in [4, 5]:
attrs: Dict[str, Any] = {}
attrs['order'] = [0, 3, 1, 2] if tensor.ndim == 4 else [0, 4, 3, 1, 2]
input_ops: Dict[str, List[Op]] = {}
input_ops['input'] = [input]
op_name = input.get_name() + NNDCT_KEYS.TRANSPOSE_OP_SUFFIX
t_op = self.get_op_by_name(op_name)
if t_op is None:
t_op = self.create_normal_op(
op_name,
'transpose',
attrs=attrs,
input_ops=input_ops)
t_ops.append(t_op)
else:
t_ops.append(input)
return t_ops
def create_normal_op(self,
name: str,
kind: str,
tensor: Optional[np.ndarray] = None,
attrs: Optional[Dict[str, Any]] = None,
input_ops: Optional[List[Op]] = None) -> Op:
if input_ops is not None:
self._check_inputs(input_ops['input'])
op = self._graph.create_op(name, kind, attrs, input_ops)
return op
def create_fixed_const_op(self, name: str, data: np.ndarray,
quant_info: NndctQuantInfo) -> Op:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
const_op = self.create_const_op(formal_name, data)
# print(const_op.get_name(), "const", const_op.get_output_tensor().dims)
fixed_const_op = self.create_fix_op(const_op, name, quant_info)
return fixed_const_op if fixed_const_op else const_op
def create_fixed_normal_op(self,
name: str,
kind: str,
quant_info: NndctQuantInfo,
tensor: Optional[np.ndarray] = None,
attrs: Optional[Dict[str, Any]] = None,
input_ops: Optional[List[Op]] = None) -> Op:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
op = self.create_normal_op(
name=formal_name,
kind=kind,
tensor=tensor,
attrs=attrs,
input_ops=input_ops)
# print(op.get_name(), kind, op.get_output_tensor().dims)
post_fixed_op = self.create_fix_op(op, name, quant_info)
return post_fixed_op if post_fixed_op else op
def create_input_fix_ops(self, input_list: List[Op], key_name: str, quant_info: NndctQuantInfo):
pre_fix_ops = []
for i, op in enumerate(input_list):
pre_fix_op = self.create_fix_op(op, key_name, quant_info, id=i, post_fix=False)
if pre_fix_op:
pre_fix_ops.append(pre_fix_op)
else:
pre_fix_ops.append(op)
return pre_fix_ops
def create_fix_op(self, input: Op, key_name: str,
quant_info: NndctQuantInfo, id: Optional[int] = None, post_fix: bool = True) -> Optional[Op]:
def _get_fix_info(name: str, quant_info: NndctQuantInfo) -> Sequence[int]:
if post_fix:
combinded_fix_infos = ChainMap(dict(quant_info['param']), dict(quant_info['output']))
else:
combinded_fix_infos = quant_info['input']
if name in combinded_fix_infos.keys():
return combinded_fix_infos[name]
else:
return None
# if NNDCT_KEYS.FIX_OP_SUFFIX in input.get_name():
# raise RuntimeError("The consecutive fix ops in graph is forbidden!")
if not isinstance(quant_info, dict):
return None
# bit_width, fix_point = _get_fix_info(key_name, quant_info)[0]
# if bit_width is None or fix_point is None:
# return None
bit_info = _get_fix_info(key_name, quant_info)
if bit_info is None or bit_info[0] is None:
return None
bit_width, fix_point = bit_info[0]
if bit_width is None or fix_point is None:
return None
attrs: Dict[str, Any] = {}
attrs['fix_point'] = fix_point
attrs['bit_width'] = bit_width
attrs['round_mode'] = "DPU_ROUND"
attrs['if_signed'] = True
input_ops: Dict[str, List[Op]] = {}
input_ops['input'] = [input]
if post_fix:
op_name = input.get_name() + NNDCT_KEYS.FIX_OP_SUFFIX
else:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", key_name)
if id is not None:
op_name = formal_name + NNDCT_KEYS.PRE_FIX_OP_SUFFIX + f"_i{id}"
else:
op_name = formal_name + NNDCT_KEYS.PRE_FIX_OP_SUFFIX
fix_op = self.create_normal_op(
op_name,
'fix',
attrs=attrs,
input_ops=input_ops)
return fix_op
def get_op_by_name(self, name: str) -> Op:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
op = self._graph.get_op(formal_name + NNDCT_KEYS.FIX_OP_SUFFIX)
if op is None:
op = self._graph.get_op(formal_name)
return op
def get_op_output_shape(self, name: str) -> List[int]:
op = self.get_op_by_name(name)
if op:
return op.get_output_tensor().dims
else:
NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, f"{name} is not in xmodel. Please check it.")
def export_to_xmodel(self, fname: str) -> NoReturn:
fname += NNDCT_KEYS.XMODEL_SUFFIX
try:
self._graph.serialize(fname)
except Exception:
raise ExportXmodelError(self._graph.get_name())
else:
NndctScreenLogger().info(f"=>Successfully convert '{self._graph.get_name()}' to xmodel.({fname})")
def export_to_img(self, fname: str) -> NoReturn:
fname += NNDCT_KEYS.XMODEL_IMAGE_SUFFIX
try:
shell_command = "which dot"
proc = subprocess.Popen(shell_command, stdout=subprocess.PIPE, shell=True)
try:
outs, errs = proc.communicate(timeout=2)
except subprocess.TimeoutExpired:
proc.kill()
outs, errs = proc.communicate()
NndctScreenLogger().error2user(QError.PROC_TIMEOUT, f"{errs}")
raise
if outs:
self._graph.save_as_dot(fname)
else:
        NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, "Can't find the 'dot' command on this system, please install it." \
          " Otherwise, the xmodel image will not be generated.")
except Exception as e:
NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, f"Failed to generate xmodel image!({str(e)})")
def graph(self):
return self._graph
def hsigmoid(xgraph: XGraph, node: Node, quant_config: NndctQuantInfo) -> NoReturn:
scale = 6.0 * 2731.0 / 16384.0
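  # 2731/16384 (~0.1667) acts as the fixed-point stand-in for the 1/6 factor inside
  # hard-sigmoid, so the extra multiply by 6 * 2731/16384 (~1.0001) compensates for it.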
attrs = _get_xir_attr_from_node(node)
input_ops: Dict[str, List["xir.Op"]] = {}
input_ops["input"] = [xgraph.get_op_by_name(node.in_nodes[0])]
input_ops["input"] = xgraph.create_input_fix_ops(input_ops["input"], node.name, quant_config)
xgraph.create_fixed_normal_op(
node.name + "_i0", "hard-sigmoid", quant_config, attrs=attrs, input_ops=input_ops)
scale = [scale]
xgraph.create_fixed_const_op(name=node.name + "_i1",
data=np.array(scale, dtype=np.float32),
quant_info=quant_config)
input_ops: Dict[str, List["xir.Op"]] = {}
input_ops["input"] = [xgraph.get_op_by_name(node.name + "_i0"), xgraph.get_op_by_name(node.name + "_i1")]
xgraph.create_fixed_normal_op(
node.name, "mul", quant_config, input_ops=input_ops) | null |
23,147 | import math
import itertools
from typing import List, Dict, Any, NoReturn, Tuple, Optional, Sequence
import numpy as np
from functools import partial
from nndct_shared.base import NNDCT_OP, NNDCT_KEYS
from nndct_shared.nndct_graph import Tensor, Node
from .xgraph import XGraph
from nndct_shared.utils import calculate_op_scale, DataXopError
NndctQuantInfo = Dict[str, Dict[str, List[int]]]
def _get_xir_attr_from_node(node: Node):
def scale(xgraph, node, quant_config):
class XGraph(object):
def __init__(self, name: str):
def _check_inputs(self, input_ops):
def create_const_op(self, name: str, data: Optional[np.ndarray]) -> NoReturn:
def create_input_transpose_ops(self, input_list: List[Op], input_tensors: 'List[base_tensor::Tensor]'):
def create_normal_op(self,
name: str,
kind: str,
tensor: Optional[np.ndarray] = None,
attrs: Optional[Dict[str, Any]] = None,
input_ops: Optional[List[Op]] = None) -> Op:
def create_fixed_const_op(self, name: str, data: np.ndarray,
quant_info: NndctQuantInfo) -> Op:
def create_fixed_normal_op(self,
name: str,
kind: str,
quant_info: NndctQuantInfo,
tensor: Optional[np.ndarray] = None,
attrs: Optional[Dict[str, Any]] = None,
input_ops: Optional[List[Op]] = None) -> Op:
def create_input_fix_ops(self, input_list: List[Op], key_name: str, quant_info: NndctQuantInfo):
def create_fix_op(self, input: Op, key_name: str,
quant_info: NndctQuantInfo, id: Optional[int] = None, post_fix: bool = True) -> Optional[Op]:
def _get_fix_info(name: str, quant_info: NndctQuantInfo) -> Sequence[int]:
def get_op_by_name(self, name: str) -> Op:
def get_op_output_shape(self, name: str) -> List[int]:
def export_to_xmodel(self, fname: str) -> NoReturn:
def export_to_img(self, fname: str) -> NoReturn:
def graph(self):
def hswish(xgraph: XGraph, node: Node, quant_config: NndctQuantInfo) -> NoReturn:
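  # hard-swish is lowered as x * hard-sigmoid(x): a hard-sigmoid op, a multiply by the
  # ~1.0 compensation constant (see hsigmoid), an optional fix op derived from this
  # node's output quantization, and a final multiply with the original input.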
scale = 6.0 * 2731.0 / 16384.0
attrs = _get_xir_attr_from_node(node)
node_input_op = xgraph.get_op_by_name(node.in_nodes[0])
input_ops: Dict[str, List["xir.Op"]] = {}
input_ops["input"] = [node_input_op]
input_ops["input"] = xgraph.create_input_fix_ops(input_ops["input"], node.name, quant_config)
hsigmoid_op = xgraph.create_fixed_normal_op(
node.name + "_i0", "hard-sigmoid", quant_config, attrs=attrs, input_ops=input_ops)
scale = [scale]
const_op = xgraph.create_const_op(name=node.name + "_i1", data=np.array(scale, dtype=np.float32))
input_ops["input"] = [hsigmoid_op, const_op]
mul_op = xgraph.create_normal_op(node.name + '_mul', "mul", input_ops=input_ops)
if quant_config and node.name in quant_config['output'] and quant_config["output"][node.name][0] is not None:
mul_fp = [8, None]
mul_fp[0], _ = quant_config['output'][node.name][0]
mul_fp[1] = mul_fp[0] - 1
attrs: Dict[str, Any] = {}
attrs['fix_point'] = mul_fp[1]
attrs['bit_width'] = mul_fp[0]
attrs['round_mode'] = "DPU_ROUND"
attrs['if_signed'] = True
input_ops['input'] = [mul_op]
op_name = mul_op.get_name() + NNDCT_KEYS.FIX_OP_SUFFIX
mul_fixed_op = xgraph.create_normal_op(op_name, 'fix', attrs=attrs, input_ops=input_ops)
input_ops["input"] = [mul_fixed_op, node_input_op]
else:
input_ops["input"] = [mul_op, node_input_op]
hswish_fixed_op = xgraph.create_fixed_normal_op(
node.name, "mul", quant_config, input_ops=input_ops) | null |
23,148 | import math
import itertools
import re
import subprocess
from collections import ChainMap
from typing import List, Dict, Any, NoReturn, Tuple, Optional, Sequence
import numpy as np
from functools import partial
from nndct_shared.base import NNDCT_OP, NNDCT_KEYS
from nndct_shared.nndct_graph import Tensor, Node
from .xgraph import XGraph
from nndct_shared.utils import calculate_op_scale, DataXopError
NndctQuantInfo = Dict[str, Dict[str, List[int]]]
class _Converter:
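  # Translates NNDCT-side dtypes and enum-valued attributes into the string forms XIR
  # expects, e.g. numpy dtypes to "FLOAT32" and pad_mode indices to "FLOOR"/"CEIL"/
  # "SAME"/"VALID".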
_nndct2xir_type = {np.float32: "FLOAT32",
np.float64: "FLOAT64",
np.int64: "INT64",
np.int32: "INT32",
}
_nndct2numpy_type = {
"float32": np.float32,
"float64": np.float64,
"int32": np.int32,
"int64": np.int64
}
_pad_mode = {"pad_mode": {0: "FLOOR",
1: "CEIL",
2: "SAME",
3: "VALID"}
}
_nndct2xir_value = {NNDCT_OP.CONV2D: _pad_mode,
NNDCT_OP.DEPTHWISE_CONV2D: _pad_mode,
NNDCT_OP.CONVTRANSPOSE2D: _pad_mode,
NNDCT_OP.DEPTHWISE_CONVTRANSPOSE2D: _pad_mode,
NNDCT_OP.MAX_POOL: _pad_mode,
NNDCT_OP.MAX_POOL1D: _pad_mode,
NNDCT_OP.AVG_POOL: _pad_mode,
NNDCT_OP.PAD: {"mode": {0: "CONSTANT", 1: "REFLECT", 2: "SYMMETRIC"}}
}
  @classmethod
  def to_xir_dtype(cls, numpy_dtype):
    return cls._nndct2xir_type[numpy_dtype]
  @classmethod
  def to_xir_dtype_by_string(cls, dtype):
    return {
        "float32": "FLOAT32",
        "float64": "FLOAT64",
        "int32": "INT32",
        "int64": "INT64"
    }.get(dtype, dtype)
  @classmethod
  def to_xir_attr_value(cls, node_op_type, nndct_attr_name: str, nndct_attr_value: Any):
    if node_op_type not in cls._nndct2xir_value or nndct_attr_name not in cls._nndct2xir_value[node_op_type]:
      return nndct_attr_value
    else:
      return cls._nndct2xir_value[node_op_type][nndct_attr_name][nndct_attr_value]
  @classmethod
  def to_numpy_dtype(cls, nndct_dtype):
    return cls._nndct2numpy_type[nndct_dtype]
def _get_xir_attr_from_node(node: Node):
attrs = None
if len(node.op.attrs) > 0:
attrs: Dict[str, Any] = {}
for attr_name, attr_value in node.op.attrs.items():
if node.op.is_xir_attr(attr_name):
attrs[attr_name.value] = _Converter.to_xir_attr_value(node.op.type, attr_name.value, attr_value.value)
return attrs
def shape(xgraph: XGraph, node: Node, quant_config: NndctQuantInfo) -> NoReturn:
r""" nndct shape is a macro operator, including shape, stridedslice
"""
# raise NotImplementedError("shape")
input_list = []
shape_input_ops: Dict[str, List["xir.Op"]] = {}
for input in node.in_nodes:
input_op = xgraph.get_op_by_name(input)
input_list.append(input_op)
shape_input_ops["input"] = input_list
sub_op_shape = xgraph.create_fixed_normal_op(
node.name + "_i0", "shape", quant_config, input_ops=shape_input_ops)
attrs: Dict[str, Any] = {}
strided_slice_input_ops: Dict[str, List["xir.Op"]] = {}
strided_slice_input_ops["input"] = [sub_op_shape]
dim = node.node_attr(node.op.AttrName.AXIS)
attrs["begin"] = [dim]
attrs["end"] = [dim + 1]
xgraph.create_fixed_normal_op(
node.name,
"strided_slice",
quant_config,
attrs=attrs,
input_ops=strided_slice_input_ops)
class XGraph(object):
def __init__(self, name: str):
self._graph = Graph(name)
self._const_ops: Dict[str, Op] = {}
self._ops: Dict[str, Op] = {}
def _check_inputs(self, input_ops):
if any([ip is None for ip in input_ops]):
raise RuntimeError('The input op is `None`, please check graph.')
def create_const_op(self, name: str, data: Optional[np.ndarray]) -> NoReturn:
if data is not None and data.ndim == 0:
data = np.array([data], data.dtype)
const_op = self._graph.create_const_op(name, data)
if name in self._const_ops:
      raise RuntimeError('The const op {} is already in the graph.'.format(name))
return const_op
def create_input_transpose_ops(self, input_list: List[Op], input_tensors: 'List[base_tensor::Tensor]'):
t_ops = []
for i, (input, tensor) in enumerate(zip(input_list, input_tensors)):
if tensor.ndim in [4, 5]:
attrs: Dict[str, Any] = {}
attrs['order'] = [0, 3, 1, 2] if tensor.ndim == 4 else [0, 4, 3, 1, 2]
input_ops: Dict[str, List[Op]] = {}
input_ops['input'] = [input]
op_name = input.get_name() + NNDCT_KEYS.TRANSPOSE_OP_SUFFIX
t_op = self.get_op_by_name(op_name)
if t_op is None:
t_op = self.create_normal_op(
op_name,
'transpose',
attrs=attrs,
input_ops=input_ops)
t_ops.append(t_op)
else:
t_ops.append(input)
return t_ops
def create_normal_op(self,
name: str,
kind: str,
tensor: Optional[np.ndarray] = None,
attrs: Optional[Dict[str, Any]] = None,
input_ops: Optional[List[Op]] = None) -> Op:
if input_ops is not None:
self._check_inputs(input_ops['input'])
op = self._graph.create_op(name, kind, attrs, input_ops)
return op
def create_fixed_const_op(self, name: str, data: np.ndarray,
quant_info: NndctQuantInfo) -> Op:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
const_op = self.create_const_op(formal_name, data)
# print(const_op.get_name(), "const", const_op.get_output_tensor().dims)
fixed_const_op = self.create_fix_op(const_op, name, quant_info)
return fixed_const_op if fixed_const_op else const_op
def create_fixed_normal_op(self,
name: str,
kind: str,
quant_info: NndctQuantInfo,
tensor: Optional[np.ndarray] = None,
attrs: Optional[Dict[str, Any]] = None,
input_ops: Optional[List[Op]] = None) -> Op:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
op = self.create_normal_op(
name=formal_name,
kind=kind,
tensor=tensor,
attrs=attrs,
input_ops=input_ops)
# print(op.get_name(), kind, op.get_output_tensor().dims)
post_fixed_op = self.create_fix_op(op, name, quant_info)
return post_fixed_op if post_fixed_op else op
def create_input_fix_ops(self, input_list: List[Op], key_name: str, quant_info: NndctQuantInfo):
pre_fix_ops = []
for i, op in enumerate(input_list):
pre_fix_op = self.create_fix_op(op, key_name, quant_info, id=i, post_fix=False)
if pre_fix_op:
pre_fix_ops.append(pre_fix_op)
else:
pre_fix_ops.append(op)
return pre_fix_ops
def create_fix_op(self, input: Op, key_name: str,
quant_info: NndctQuantInfo, id: Optional[int] = None, post_fix: bool = True) -> Optional[Op]:
def _get_fix_info(name: str, quant_info: NndctQuantInfo) -> Sequence[int]:
if post_fix:
combinded_fix_infos = ChainMap(dict(quant_info['param']), dict(quant_info['output']))
else:
combinded_fix_infos = quant_info['input']
if name in combinded_fix_infos.keys():
return combinded_fix_infos[name]
else:
return None
# if NNDCT_KEYS.FIX_OP_SUFFIX in input.get_name():
# raise RuntimeError("The consecutive fix ops in graph is forbidden!")
if not isinstance(quant_info, dict):
return None
# bit_width, fix_point = _get_fix_info(key_name, quant_info)[0]
# if bit_width is None or fix_point is None:
# return None
bit_info = _get_fix_info(key_name, quant_info)
if bit_info is None or bit_info[0] is None:
return None
bit_width, fix_point = bit_info[0]
if bit_width is None or fix_point is None:
return None
attrs: Dict[str, Any] = {}
attrs['fix_point'] = fix_point
attrs['bit_width'] = bit_width
attrs['round_mode'] = "DPU_ROUND"
attrs['if_signed'] = True
input_ops: Dict[str, List[Op]] = {}
input_ops['input'] = [input]
if post_fix:
op_name = input.get_name() + NNDCT_KEYS.FIX_OP_SUFFIX
else:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", key_name)
if id is not None:
op_name = formal_name + NNDCT_KEYS.PRE_FIX_OP_SUFFIX + f"_i{id}"
else:
op_name = formal_name + NNDCT_KEYS.PRE_FIX_OP_SUFFIX
fix_op = self.create_normal_op(
op_name,
'fix',
attrs=attrs,
input_ops=input_ops)
return fix_op
def get_op_by_name(self, name: str) -> Op:
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
op = self._graph.get_op(formal_name + NNDCT_KEYS.FIX_OP_SUFFIX)
if op is None:
op = self._graph.get_op(formal_name)
return op
def get_op_output_shape(self, name: str) -> List[int]:
op = self.get_op_by_name(name)
if op:
return op.get_output_tensor().dims
else:
NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, f"{name} is not in xmodel. Please check it.")
def export_to_xmodel(self, fname: str) -> NoReturn:
fname += NNDCT_KEYS.XMODEL_SUFFIX
try:
self._graph.serialize(fname)
except Exception:
raise ExportXmodelError(self._graph.get_name())
else:
NndctScreenLogger().info(f"=>Successfully convert '{self._graph.get_name()}' to xmodel.({fname})")
def export_to_img(self, fname: str) -> NoReturn:
fname += NNDCT_KEYS.XMODEL_IMAGE_SUFFIX
try:
shell_command = "which dot"
proc = subprocess.Popen(shell_command, stdout=subprocess.PIPE, shell=True)
try:
outs, errs = proc.communicate(timeout=2)
except subprocess.TimeoutExpired:
proc.kill()
outs, errs = proc.communicate()
NndctScreenLogger().error2user(QError.PROC_TIMEOUT, f"{errs}")
raise
if outs:
self._graph.save_as_dot(fname)
else:
        NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, "Can't find the 'dot' command on this system, please install it." \
          " Otherwise, the xmodel image will not be generated.")
except Exception as e:
NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, f"Failed to generate xmodel image!({str(e)})")
def graph(self):
return self._graph
def custom_xop(xgraph: XGraph, node: Node, quant_config: NndctQuantInfo) -> NoReturn:
shape = node.out_tensors[0].shape
if not shape:
shape = [1]
attrs = _get_xir_attr_from_node(node)
attrs = {} if attrs is None else attrs
attrs["shape"] = shape
#numpy_type = _Converter.to_numpy_dtype(node.out_tensors[0].dtype)
#attrs["data_type"] = _Converter.to_xir_dtype(numpy_type)
attrs["data_type"] = _Converter.to_xir_dtype_by_string(node.out_tensors[0].dtype)
input_ops: Dict[str, List["xir.Op"]] = {}
input_list = []
for input in node.in_tensors:
if input.is_param_tensor():
input_op = xgraph.get_op_by_name(input.name)
else:
input_op = xgraph.get_op_by_name(input.node.name)
input_list.append(input_op)
input_ops["input"] = xgraph.create_input_fix_ops(input_list, node.name, quant_config)
xgraph.create_fixed_normal_op(
node.name, node.op.type, quant_config, attrs=attrs, input_ops=input_ops) | null |
23,149 | import math
import itertools
from typing import List, Dict, Any, NoReturn, Tuple
import numpy as np
from functools import partial
from nndct_shared.base import NNDCT_OP, NNDCT_KEYS
from nndct_shared.nndct_graph import Tensor, Node
from .xgraph import XGraph
from nndct_shared.utils import calculate_op_scale, DataXopError
NndctQuantInfo = Dict[str, Dict[str, List[int]]]
def default_xop(xop_type: str, xgraph: XGraph, node: Node,
quant_config: NndctQuantInfo) -> NoReturn:
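  # Generic lowering used by to_xir(): bound parameters become named XIR inputs keyed by
  # the lower-cased parameter name, the remaining input tensors are wired through
  # per-input fix ops, and attributes are translated via _get_xir_attr_from_node.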
input_ops: Dict[str, List["xir.Op"]] = {}
if node.has_bound_params():
for param_name, param_tensor in node.op.params.items():
param = xgraph.get_op_by_name(param_tensor.name)
input_ops[param_name.name.lower()] = [param]
input_list = []
for input in node.in_tensors:
if node.has_bound_params() and input.is_param_tensor():
continue
elif input.is_param_tensor():
input_op = xgraph.get_op_by_name(input.name)
else:
input_op = xgraph.get_op_by_name(input.node.name)
input_list.append(input_op)
input_ops["input"] = xgraph.create_input_fix_ops(input_list, node.name, quant_config)
attrs = _get_xir_attr_from_node(node)
xgraph.create_fixed_normal_op(
node.name, xop_type, quant_config, attrs=attrs, input_ops=input_ops)
def to_xir(xop_type):
return partial(default_xop, xop_type) | null |
23,150 | import math
import itertools
from typing import List, Dict, Any, NoReturn, Tuple
import numpy as np
from functools import partial
from nndct_shared.base import NNDCT_OP, NNDCT_KEYS
from nndct_shared.nndct_graph import Tensor, Node
from .xgraph import XGraph
from nndct_shared.utils import calculate_op_scale, DataXopError
NndctQuantInfo = Dict[str, Dict[str, List[int]]]
def binary_op(op_type: str, xgraph: XGraph, node: Node, quant_config: NndctQuantInfo):
input, other = node.node_attr(node.op.AttrName.INPUT), node.node_attr(node.op.AttrName.OTHER)
input_name = input.name if input.is_param_tensor() else input.node.name
operand1 = xgraph.get_op_by_name(input_name)
other_name = other.name if other.is_param_tensor() else other.node.name
operand2 = xgraph.get_op_by_name(other_name)
input_ops: Dict[str, List["xir.Op"]] = {}
input_ops["input"] = [operand1, operand2]
input_ops["input"] = xgraph.create_input_fix_ops(input_ops["input"], node.name, quant_config)
xgraph.create_fixed_normal_op(node.name, op_type, quant_config, input_ops=input_ops)
def to_binary_op(xop_type):
return partial(binary_op, xop_type) | null |
23,151 | from nndct_shared.base import NNDCT_CONSTANT
def shape_attr_transform_fn(node, transpose_order):
shape = node.node_attr(node.op.AttrName.SHAPE)
new_shape = len(shape) * [None]
for i, dim in enumerate(transpose_order):
new_shape[i] = shape[dim]
node.set_node_attr(node.op.AttrName.SHAPE, new_shape) | null |
23,152 | from nndct_shared.base import NNDCT_CONSTANT
def axis_attr_transform_fn(node, transpose_order):
dim = node.node_attr(node.op.AttrName.AXIS)
new_dim = transpose_order.index(dim)
node.set_node_attr(node.op.AttrName.AXIS, new_dim) | null |
23,153 | from nndct_shared.base import NNDCT_CONSTANT
def slice_attr_transform_fn(node, transpose_order):
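  # Permute the begin/end/strides of a strided_slice into the transposed layout and
  # rebuild the masks: bit i of begin_mask is set when the new begin at dim i is 0, and
  # bit i of end_mask is set when the new end at dim i reaches INT_MAX (open-ended slice).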
begin = node.node_attr(node.op.AttrName.BEGIN)
new_begin = [None] * len(begin)
for dim, pos in enumerate(begin):
new_dim = transpose_order.index(dim)
new_begin[new_dim] = pos
begin_mask = 0
for dim, pos in enumerate(new_begin):
if pos == 0:
begin_mask |= 1 << dim
node.set_node_attr(node.op.AttrName.BEGIN_MASK, begin_mask)
node.set_node_attr(node.op.AttrName.BEGIN, new_begin)
end = node.node_attr(node.op.AttrName.END)
new_end = [None] * len(end)
end_mask = 0
for dim, pos in enumerate(end):
new_dim = transpose_order.index(dim)
new_end[new_dim] = pos
for dim, pos in enumerate(new_end):
if isinstance(pos, int) and pos >= NNDCT_CONSTANT.INT_MAX:
end_mask |= 1 << dim
node.set_node_attr(node.op.AttrName.END_MASK, end_mask)
node.set_node_attr(node.op.AttrName.END, new_end)
strides = node.node_attr(node.op.AttrName.STRIDES)
new_strides = [1] * len(strides)
for dim, step in enumerate(strides):
new_dim = transpose_order.index(dim)
new_strides[new_dim] = step
node.set_node_attr(node.op.AttrName.STRIDES, new_strides) | null |
23,154 | from nndct_shared.base import NNDCT_CONSTANT
def reduce_op_attr_transform_fn(node, transpose_order):
dims = node.node_attr(node.op.AttrName.DIMS)
new_dims = [None] * len(dims)
for i, dim in enumerate(dims):
new_dim = transpose_order.index(dim)
new_dims[i] = new_dim
node.set_node_attr(node.op.AttrName.DIMS, new_dims) | null |
23,155 | from collections import defaultdict
from nndct_shared.utils import NndctDebugLogger, NndctOption
def convert_quant_config_to_dict(quant_config, init=False):
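  # Flattens a quant_config object into the nested dict consumed by the xop creators:
  # {'param'|'output'|'input': {name: [[bit_width, fix_point], ...]}}. With init=True the
  # fix positions are cleared so only the bit widths are kept.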
config = {'param': defaultdict(list), 'output': defaultdict(list), 'input': defaultdict(list)}
for key in quant_config.get_output_keys():
for quant_info in quant_config.get_output_quant_info(key):
bw, fp = quant_info.get_bw_fp()
if init is True:
fp = None
config["output"][key].append([bw, fp])
for key in quant_config.get_input_keys():
for quant_info in quant_config.get_input_quant_info(key):
bw, fp = quant_info.get_bw_fp()
if init is True:
fp = None
config["input"][key].append([bw, fp])
for key in quant_config.get_param_keys():
quant_info = quant_config.get_param_quant_info(key)
bw, fp = quant_info.get_bw_fp()
if init is True:
fp = None
config["param"][key].append([bw, fp])
return config | null |
23,156 | from collections import defaultdict
from nndct_shared.utils import NndctDebugLogger, NndctOption
from nndct_shared.base import NNDCT_OP
NNDCTIR2XIR_CONVERTOR = {
# NNDCT op type: (XIR op type , XIR_CONVERT_FUNCTION)
NNDCT_OP.INPUT: ("data", data_xop),
NNDCT_OP.CONV1D: ("conv1d", to_xir("conv1d")),
NNDCT_OP.CONV2D: ("conv2d", to_xir("conv2d")),
NNDCT_OP.DEPTHWISE_CONV2D: ("depthwise-conv2d", to_xir("depthwise-conv2d")),
NNDCT_OP.CONVTRANSPOSE2D: ("transposed-conv2d", to_xir("transposed-conv2d")),
NNDCT_OP.AVG_POOL: ("avgpool2d", avgpool),
NNDCT_OP.MAX_POOL1D: ("maxpool1d", to_xir("maxpool1d")),
NNDCT_OP.MAX_POOL: ("maxpool2d", to_xir("maxpool2d")),
NNDCT_OP.RELU: ("relu", to_xir("relu")),
NNDCT_OP.LEAKY_RELU: ("leaky-relu", to_xir("leaky-relu")),
NNDCT_OP.GELU: ("gelu", to_xir("gelu")),
NNDCT_OP.PRELU: ("prelu", to_xir("prelu")),
# NNDCT_OP.SQRT: ("sqrt", to_xir("sqrt")),
NNDCT_OP.TANH: ("tanh", to_xir("tanh")),
NNDCT_OP.SIGMOID: ("sigmoid", to_xir("sigmoid")),
NNDCT_OP.DENSE: ("matmul", dense),
NNDCT_OP.MATMUL: ("matmul", to_xir("matmul")),
NNDCT_OP.RESHAPE: ("reshape", reshape),
NNDCT_OP.ADD: ("add", to_binary_op("add")),
# NNDCT_OP.SCALAR_ADD: ("add", to_binary_op("add")),
NNDCT_OP.FLATTEN: ("flatten", to_xir("flatten")),
NNDCT_OP.CONCAT: ("concat", to_xir("concat")),
NNDCT_OP.MULTIPLY: ("mul", to_binary_op("mul")),
# NNDCT_OP.SCALAR_MUL: ("mul", to_binary_op("mul")),
NNDCT_OP.STRIDED_SLICE: ("strided_slice", to_xir("strided_slice")),
NNDCT_OP.RSUB: ("sub", to_binary_op("sub")),
NNDCT_OP.SUB: ("sub", to_binary_op("sub")),
NNDCT_OP.PAD: ("pad", to_xir("pad")),
NNDCT_OP.PAD_ND: ("pad", to_xir("pad")),
NNDCT_OP.RESIZE: ("resize", resize),
NNDCT_OP.SOFTMAX: ("softmax", to_xir("softmax")),
NNDCT_OP.PERMUTE: ("transpose", to_xir("transpose")),
NNDCT_OP.CONST: ("const", const_xop),
NNDCT_OP.TENSOR: ("const", const_xop),
NNDCT_OP.RELU6: ("relu6", to_xir("relu6")),
NNDCT_OP.MEAN: ("reduction_mean", reduction_mean),
NNDCT_OP.BATCH_NORM: ("scale", scale),
# NNDCT_OP.LAYER_NORM: ("layernorm", to_xir("layernorm")),
NNDCT_OP.QUANT_STUB: ("data", data_xop),
NNDCT_OP.MAX: ("reduction_max", to_xir("reduction_max")),
NNDCT_OP.TRANSPOSE: ("transpose", to_xir("transpose")),
NNDCT_OP.SQUEEZE: ("squeeze", to_xir("squeeze")),
NNDCT_OP.ZEROS: ("const", zeros),
NNDCT_OP.NEG: ("neg", to_xir("neg")),
NNDCT_OP.DIV: ("div", to_binary_op("div")),
NNDCT_OP.SUM: ("reduction_sum", to_xir("reduction_sum")),
NNDCT_OP.HSIGMOID: ("hard-sigmoid", hsigmoid),
NNDCT_OP.HSWISH: ("hard-swish", hswish),
NNDCT_OP.PIXEL_SHUFFLE: ("pixel-shuffle", to_xir("pixel-shuffle")),
NNDCT_OP.PIXEL_UNSHUFFLE: ("pixel-shuffle", to_xir("pixel-shuffle")),
NNDCT_OP.CONV3D: ("conv3d", to_xir("conv3d")),
NNDCT_OP.DEPTHWISE_CONV3D: ("depthwise-conv3d", to_xir("depthwise-conv3d")),
NNDCT_OP.CONVTRANSPOSE3D: ("transposed-conv3d", to_xir("transposed-conv3d")),
NNDCT_OP.DEPTHWISE_CONVTRANSPOSE3D: ("transposed-depthwise-conv3d", to_xir("transposed-depthwise-conv3d")),
NNDCT_OP.RESIZE_3D: ("resize", resize),
NNDCT_OP.DEPTHWISE_CONVTRANSPOSE2D: ("transposed-depthwise-conv2d", to_xir("transposed-depthwise-conv2d")),
NNDCT_OP.ARGMAX_DIM: ("argmax", to_xir('argmax')),
# NNDCT_OP.MISH: ("mish", to_xir("mish")),
# NNDCT_OP.CLAMP: ("clamp", to_xir("clamp")),
# NNDCT_OP.INSTANCE_NORM: ("instancenorm", to_xir("instancenorm")),
# NNDCT_OP.GROUP_NORM: ("groupnorm", to_xir("groupnorm"))
}
def build_xir_nndct_op_map():
from nndct_shared.compile.xop_creator import NNDCTIR2XIR_CONVERTOR
supported_nndct = []
xir2nndct = defaultdict(set)
for nndct_op_type, (xir_op_type, _) in NNDCTIR2XIR_CONVERTOR.items():
xir2nndct[xir_op_type].add(nndct_op_type)
supported_nndct.append(nndct_op_type)
return supported_nndct, xir2nndct | null |
23,157 | import copy
import networkx as nx
from networkx.algorithms import is_isomorphic
from nndct_shared.base import NNDCT_OP
from nndct_shared.inspector.utils import build_xir_nndct_op_map, log_debug_info
from nndct_shared.compile.xir_helper import XIRHelper
from .graph import Graph, Node
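# Each pattern below is a small subgraph template: "nodes" maps a role name to the set of
# xir op types allowed in that role, and "edges" lists the required connections between
# roles. The imports above suggest these templates are matched against the compiled graph
# with networkx isomorphism checks to recognize fix-op simulation patterns such as a fixed
# conv2d followed by hard-swish.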
_SIMULATION_PATTERNS = [
{"name": "conv2d_fix_with_hardwish",
"nodes":[("weights", Node({"fix2float"})),
("bias", Node({"fix2float"})),
("input", Node({"fix2float"})),
("conv2d", Node({"conv2d", "matmul", "scale"})),
("conv2d_out", Node({"float2fix"})),
("hsigmoid_in", Node({"fix2float"})),
("hsigmoid", Node({"hard-sigmoid"})),
("mul", Node({"mul"})),
("hsigmoid_out", Node({"float2fix"})),
("hswish_i0", Node({"fix2float"})),
("hswish_i1", Node({"fix2float"})),
("hswish", Node({"mul"})),
("output", Node({"float2fix"})),
],
"edges": [("weights", "conv2d"),
("bias", "conv2d"),
("input", "conv2d"),
("conv2d", "conv2d_out"),
("conv2d_out", "hsigmoid_in"),
("hsigmoid_in", "hsigmoid"),
("hsigmoid", "mul"),
("mul", "hsigmoid_out"),
("conv2d_out", "hswish_i0"),
("hsigmoid_out", "hswish_i1"),
("hswish_i0", "hswish"),
("hswish_i1", "hswish"),
("hswish", "output"),
]
},
{"name": "conv2d_fix_with_hardsigmoid",
"nodes":[("weights", Node({"fix2float"})),
("bias", Node({"fix2float"})),
("input", Node({"fix2float"})),
("conv2d", Node({"conv2d", "matmul", "scale"})),
("conv2d_out", Node({"float2fix"})),
("hsigmoid_in", Node({"fix2float"})),
("hsigmoid", Node({"hard-sigmoid"})),
("mul", Node({"mul"})),
("output", Node({"float2fix"}))
],
"edges": [("weights", "conv2d"),
("bias", "conv2d"),
("input", "conv2d"),
("conv2d", "conv2d_out"),
("conv2d_out", "hsigmoid_in"),
("hsigmoid_in", "hsigmoid"),
("hsigmoid", "mul"),
("mul", "output")]
},
{"name": "conv2d_fix_with_relu",
"nodes":[("weights", Node({"fix2float"})),
("bias", Node({"fix2float"})),
("input", Node({"fix2float"})),
# ("conv2d", Node({"conv2d", "matmul", "depthwise-conv2d", "transposed-conv2d", "transposed-depthwise-conv2d", "scale"})),
("conv2d", Node({"matmul", "scale"})),
("relu", Node({"relu", "prelu", "leaky-relu", "relu6"})),
("output", Node({"float2fix"}))
],
"edges": [("weights", "conv2d"), ("bias", "conv2d"), ("input", "conv2d"), ("conv2d", "relu"), ("relu", "output")]
},
{"name": "conv2d_fix_without_relu",
"nodes": [("weights", Node({"fix2float"})),
("bias", Node({"fix2float"})),
("input", Node({"fix2float"})),
# ("conv2d", Node({"conv2d", "matmul", "depthwise-conv2d", "transposed-conv2d", "scale"})),
("conv2d", Node({"matmul", "scale"})),
("output", Node({"float2fix"}))
],
"edges": [("weights", "conv2d"), ("bias", "conv2d"), ("input", "conv2d"), ("conv2d", "output")]
},
{
"name": "reduction_mean_with_mul_relu",
"nodes": [
("input", Node({"fix2float"})),
("reduction_mean", Node({"reduction_mean"})),
("const", Node({"const"})),
("mul", Node({"mul"})),
("relu", Node({"relu","prelu", "leaky-relu", "relu6"})),
("output", Node({"float2fix"})),
],
"edges": [("input", "reduction_mean"), ("reduction_mean", "mul"), ("const", "mul"), ("mul", "relu"), ("relu", "output")]
},
{
"name": "reduction_mean_with_mul",
"nodes": [
("input", Node({"fix2float"})),
("reduction_mean", Node({"reduction_mean"})),
("const", Node({"const"})),
("mul", Node({"mul"})),
("output", Node({"float2fix"})),
],
"edges": [("input", "reduction_mean"), ("reduction_mean", "mul"), ("const", "mul"), ("mul", "output")]
},
# {
# "name": "reduction_mean_with_relu",
# "nodes": [
# ("input", Node({"fix2float"})),
# ("reduction_mean", Node({"reduction_mean"})),
# ("relu", Node({"relu","prelu", "leaky-relu", "relu6"})),
# ("output", Node({"float2fix"})),
# ],
# "edges": [("input", "reduction_mean"), ("reduction_mean", "relu"), ("relu", "output")]
# },
# {"name": "pool_with_mul",
# "nodes": [("input", Node({"fix2float"})),
# ("pool", Node({"avgpool2d"})),
# ("const", Node({"const"})),
# ("mul", Node({"mul"})),
# ("output", Node({"float2fix"}))
# ],
# "edges": [("input", "pool"), ("pool", "mul"), ("const", "mul"), ("mul", "output")]
# },
# {"name": "pool_fix",
# "nodes": [("input", Node({"fix2float"})),
# ("pool", Node({"avgpool2d", "maxpool2d"})),
# ("output", Node({"float2fix"}))
# ],
# "edges": [("input", "pool"), ("pool", "output")]
# },
# { "name": "eltwise_fix_with_relu",
# "nodes": [("add", Node({"add", "mul"})),
# ("relu", Node({"relu"})),
# ("output", Node({"float2fix"}))
# ],
# "edges": [("add", "relu"), ("relu", "output")]
# },
# { "name": "eltwise_fix",
# "nodes": [("add", Node({"add", "mul"})),
# ("output", Node({"float2fix"}))
# ],
# "edges": [("add", "output")]
# },
# { "name": "concat_fix",
# "nodes": [("concat", Node({"concat"})),
# ("output", Node({"float2fix"}))
# ],
# "edges": [("concat", "output")]
# },
# {
# "name": "resize_fix",
# "nodes": [("input", Node({"fix2float"})),
# ("resize", Node({"resize"})),
# ("output", Node({"float2fix"}))
# ],
# "edges": [("input", "resize"), ("resize", "output")]
# },
# {
# "name": "pad_fix",
# "nodes": [("input", Node({"fix2float"})),
# ("pad", Node({"pad"})),
# ("output", Node({"float2fix"}))
# ],
# "edges": [("input", "pad"), ("pad", "output")]
# },
# {
# "name": "reduction_max_fix",
# "nodes": [("input", Node({"fix2float"})),
# ("reduction_max", Node({"reduction_max"})),
# ("output", Node({"float2fix"}))
# ],
# "edges": [("input", "reduction_max"), ("reduction_max", "output")],
# },
# {
# "name": "reshape_fix",
# "nodes": [("input", Node({"fix2float"})),
# ("reshape", Node({"reshape"})),
# ("output", Node({"float2fix"}))
# ],
# "edges": [("input", "reshape"), ("reshape", "output")],
# },
# {
# "name": "hsigmoid_fix",
# "nodes": [("input", Node({"fix2float"})),
# ("hsigmoid", Node({"hard-sigmoid"})),
# ("output", Node({"float2fix"}))
# ],
# "edges": [("input", "hsigmoid"), ("hsigmoid", "output")],
# },
# {
# "name": "reduction_mean",
# "nodes": [("input", Node({"fix"})),
# ("reduction_mean", Node({"reduction_mean"})),
# ("output", Node({"fix"}))
# ],
# "edges": [("input", "reduction_mean"), ("reduction_mean", "output")],
# },
# {
# "name": "reduce_max",
# "nodes": [("input", Node({"fix"})),
# ("poollikeop", Node({"reduction_max"})),
# ("output", Node({"fix"}))
# ],
# "edges": [("input", "poollikeop"), ("poollikeop", "output")],
# },
]
class Graph(object):
def __init__(self, name):
self._graph = nx.DiGraph(name=name)
def __str__(self):
print_str = ""
sorted_nodes = nx.algorithms.topological_sort(self._graph)
for node in sorted_nodes:
print_str += f"{node}:{'/'.join(self.get_node_types(node))}\n"
return print_str
def __eq__(self, other):
def node_match(node_1, node_2):
return node_1.get_types() == node_2.get_types()
return is_isomorphic(self.graph, other.graph, node_match=isomorphism.generic_node_match("node", None, node_match))
def add_node(self, id, obj):
self._graph.add_node(id, node=obj)
def add_edge(self, u, v):
self._graph.add_edge(u, v)
def children(self, n):
return list(self._graph.successors(n))
def parents(self, n):
return list(self._graph.predecessors(n))
def get_node_types(self, id):
return self._graph.nodes[id]["node"].get_types()
def set_node_types(self, id, types):
return self._graph.nodes[id]["node"].set_types(types)
def remove_node(self, n):
return self._graph.remove_node(n)
def remove_one_node(self, n):
assert len(self.parents(n)) <= 1
if len(self.parents(n)) == 0:
self.remove_node(n)
else:
pn = self.parents(n)[0]
for cn in self.children(n):
self.remove_edge(n, cn)
self.add_edge(pn, cn)
self.remove_node(n)
def remove_edge(self, u, v):
self._graph.remove_edge(u, v)
def visualize(self):
import matplotlib.pyplot as plt
    labels = {}
    for node in self.nodes:
      labels[node] = "/".join(self.get_node_types(node))
    fig = plt.figure()
    # pos = nx.nx_pydot.graphviz_layout(self._graph)
    nx.draw_networkx(self._graph, labels=labels, node_color="white", alpha=.5)
plt.savefig(f"{self._graph.name}.png")
plt.close()
def copy(cls, original_graph):
graph_copied = cls(original_graph.name)
graph_copied._graph = original_graph.graph.copy()
return graph_copied
def nodes(self):
for node in self._graph:
yield node
def op_types(self):
op_types = set()
for n in self._graph:
op_types.update(self.get_node_types(n))
return op_types
def graph(self):
return self._graph
def name(self):
return self._graph.name
def _gen_pattern_from_sim_pattern():
pattern_graphs = []
patterns = copy.deepcopy(_SIMULATION_PATTERNS)
for pattern_info in patterns:
pattern_graph = Graph(pattern_info["name"])
for id, attr in pattern_info["nodes"]:
pattern_graph.add_node(id, attr)
for u, v in pattern_info["edges"]:
pattern_graph.add_edge(u, v)
pattern_graphs.append(pattern_graph)
return pattern_graphs | null |
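# Hedged usage sketch (added for illustration; not in the original source): each entry of
# _SIMULATION_PATTERNS is a tiny DAG spec (node id -> set of allowed op types, plus edges),
# and _gen_pattern_from_sim_pattern materializes every spec into the networkx-backed Graph
# defined above. Printing a pattern graph emits one "node:type1/type2" line per node in
# topological order.
def _sketch_dump_simulation_patterns():
  for pattern_graph in _gen_pattern_from_sim_pattern():
    print(f"pattern: {pattern_graph._graph.name}")  # ._graph is the underlying nx.DiGraph
    print(pattern_graph)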
23,158 | import copy
import networkx as nx
from networkx.algorithms import is_isomorphic
from nndct_shared.base import NNDCT_OP
from nndct_shared.inspector.utils import build_xir_nndct_op_map, log_debug_info
from nndct_shared.compile.xir_helper import XIRHelper
from .graph import Graph, Node
class XIRHelper(object):
def find_xops_from_nndct_node(cls, nndct_node, xmodel):
xop_lst = []
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", nndct_node.name)
for xop in cls.get_xmodel_ops(xmodel):
if cls.get_xop_type(xop) in ["download", "upload", "fix2float", "float2fix", "transpose", "fix", "data-fix"]:
continue
if formal_name in cls.get_xop_name(xop):
xop_lst.append(xop)
return xop_lst
def get_xop_device_type(xop):
if xop.has_attr("device"):
return xop.get_attr("device")
else:
return None
def get_xop_name(xop):
return xop.get_name()
def get_xop_template_name(op_template):
return op_template.get_name()
def get_xop_template_types(op_template):
return op_template.get_types()
def get_xmodel_ops(xmodel):
return xmodel.get_ops()
def get_xop_type(xop):
return xop.get_type()
def get_input_xops(xop):
return xop.get_input_ops()["input"]
def get_op_partition_msg(xop):
msg = ""
if xop and xop.has_attr("partition_msg"):
msg = xop.get_attr("partition_msg")
elif xop and xop.has_attr("error_msg"):
msg = xop.get_attr("error_msg")
return msg
def is_dpu_pattern(cls, xmodel):
for xop in cls.get_xmodel_ops(xmodel):
if cls.get_xop_device_type(xop) == "CPU":
if cls.get_xop_type(xop) == "reshape-fix":
input_op = cls.get_input_xops(xop)[0]
if cls.get_xop_type(input_op) not in ["data", "data-fix"]:
return False
elif cls.get_xop_type(xop) not in ["fix2float", "download"]:
return False
elif cls.get_xop_device_type(xop) is None:
return False
return True
def get_pattern_partition_msg(cls, xmodel):
msg = ""
for xop in cls.get_xmodel_ops(xmodel):
msg += cls.get_op_partition_msg(xop)
return msg
def is_valid_compiled_pattern(cls, xmodel):
for xop in cls.get_xmodel_ops(xmodel):
if xop is None or xop.has_attr("error_msg"):
return False
if any([cls.get_xop_device_type(xop) is None for xop in cls.get_xmodel_ops(xmodel)]):
return False
return True
def is_valid_template(ops):
fix = {"fix"}
float2fix = {"float2fix"}
fix2float = {"fix2float"}
argmax = {"argmax"}
data = {"data"}
const = {"const"}
false_msg = "This pattern is not for quantization."
float_template = False
op_types = set()
for op in ops:
op_types.update(XIRHelper.get_xop_template_types(op))
if fix.issubset(op_types):
msg = "This is a transfer pass template."
return False, msg
elif not (fix2float.issubset(op_types) or float2fix.issubset(op_types)):
msg = "There is no fix in template."
return False, msg
elif all([{op_type} in [fix2float, float2fix] for op_type in op_types]):
msg = "Only fix in template"
return False, msg
elif argmax.issubset(op_types):
msg = "argmax template is ignored."
return False, msg
elif len(ops) == 2 and (data.issubset(op_types) or const.issubset(op_types)):
msg = "data-fix/const-fix are ignored."
return False, msg
return True, "" | null |
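# Hedged usage sketch (illustration only; the (name, ops) tuple shape is an assumption
# mirroring how templates are listed elsewhere in this module): is_valid_template returns
# (ok, reason), so a caller can keep the rejection reasons around for debugging.
def _sketch_filter_templates(named_templates):
  kept, rejected = [], []
  for name, ops in named_templates:
    ok, msg = is_valid_template(ops)
    (kept if ok else rejected).append((name, msg))
  return kept, rejected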
23,159 | import copy
import networkx as nx
from networkx.algorithms import is_isomorphic
from nndct_shared.base import NNDCT_OP
from nndct_shared.inspector.utils import build_xir_nndct_op_map, log_debug_info
from nndct_shared.compile.xir_helper import XIRHelper
from .graph import Graph, Node
def get_templates_from_dpu_compiler():
"""
  Get pattern info from the compiler.
"""
import xir
import xcompiler
templates = xcompiler.get_templates()
topsorted_templates = []
for template in templates:
topsorted_templates.append((template.get_name(), [op for op in template.toposort()]))
return topsorted_templates
def is_valid_pattern(pattern):
fix = {"fix"}
float2fix = {"float2fix"}
fix2float = {"fix2float"}
argmax = {"argmax"}
data = {"data"}
const = {"const"}
op_types = set()
for node in pattern.nodes:
op_types.update(pattern.get_node_types(node))
if fix.issubset(op_types):
msg = "This is a transfer pass template."
return False, msg
elif not (fix2float.issubset(op_types) or float2fix.issubset(op_types)):
msg = "There is no fix in template."
return False, msg
elif all([{op_type} in [fix2float, float2fix] for op_type in op_types]):
msg = "Only fix in template"
return False, msg
elif argmax.issubset(op_types):
msg = "argmax template is ignored."
return False, msg
elif len(list(pattern.nodes)) == 2 and (data.issubset(op_types) or const.issubset(op_types)):
msg = "data-fix/const-fix are ignored."
return False, msg
if not nx.algorithms.is_directed_acyclic_graph(pattern.graph):
msg = f"{pattern.name} has cycles, please contact developer to fix it."
return False, msg
return True, ""
def reorder_patterns(patterns):
new_patterns = []
pattern_len_map = {}
pattern_map = {pattern.name: pattern for pattern in patterns}
fix_type = {NNDCT_OP.FIX}
for pattern in patterns:
pattern_len = 0
for node in pattern.nodes:
if pattern.get_node_types(node) != fix_type:
pattern_len += 1
pattern_len_map[pattern.name] = pattern_len
sorted_patterns = sorted(pattern_len_map.items(), key=lambda x: x[1], reverse=True)
log_debug_info(f"==============sorted patterns(total {len(sorted_patterns)} patterns)====================")
  for pattern_name, _ in sorted_patterns:
log_debug_info(pattern_name)
log_debug_info(str(pattern_map[pattern_name]))
return [pattern_map[pattern_name] for pattern_name, _ in sorted_patterns]
def create_pattern_graph(name: str, ops: "List[xir.op_template]"):
pattern_graph = Graph(name)
for op in ops:
pattern_graph.add_node(get_op_name(op), Node(op_types=get_op_type(op)))
for op in ops:
for inp in get_input_ops(op):
pattern_graph.add_edge(get_op_name(inp), get_op_name(op))
for outp in get_output_ops(op):
pattern_graph.add_edge(get_op_name(op), get_op_name(outp))
return pattern_graph
def convert_xir_type_to_nndct_type(pattern_graph):
for node in pattern_graph.nodes:
xir_types = pattern_graph.get_node_types(node)
nndct_types = set()
for ty in xir_types:
if ty in _XIR2NNCT:
nndct_types.update(_XIR2NNCT.get(ty, {ty}))
if nndct_types:
pattern_graph.set_node_types(node, nndct_types)
else:
return False
return True
def transform_pattern_graph(pattern_graph):
_merge_float2fix_fix2float_pair(pattern_graph)
_convert_fix_like_op_to_fix(pattern_graph)
_merge_mul_coeff(pattern_graph)
_remove_mul_for_hswish(pattern_graph)
def log_debug_info(msg):
if NndctOption.nndct_inspect_debug.value:
NndctDebugLogger.write(f"{msg}\n")
class XIRHelper(object):
def find_xops_from_nndct_node(cls, nndct_node, xmodel):
xop_lst = []
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", nndct_node.name)
for xop in cls.get_xmodel_ops(xmodel):
if cls.get_xop_type(xop) in ["download", "upload", "fix2float", "float2fix", "transpose", "fix", "data-fix"]:
continue
if formal_name in cls.get_xop_name(xop):
xop_lst.append(xop)
return xop_lst
def get_xop_device_type(xop):
if xop.has_attr("device"):
return xop.get_attr("device")
else:
return None
def get_xop_name(xop):
return xop.get_name()
def get_xop_template_name(op_template):
return op_template.get_name()
def get_xop_template_types(op_template):
return op_template.get_types()
def get_xmodel_ops(xmodel):
return xmodel.get_ops()
def get_xop_type(xop):
return xop.get_type()
def get_input_xops(xop):
return xop.get_input_ops()["input"]
def get_op_partition_msg(xop):
msg = ""
if xop and xop.has_attr("partition_msg"):
msg = xop.get_attr("partition_msg")
elif xop and xop.has_attr("error_msg"):
msg = xop.get_attr("error_msg")
return msg
def is_dpu_pattern(cls, xmodel):
for xop in cls.get_xmodel_ops(xmodel):
if cls.get_xop_device_type(xop) == "CPU":
if cls.get_xop_type(xop) == "reshape-fix":
input_op = cls.get_input_xops(xop)[0]
if cls.get_xop_type(input_op) not in ["data", "data-fix"]:
return False
elif cls.get_xop_type(xop) not in ["fix2float", "download"]:
return False
elif cls.get_xop_device_type(xop) is None:
return False
return True
def get_pattern_partition_msg(cls, xmodel):
msg = ""
for xop in cls.get_xmodel_ops(xmodel):
msg += cls.get_op_partition_msg(xop)
return msg
def is_valid_compiled_pattern(cls, xmodel):
for xop in cls.get_xmodel_ops(xmodel):
if xop is None or xop.has_attr("error_msg"):
return False
if any([cls.get_xop_device_type(xop) is None for xop in cls.get_xmodel_ops(xmodel)]):
return False
return True
def build_patterns_from_dpu_templates():
templates = get_templates_from_dpu_compiler()
log_debug_info("\nAll patterns from xcompiler:")
for id, (name, ops) in enumerate(templates):
log_debug_info(f"pattern id:{id}")
for op in ops:
log_debug_info(f"op name:{XIRHelper.get_xop_template_name(op)} type:{XIRHelper.get_xop_template_types(op)}")
patterns = []
pattern_graphs = []
for id, (name, ops) in enumerate(templates):
pattern_graph = create_pattern_graph(f"{name}_{id}", ops)
ret, msg = is_valid_pattern(pattern_graph)
if ret:
pattern_graphs.append(pattern_graph)
else:
log_debug_info(f"{pattern_graph.name} is filtered.({msg}).")
# pattern_graphs = pattern_graphs + _gen_pattern_from_sim_pattern()
log_debug_info("\nPattern Transformation:")
for pattern_graph in pattern_graphs:
log_debug_info(f"{pattern_graph.name} pattern")
log_debug_info("================Before transformation====================")
log_debug_info(str(pattern_graph))
transform_pattern_graph(pattern_graph)
if convert_xir_type_to_nndct_type(pattern_graph):
patterns.append(pattern_graph)
else:
log_debug_info(f"{pattern_graph.name} is ignored for there is at least one unknown op in the pattern.")
log_debug_info("================After transformation====================")
log_debug_info(str(pattern_graph))
patterns = reorder_patterns(patterns)
return patterns | null |
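# Hedged sketch (illustration only; the actual matcher used by the inspector is not shown
# in this file): the pattern graphs built above are meant to be located as subgraphs of a
# device graph. One plausible way to do that with networkx is DiGraphMatcher, treating a
# device node as compatible with a pattern node when their op-type sets intersect.
def _sketch_match_pattern(device_nx_graph, pattern_graph):
  from networkx.algorithms import isomorphism

  def node_match(device_attrs, pattern_attrs):
    return bool(set(device_attrs["node"].get_types()) & set(pattern_attrs["node"].get_types()))

  matcher = isomorphism.DiGraphMatcher(
      device_nx_graph, pattern_graph._graph, node_match=node_match)
  return list(matcher.subgraph_isomorphisms_iter())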
23,160 | import copy
import networkx as nx
from networkx.algorithms import is_isomorphic, isomorphism
from nndct_shared.base import NNDCT_OP
from nndct_shared.inspector.utils import build_xir_nndct_op_map, log_debug_info
from nndct_shared.compile.xir_helper import XIRHelper
from .graph import Graph, Node
class Graph(object):
def __init__(self, name):
self._graph = nx.DiGraph(name=name)
def __str__(self):
print_str = ""
sorted_nodes = nx.algorithms.topological_sort(self._graph)
for node in sorted_nodes:
print_str += f"{node}:{'/'.join(self.get_node_types(node))}\n"
return print_str
def __eq__(self, other):
def node_match(node_1, node_2):
return node_1.get_types() == node_2.get_types()
return is_isomorphic(self.graph, other.graph, node_match=isomorphism.generic_node_match("node", None, node_match))
def add_node(self, id, obj):
self._graph.add_node(id, node=obj)
def add_edge(self, u, v):
self._graph.add_edge(u, v)
def children(self, n):
return list(self._graph.successors(n))
def parents(self, n):
return list(self._graph.predecessors(n))
def get_node_types(self, id):
return self._graph.nodes[id]["node"].get_types()
def set_node_types(self, id, types):
return self._graph.nodes[id]["node"].set_types(types)
def remove_node(self, n):
return self._graph.remove_node(n)
def remove_one_node(self, n):
assert len(self.parents(n)) <= 1
if len(self.parents(n)) == 0:
self.remove_node(n)
else:
pn = self.parents(n)[0]
for cn in self.children(n):
self.remove_edge(n, cn)
self.add_edge(pn, cn)
self.remove_node(n)
def remove_edge(self, u, v):
self._graph.remove_edge(u, v)
def visualize(self):
import matplotlib.pyplot as plt
    labels = {}
    for node in self.nodes:
      labels[node] = "/".join(self.get_node_types(node))
    fig = plt.figure()
    # pos = nx.nx_pydot.graphviz_layout(self._graph)
    nx.draw_networkx(self._graph, labels=labels, node_color="white", alpha=.5)
plt.savefig(f"{self._graph.name}.png")
plt.close()
def copy(cls, original_graph):
graph_copied = cls(original_graph.name)
graph_copied._graph = original_graph.graph.copy()
return graph_copied
def nodes(self):
for node in self._graph:
yield node
def op_types(self):
op_types = set()
for n in self._graph:
op_types.update(self.get_node_types(n))
return op_types
def graph(self):
return self._graph
def name(self):
return self._graph.name
def drop_fix_in_pattern(pattern_graph):
fix = {NNDCT_OP.FIX}
removed_node = []
pattern_without_fix = Graph.copy(pattern_graph)
for node in pattern_graph.nodes:
if pattern_without_fix.get_node_types(node) == fix:
removed_node.append(node)
for node in removed_node:
pattern_without_fix.remove_one_node(node)
return pattern_without_fix | null |
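# Hedged usage sketch (illustration only; it assumes the @classmethod/@property decorators
# that this listing appears to have lost are restored on Graph.copy, Graph.nodes, etc.):
# drop_fix_in_pattern works on a copy, so the input pattern is untouched, and every node
# whose type set is exactly {NNDCT_OP.FIX} is spliced out, with its single parent rewired
# to its children.
def _sketch_drop_fix():
  g = Graph("fix_demo")
  g.add_node("conv", Node({"conv2d"}))       # hypothetical op-type sets
  g.add_node("fix", Node({NNDCT_OP.FIX}))
  g.add_node("relu", Node({"relu"}))
  g.add_edge("conv", "fix")
  g.add_edge("fix", "relu")
  stripped = drop_fix_in_pattern(g)
  # Expected: `stripped` connects conv directly to relu, while `g` keeps all three nodes.
  return stripped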
23,161 | from typing import Mapping
from nndct_shared.nndct_graph.base_graph import Graph
from nndct_shared.nndct_graph.base_node import Node
from nndct_shared.expanding.op_modifier import op_modifier
from nndct_shared.expanding.spec import DataInsert, GenericStructuredExpanding, StructuredExpanding
def propagate_node_expanding(node, node_expand_desc: Mapping[str, StructuredExpanding]):
class Graph(GraphBase):
def __init__(self, graph_name=None):
def __contains__(self, node_or_name: Union[str, Node]) -> bool:
def __deepcopy__(self, memo):
def clone(self):
def clone_from(self, src_graph):
def create_node_from(self, src_node, local_map, converted_nodes):
def node(self, name):
def get_node_by_idx(self, idx):
def get_input_nodes(self):
def get_input_tensors(self, input_args):
def get_return_tensors(self):
def add_node(self, node: Node) -> None:
def free_node(self, node):
def remove_node(self, node):
def remove_node_by_types(self, node_types: List[str]) -> Dict[str, str]:
def find_nodes_by_types(self, node_types: List[NNDCT_OP]):
def reconnect_nodes(self):
def connect_nodes(self):
def parents(self, node: Union[Node, str]) -> List[Node]:
def children(self, node: Union[Node, str]) -> List[Node]:
def add_tensor(self, tensor):
def tensor(self, name):
def param_tensor(self, name):
def add_end_tensor(self, tensor):
def __repr__(self):
def __str__(self):
def description(self):
def set_node_id(self, index, node):
def create_subgraph_from_nodeset(cls, origin_graph, nodeset, graph_name):
def top_sort_nodeset(nodeset: Sequence[Node]) -> List[Node]:
def get_topological_graph_nodes_list(self):
def name(self):
def name(self, name):
def nodes(self):
def reverse_nodes(self):
def tensors(self):
def end_tensors(self):
def inputs(self):
def outputs(self):
def op_types(self):
def append_node(self, node):
def add_param_name(self, param_name):
def param_names(self):
def block(self):
def is_tensor_in_graph(self, tensor_name):
def update_node_idx(self, node, index):
def clear_node_id_map(self):
def remove_tensor(self, tensor):
def insert_node_between_nodes(self, new_node, parent_node, child_node):
def set_top_block(self, block):
def add_block(self, block):
def all_blocks(self):
def all_nodes(self):
def head_node(self):
def return_node(self):
def clean_tensors_data(self):
def assign_node_topological_name(self, prefix="", suffix=""):
def _assgin_nodes(nodes):
def simple_description(self):
def get_node_simple_info(node):
def get_md5(self):
class Node(NodeBase):
def __init__(self, name: str,
op: Optional[str] = None,
dtype: Optional[str] = None,
in_quant_part: Optional[bool] = False):
def __repr__(self):
def __str__(self):
def __deepcopy__(self, memo):
def clone_from(self, src_node, local_map):
def scope_name(self):
def scope_name(self, name):
def description(self):
def clean_connections(self):
def add_in_node(self, node_name: str):
def add_out_node(self, node_name: str):
def in_tensors(self):
def out_tensors(self):
def in_nodes(self):
def out_nodes(self):
def node_attr(self, key):
def set_node_attr(self, key, value):
def node_config(self, key):
def set_node_config(self, key, value):
def has_bound_params(self):
def op_type(self):
def name(self):
def name(self, value):
def idx(self):
def idx(self, index):
def op(self):
def op(self, op):
def dtype(self):
def in_quant_part(self) -> bool:
def in_quant_part(self, quant_state: bool) -> None:
def module(self):
def module(self, module):
def blocks(self):
def add_block(self, block):
def has_custom_op(self):
def get_attr_val(self, attr_name):
def merged(self):
def merged(self, flag):
def transpose_in_order(self):
def transpose_in_order(self, order):
def transpose_out_order(self):
def transpose_out_order(self, order):
def set_node_attr_tensor_value(self, old_tensor, new_tensor):
def destroy(self):
def remove_output(self, i):
def replace_input_at(self, i, new_tensor):
def remove_input(self, i):
def remove_all_inputs(self):
def drop_input(self, i):
def find_use_for_input(self, i):
def owning_block(self):
def owning_block(self, block):
def owning_graph(self):
def owning_graph(self, graph):
def topo_position(self):
def topo_position(self, pos):
def insert_before(self, node):
def insert_after(self, node):
def update_topo_position(self):
def next_node(self):
def next_node(self, node):
def prev_node(self):
def prev_node(self, node):
def in_node_list(self):
def remove_from_list(self):
def add_in_tensor(self, tensor):
def add_out_tensor(self, tensor):
def target_device(self):
def target_device(self, device):
def scope_name(self):
def scope_name(self, scope_name):
def source_range(self):
def source_range(self, source_range):
def normalized_name(self):
def normalized_name(self, name):
op_modifier = registry.Registry("expanding Modifier Functions")
class StructuredExpanding(object):
def __init__(self, node_name: str) -> None:
def node_name(self) -> str:
def in_dim(self) -> int:
def in_dim(self, v: int) -> None:
def out_dim(self) -> int:
def out_dim(self, v: int) -> None:
def added_out_channel(self) -> int:
def added_in_channel(self) -> int:
def out_inserts(self) -> List[DataInsert]:
def update_node_by_expanding(graph: Graph, node: Node, node_expand_desc: Mapping[str, StructuredExpanding]):
op_type = node.op.type
if op_type in op_modifier:
mod_func = op_modifier.lookup(op_type)
mod_func(graph, node, node_expand_desc)
else:
propagate_node_expanding(node, node_expand_desc) | null |
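# Hedged sketch (illustrative only; the exact Registry.register signature is an assumption,
# not taken from this file): update_node_by_expanding dispatches on op type through the
# op_modifier registry and falls back to propagate_node_expanding for unregistered ops.
# A shape-preserving op could therefore register a thin wrapper such as:
def _sketch_modify_relu(graph, node, node_expand_desc):
  # ReLU keeps channel counts unchanged, so just forward the upstream description.
  propagate_node_expanding(node, node_expand_desc)
# op_modifier.register(_sketch_modify_relu, "relu")  # hypothetical registration; the key
#                                                    # must match the node's op.type value.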
23,162 | from typing import Mapping
from nndct_shared.expanding.spec import BatchNormStructuredExpanding, InstanceNormStructuredExpanding, \
DataInsert, GenericStructuredExpanding, StructuredExpanding, WeightedNodeStructuredExpanding
from nndct_shared.nndct_graph.base_graph import Graph
from nndct_shared.nndct_graph.base_node import Node
from nndct_shared.nndct_graph.base_tensor import Tensor
from nndct_shared.utils import registry
from nndct_shared.base.key_names import NNDCT_OP as OpTypes
from nndct_shared.pruning.pruning_lib import is_depthwise_conv, _DISALLOW_PRUNED_INPUT_OPS, find_prunable_ancestor, CONV_OPS
import numpy as np
def _modify_depthwise(graph: Graph, node: Node, expanding_desc: Mapping[str, StructuredExpanding]):
node_expanding = expanding_desc[node.name]
assert isinstance(node_expanding, WeightedNodeStructuredExpanding), \
"Variable node_expanding here has to be instance of WeightedNodeStructuredExpanding"
input_expanding = expanding_desc[node.in_nodes[0]]
dw_multiplier = node.op.attr['out_dim'] // node.op.attr['in_dim']
node.op.attr["group"] += input_expanding.added_out_channel
node.op.attr['in_dim'] += input_expanding.added_out_channel
node.op.attr['out_dim'] += input_expanding.added_out_channel * dw_multiplier
node_expanding.in_dim = node.op.attr['in_dim']
node_expanding.out_dim = node.op.attr['out_dim']
for input_insert in input_expanding.out_inserts:
node_expanding.add_weight_out_insert(
DataInsert(input_insert.position * dw_multiplier, input_insert.added_num_channels * dw_multiplier))
node_expanding.add_bias_insert(
DataInsert(input_insert.position * dw_multiplier, input_insert.added_num_channels * dw_multiplier))
OpTypes.DEPTHWISE_CONV2D, OpTypes.DEPTHWISE_CONV3D,
OpTypes.DEPTHWISE_CONVTRANSPOSE2D, OpTypes.DEPTHWISE_CONVTRANSPOSE3D
def modify_depthwise_conv(graph, node, pruning_res):
_modify_depthwise(graph, node, pruning_res) | null |
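# Hedged worked example (added for clarity; not in the original source): with depthwise
# multiplier K = out_dim // in_dim, an upstream insert of n channels at position p becomes
# a weight/bias insert of n*K channels at position p*K, and the group count grows by the
# number of added input channels. For instance, K = 2 and an upstream
# DataInsert(position=3, added_num_channels=4) yield inserts at position 6 with 8 channels.
def _sketch_depthwise_insert(position, added_num_channels, dw_multiplier):
  return DataInsert(position * dw_multiplier, added_num_channels * dw_multiplier)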
23,163 | from typing import Mapping
from nndct_shared.expanding.spec import BatchNormStructuredExpanding, InstanceNormStructuredExpanding, \
DataInsert, GenericStructuredExpanding, StructuredExpanding, WeightedNodeStructuredExpanding
from nndct_shared.nndct_graph.base_graph import Graph
from nndct_shared.nndct_graph.base_node import Node
from nndct_shared.nndct_graph.base_tensor import Tensor
from nndct_shared.utils import registry
from nndct_shared.base.key_names import NNDCT_OP as OpTypes
from nndct_shared.pruning.pruning_lib import is_depthwise_conv, _DISALLOW_PRUNED_INPUT_OPS, find_prunable_ancestor, CONV_OPS
import numpy as np
def _set_input_by_upstream(node: Node, expanding_desc: Mapping[str, StructuredExpanding]) -> StructuredExpanding:
def _modify_depthwise(graph: Graph, node: Node, expanding_desc: Mapping[str, StructuredExpanding]):
class StructuredExpanding(object):
def __init__(self, node_name: str) -> None:
def node_name(self) -> str:
def in_dim(self) -> int:
def in_dim(self, v: int) -> None:
def out_dim(self) -> int:
def out_dim(self, v: int) -> None:
def added_out_channel(self) -> int:
def added_in_channel(self) -> int:
def out_inserts(self) -> List[DataInsert]:
class Graph(GraphBase):
def __init__(self, graph_name=None):
def __contains__(self, node_or_name: Union[str, Node]) -> bool:
def __deepcopy__(self, memo):
def clone(self):
def clone_from(self, src_graph):
def create_node_from(self, src_node, local_map, converted_nodes):
def node(self, name):
def get_node_by_idx(self, idx):
def get_input_nodes(self):
def get_input_tensors(self, input_args):
def get_return_tensors(self):
def add_node(self, node: Node) -> None:
def free_node(self, node):
def remove_node(self, node):
def remove_node_by_types(self, node_types: List[str]) -> Dict[str, str]:
def find_nodes_by_types(self, node_types: List[NNDCT_OP]):
def reconnect_nodes(self):
def connect_nodes(self):
def parents(self, node: Union[Node, str]) -> List[Node]:
def children(self, node: Union[Node, str]) -> List[Node]:
def add_tensor(self, tensor):
def tensor(self, name):
def param_tensor(self, name):
def add_end_tensor(self, tensor):
def __repr__(self):
def __str__(self):
def description(self):
def set_node_id(self, index, node):
def create_subgraph_from_nodeset(cls, origin_graph, nodeset, graph_name):
def top_sort_nodeset(nodeset: Sequence[Node]) -> List[Node]:
def get_topological_graph_nodes_list(self):
def name(self):
def name(self, name):
def nodes(self):
def reverse_nodes(self):
def tensors(self):
def end_tensors(self):
def inputs(self):
def outputs(self):
def op_types(self):
def append_node(self, node):
def add_param_name(self, param_name):
def param_names(self):
def block(self):
def is_tensor_in_graph(self, tensor_name):
def update_node_idx(self, node, index):
def clear_node_id_map(self):
def remove_tensor(self, tensor):
def insert_node_between_nodes(self, new_node, parent_node, child_node):
def set_top_block(self, block):
def add_block(self, block):
def all_blocks(self):
def all_nodes(self):
def head_node(self):
def return_node(self):
def clean_tensors_data(self):
def assign_node_topological_name(self, prefix="", suffix=""):
def _assgin_nodes(nodes):
def simple_description(self):
def get_node_simple_info(node):
def get_md5(self):
class Node(NodeBase):
def __init__(self, name: str,
op: Optional[str] = None,
dtype: Optional[str] = None,
in_quant_part: Optional[bool] = False):
def __repr__(self):
def __str__(self):
def __deepcopy__(self, memo):
def clone_from(self, src_node, local_map):
def scope_name(self):
def scope_name(self, name):
def description(self):
def clean_connections(self):
def add_in_node(self, node_name: str):
def add_out_node(self, node_name: str):
def in_tensors(self):
def out_tensors(self):
def in_nodes(self):
def out_nodes(self):
def node_attr(self, key):
def set_node_attr(self, key, value):
def node_config(self, key):
def set_node_config(self, key, value):
def has_bound_params(self):
def op_type(self):
def name(self):
def name(self, value):
def idx(self):
def idx(self, index):
def op(self):
def op(self, op):
def dtype(self):
def in_quant_part(self) -> bool:
def in_quant_part(self, quant_state: bool) -> None:
def module(self):
def module(self, module):
def blocks(self):
def add_block(self, block):
def has_custom_op(self):
def get_attr_val(self, attr_name):
def merged(self):
def merged(self, flag):
def transpose_in_order(self):
def transpose_in_order(self, order):
def transpose_out_order(self):
def transpose_out_order(self, order):
def set_node_attr_tensor_value(self, old_tensor, new_tensor):
def destroy(self):
def remove_output(self, i):
def replace_input_at(self, i, new_tensor):
def remove_input(self, i):
def remove_all_inputs(self):
def drop_input(self, i):
def find_use_for_input(self, i):
def owning_block(self):
def owning_block(self, block):
def owning_graph(self):
def owning_graph(self, graph):
def topo_position(self):
def topo_position(self, pos):
def insert_before(self, node):
def insert_after(self, node):
def update_topo_position(self):
def next_node(self):
def next_node(self, node):
def prev_node(self):
def prev_node(self, node):
def in_node_list(self):
def remove_from_list(self):
def add_in_tensor(self, tensor):
def add_out_tensor(self, tensor):
def target_device(self):
def target_device(self, device):
def scope_name(self):
def scope_name(self, scope_name):
def source_range(self):
def source_range(self, source_range):
def normalized_name(self):
def normalized_name(self, name):
def is_depthwise_conv(op):
def modify_conv2d(graph: Graph, node: Node, expanding_desc: Mapping[str, StructuredExpanding]):
  # In PyTorch, a depthwise conv is represented by a conv2d with groups == in_channels and
# out_channels == K * in_channels, where K is a positive integer.
if is_depthwise_conv(node.op):
_modify_depthwise(graph, node, expanding_desc)
return
assert node.op.attr['group'] == 1, 'Grouped convolution is not allowed.'
node_expanding = _set_input_by_upstream(node, expanding_desc)
node.op.attr["in_dim"] += node_expanding.added_in_channel
node.op.attr["out_dim"] += node_expanding.added_out_channel
node_expanding.in_dim = node.op.attr["in_dim"]
node_expanding.out_dim = node.op.attr["out_dim"] | null |
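# Hedged sketch (the real is_depthwise_conv body is elided in this listing; the check below
# only illustrates the convention stated in the comment above and is not the library's
# implementation): a conv counts as depthwise when groups equals the input channel count
# and the output channel count is an integer multiple of it.
def _sketch_is_depthwise(op):
  in_dim, out_dim, group = op.attr["in_dim"], op.attr["out_dim"], op.attr["group"]
  return group == in_dim and out_dim % in_dim == 0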
23,164 | from typing import Mapping
from nndct_shared.expanding.spec import BatchNormStructuredExpanding, InstanceNormStructuredExpanding, \
DataInsert, GenericStructuredExpanding, StructuredExpanding, WeightedNodeStructuredExpanding
from nndct_shared.nndct_graph.base_graph import Graph
from nndct_shared.nndct_graph.base_node import Node
from nndct_shared.nndct_graph.base_tensor import Tensor
from nndct_shared.utils import registry
from nndct_shared.base.key_names import NNDCT_OP as OpTypes
from nndct_shared.pruning.pruning_lib import is_depthwise_conv, _DISALLOW_PRUNED_INPUT_OPS, find_prunable_ancestor, CONV_OPS
import numpy as np
class DataInsert(object):
def __init__(self, position: int = 0, added_num_channels: int = 0, added_data: Tensor = None) -> None:
def position(self) -> int:
def added_num_channels(self) -> int:
def added_data(self) -> Tensor:
def added_data(self, data: Tensor) -> None:
class StructuredExpanding(object):
def __init__(self, node_name: str) -> None:
def node_name(self) -> str:
def in_dim(self) -> int:
def in_dim(self, v: int) -> None:
def out_dim(self) -> int:
def out_dim(self, v: int) -> None:
def added_out_channel(self) -> int:
def added_in_channel(self) -> int:
def out_inserts(self) -> List[DataInsert]:
class InstanceNormStructuredExpanding(StructuredExpanding):
def __init__(self, node_name: str) -> None:
def added_out_channel(self) -> int:
def added_in_channel(self) -> int:
def beta_inserts(self) -> List[DataInsert]:
def beta_inserts(self, v: List[DataInsert]) -> None:
def gamma_inserts(self) -> List[DataInsert]:
def gamma_inserts(self, v: List[DataInsert]) -> None:
def out_inserts(self) -> List[DataInsert]:
def add_beta_insert(self, weight_insert: DataInsert):
def add_gamma_insert(self, weight_insert: DataInsert):
class Graph(GraphBase):
def __init__(self, graph_name=None):
def __contains__(self, node_or_name: Union[str, Node]) -> bool:
def __deepcopy__(self, memo):
def clone(self):
def clone_from(self, src_graph):
def create_node_from(self, src_node, local_map, converted_nodes):
def node(self, name):
def get_node_by_idx(self, idx):
def get_input_nodes(self):
def get_input_tensors(self, input_args):
def get_return_tensors(self):
def add_node(self, node: Node) -> None:
def free_node(self, node):
def remove_node(self, node):
def remove_node_by_types(self, node_types: List[str]) -> Dict[str, str]:
def find_nodes_by_types(self, node_types: List[NNDCT_OP]):
def reconnect_nodes(self):
def connect_nodes(self):
def parents(self, node: Union[Node, str]) -> List[Node]:
def children(self, node: Union[Node, str]) -> List[Node]:
def add_tensor(self, tensor):
def tensor(self, name):
def param_tensor(self, name):
def add_end_tensor(self, tensor):
def __repr__(self):
def __str__(self):
def description(self):
def set_node_id(self, index, node):
def create_subgraph_from_nodeset(cls, origin_graph, nodeset, graph_name):
def top_sort_nodeset(nodeset: Sequence[Node]) -> List[Node]:
def get_topological_graph_nodes_list(self):
def name(self):
def name(self, name):
def nodes(self):
def reverse_nodes(self):
def tensors(self):
def end_tensors(self):
def inputs(self):
def outputs(self):
def op_types(self):
def append_node(self, node):
def add_param_name(self, param_name):
def param_names(self):
def block(self):
def is_tensor_in_graph(self, tensor_name):
def update_node_idx(self, node, index):
def clear_node_id_map(self):
def remove_tensor(self, tensor):
def insert_node_between_nodes(self, new_node, parent_node, child_node):
def set_top_block(self, block):
def add_block(self, block):
def all_blocks(self):
def all_nodes(self):
def head_node(self):
def return_node(self):
def clean_tensors_data(self):
def assign_node_topological_name(self, prefix="", suffix=""):
def _assgin_nodes(nodes):
def simple_description(self):
def get_node_simple_info(node):
def get_md5(self):
class Node(NodeBase):
def __init__(self, name: str,
op: Optional[str] = None,
dtype: Optional[str] = None,
in_quant_part: Optional[bool] = False):
def __repr__(self):
def __str__(self):
def __deepcopy__(self, memo):
def clone_from(self, src_node, local_map):
def scope_name(self):
def scope_name(self, name):
def description(self):
def clean_connections(self):
def add_in_node(self, node_name: str):
def add_out_node(self, node_name: str):
def in_tensors(self):
def out_tensors(self):
def in_nodes(self):
def out_nodes(self):
def node_attr(self, key):
def set_node_attr(self, key, value):
def node_config(self, key):
def set_node_config(self, key, value):
def has_bound_params(self):
def op_type(self):
def name(self):
def name(self, value):
def idx(self):
def idx(self, index):
def op(self):
def op(self, op):
def dtype(self):
def in_quant_part(self) -> bool:
def in_quant_part(self, quant_state: bool) -> None:
def module(self):
def module(self, module):
def blocks(self):
def add_block(self, block):
def has_custom_op(self):
def get_attr_val(self, attr_name):
def merged(self):
def merged(self, flag):
def transpose_in_order(self):
def transpose_in_order(self, order):
def transpose_out_order(self):
def transpose_out_order(self, order):
def set_node_attr_tensor_value(self, old_tensor, new_tensor):
def destroy(self):
def remove_output(self, i):
def replace_input_at(self, i, new_tensor):
def remove_input(self, i):
def remove_all_inputs(self):
def drop_input(self, i):
def find_use_for_input(self, i):
def owning_block(self):
def owning_block(self, block):
def owning_graph(self):
def owning_graph(self, graph):
def topo_position(self):
def topo_position(self, pos):
def insert_before(self, node):
def insert_after(self, node):
def update_topo_position(self):
def next_node(self):
def next_node(self, node):
def prev_node(self):
def prev_node(self, node):
def in_node_list(self):
def remove_from_list(self):
def add_in_tensor(self, tensor):
def add_out_tensor(self, tensor):
def target_device(self):
def target_device(self, device):
def scope_name(self):
def scope_name(self, scope_name):
def source_range(self):
def source_range(self, source_range):
def normalized_name(self):
def normalized_name(self, name):
def modify_instancenorm(graph: Graph, node: Node, expanding_desc: Mapping[str, StructuredExpanding]):
# Under test...
node_expanding = expanding_desc[node.name]
assert isinstance(node_expanding, InstanceNormStructuredExpanding), \
"Variable node_expanding here has to be instance of InstanceNormStructuredExpanding"
input_expanding = expanding_desc[node.in_nodes[0]]
node_expanding.in_dim = input_expanding.out_dim
node_expanding.out_dim = input_expanding.out_dim
node.op.attr["num_features"] = input_expanding.out_dim
for insert in input_expanding.out_inserts:
node_expanding.add_gamma_insert(DataInsert(insert.position, insert.added_num_channels))
node_expanding.add_beta_insert(DataInsert(insert.position, insert.added_num_channels)) | null |
23,165 | from typing import Mapping
from nndct_shared.expanding.spec import BatchNormStructuredExpanding, InstanceNormStructuredExpanding, \
DataInsert, GenericStructuredExpanding, StructuredExpanding, WeightedNodeStructuredExpanding
from nndct_shared.nndct_graph.base_graph import Graph
from nndct_shared.nndct_graph.base_node import Node
from nndct_shared.nndct_graph.base_tensor import Tensor
from nndct_shared.utils import registry
from nndct_shared.base.key_names import NNDCT_OP as OpTypes
from nndct_shared.pruning.pruning_lib import is_depthwise_conv, _DISALLOW_PRUNED_INPUT_OPS, find_prunable_ancestor, CONV_OPS
import numpy as np
class DataInsert(object):
def __init__(self, position: int = 0, added_num_channels: int = 0, added_data: Tensor = None) -> None:
self._position: int = position
self._added_num_channels: int = added_num_channels
self._added_data: Tensor = added_data
def position(self) -> int:
return self._position
def added_num_channels(self) -> int:
return self._added_num_channels
def added_data(self) -> Tensor:
return self._added_data
def added_data(self, data: Tensor) -> None:
self._added_data = data
class StructuredExpanding(object):
def __init__(self, node_name: str) -> None:
self._node_name: str = node_name
self._in_dim: int = 0
self._out_dim: int = 0
def node_name(self) -> str:
return self._node_name
def in_dim(self) -> int:
return self._in_dim
def in_dim(self, v: int) -> None:
self._in_dim = v
def out_dim(self) -> int:
return self._out_dim
def out_dim(self, v: int) -> None:
self._out_dim = v
def added_out_channel(self) -> int:
raise NotImplementedError("method added_out_channel is not implemented")
def added_in_channel(self) -> int:
raise NotImplementedError("method added_in_channel is not implemented")
def out_inserts(self) -> List[DataInsert]:
raise NotImplementedError("method out_inserts is not implemented")
class BatchNormStructuredExpanding(StructuredExpanding):
def __init__(self, node_name: str) -> None:
super().__init__(node_name)
self._moving_mean_inserts: List[DataInsert] = []
self._moving_var_inserts: List[DataInsert] = []
self._beta_inserts: List[DataInsert] = []
self._gamma_inserts: List[DataInsert] = []
def added_out_channel(self) -> int:
ret = 0
for insert in self._moving_mean_inserts:
ret += insert.added_num_channels
return ret
def added_in_channel(self) -> int:
ret = 0
for insert in self._moving_mean_inserts:
ret += insert.added_num_channels
return ret
def moving_mean_inserts(self) -> List[DataInsert]:
return self._moving_mean_inserts
def moving_mean_inserts(self, v: List[DataInsert]) -> None:
self._moving_mean_inserts = v
def moving_var_inserts(self) -> List[DataInsert]:
return self._moving_var_inserts
def moving_var_inserts(self, v: List[DataInsert]) -> None:
self._moving_var_inserts = v
def beta_inserts(self) -> List[DataInsert]:
return self._beta_inserts
def beta_inserts(self, v: List[DataInsert]) -> None:
self._beta_inserts = v
def gamma_inserts(self) -> List[DataInsert]:
return self._gamma_inserts
def gamma_inserts(self, v: List[DataInsert]) -> None:
self._gamma_inserts = v
def out_inserts(self) -> List[DataInsert]:
return self._moving_mean_inserts
def add_moving_mean_insert(self, weight_insert: DataInsert):
self._moving_mean_inserts.append(weight_insert)
def add_moving_var_insert(self, weight_insert: DataInsert):
self._moving_var_inserts.append(weight_insert)
def add_beta_insert(self, weight_insert: DataInsert):
self._beta_inserts.append(weight_insert)
def add_gamma_insert(self, weight_insert: DataInsert):
self._gamma_inserts.append(weight_insert)
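# Hedged worked example (illustration only; added_out_channel/added_num_channels are plain
# methods in this listing but presumably @property in the original source): for batch norm
# the added output channel count is the sum of added_num_channels over the moving-mean
# inserts, and those same inserts are what downstream nodes see through out_inserts.
def _sketch_bn_expanding():
  bn = BatchNormStructuredExpanding("bn_demo")
  bn.add_moving_mean_insert(DataInsert(position=4, added_num_channels=2))
  bn.add_moving_mean_insert(DataInsert(position=9, added_num_channels=3))
  # With the property decorators restored, bn.added_out_channel == 5 (2 + 3).
  return bn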
class Graph(GraphBase):
""" Graph object of NNDCT, contain list of NndctNodes.
That will be used for topology or export to XGraph"""
def __init__(self, graph_name=None):
super(Graph, self).__init__()
self._name = graph_name or 'NndctGraph'
self._nodes_by_name = {}
self._nodes_by_id = {}
self._end_tensors = []
self._copy_tensor_map = {}
self._tensors = {}
self._blocks = []
self._param_names = []
self._top_block = None
def __contains__(self, node_or_name: Union[str, Node]) -> bool:
if isinstance(node_or_name, str):
return node_or_name in self._nodes_by_name
else:
return node_or_name.name in self._nodes_by_name
def __deepcopy__(self, memo):
raise NotImplementedError("Deep copy is prohibited, use `clone_from` instead.")
def clone(self):
graph = self.__class__(self.name)
graph.clone_from(self)
return graph
def clone_from(self, src_graph):
local_map = {}
converted_nodes = []
head_node = self.create_node_from(src_graph.head_node, local_map, converted_nodes)
return_node = self.create_node_from(src_graph.return_node, local_map, converted_nodes)
top_block = Block(self, None, head_node, return_node)
self.set_top_block(top_block)
self._top_block.clone_from(src_graph.block, local_map, converted_nodes)
def create_node_from(self, src_node, local_map, converted_nodes):
node = Node(src_node.name, dtype=src_node.dtype, in_quant_part=src_node.in_quant_part)
node.owning_graph = self
node.idx = src_node.idx
node.scope_name = src_node.scope_name
node.source_range = src_node.source_range
node.target_device = src_node.target_device
node.normalized_name = src_node.normalized_name
converted_nodes.append(src_node.name)
for out in src_node.out_tensors:
if out.name in local_map:
node.add_out_tensor(local_map[out.name])
else:
tensor = Tensor(name=out.name)
tensor.clone_from(out)
local_map[out.name] = tensor
node.add_out_tensor(tensor)
for inp in src_node.in_tensors:
if inp.name in local_map:
node.add_in_tensor(local_map[inp.name])
else:
tensor = Tensor(name=inp.name)
tensor.clone_from(inp)
local_map[inp.name] = tensor
node.add_in_tensor(tensor)
node.clone_from(src_node, local_map)
for src_block in src_node.blocks:
head_node = self.create_node_from(src_block.input_node, local_map, converted_nodes)
return_node = self.create_node_from(src_block.return_node, local_map, converted_nodes)
block = Block(self, node, head_node, return_node)
block.clone_from(src_block, local_map, converted_nodes)
node.add_block(block)
return node
def node(self, name):
"""Return node with the specified name"""
return self._nodes_by_name.get(name, None)
def get_node_by_idx(self, idx):
node = self._nodes_by_id.get(idx, None)
assert node is not None
return node
def get_input_nodes(self):
input_nodes = []
for node in self.nodes:
if (len(self.parents(node)) == 0) and \
(node.op.type==NNDCT_OP.INPUT or node.op.type==NNDCT_OP.TUPLE_INPUT):
input_nodes.append(node)
return input_nodes
def get_input_tensors(self, input_args):
input_tensors = []
graph_name = self.name
input_nodes = self.get_input_nodes()
for idx in range(len(input_args)):
#input_node_name = graph_name + "::input_" + str(idx)
#input_node = self.node(input_node_name)
input_node = input_nodes[idx]
input_tensor = input_node.out_tensors[0]
if input_node.op.type == NNDCT_OP.INPUT:
input_tensors.append(input_tensor.name)
elif input_node.op.type == NNDCT_OP.TUPLE_INPUT:
for index in range(len(input_args[idx])):
input_tensor_name = input_tensor.name + '.' + str(index)
input_tensors.append(input_tensor_name)
return input_tensors
def get_return_tensors(self):
return_tensors = []
for tensor in self.return_node.in_tensors:
return_tensors.append(tensor.name)
return return_tensors
def add_node(self, node: Node) -> None:
if node.name in self._nodes_by_name:
return
if node.idx in self._nodes_by_id and node is not self._nodes_by_id[node.idx]:
raise RuntimeError(f"The id `{node.idx}` of {node.name} has been added into graph")
if node.idx == -1:
# if not self._nodes_by_id:
# node._idx = 0
# else:
# node._idx = max([node.idx for node in self.all_nodes()]) + 1
node._idx = -sys.maxsize + len(list(self.all_nodes()))
self._nodes_by_name[node.name] = node
self._nodes_by_id[node.idx] = node
def free_node(self, node):
node.owning_graph = None
self._nodes_by_name.pop(node.name)
self._nodes_by_id.pop(node.idx)
def remove_node(self, node):
assert node.in_tensors
assert len(node.out_tensors) == 1
out_tensor = node.out_tensors[0]
inp_tensor = node.in_tensors[0]
out_tensor.replace_uses_with(inp_tensor)
node.destroy()
def remove_node_by_types(self, node_types: List[str]) -> Dict[str, str]:
if any([node_type in self.op_types for node_type in node_types]):
nodes_to_remove = []
for node in self.nodes:
if node.op.type in node_types:
nodes_to_remove.append(node)
for node in nodes_to_remove:
self.remove_node(node)
def find_nodes_by_types(self, node_types: List[NNDCT_OP]):
conv_nodes = []
for node in self.nodes:
if node.op.type in node_types:
conv_nodes.append(node)
else:
continue
return conv_nodes
def reconnect_nodes(self):
self._nodes_by_id.clear()
for idx, node in enumerate(self.nodes):
node.idx = idx
self._nodes_by_id[idx] = node
node.clean_connections()
self.connect_nodes()
def connect_nodes(self):
for nodeA in self.nodes:
for input_tensor in nodeA.in_tensors:
for nodeB in self.nodes:
if nodeB is not nodeA and input_tensor in nodeB.out_tensors:
#nodeB.outputs.add(input_tensor.node.name)
nodeB.add_out_node(nodeA.name)
nodeA.add_in_node(input_tensor.node.name)
def parents(self, node: Union[Node, str]) -> List[Node]:
if isinstance(node, str):
node = self.node(node)
return [self.node(node_name) for node_name in node.in_nodes]
def children(self, node: Union[Node, str]) -> List[Node]:
if isinstance(node, str):
node = self.node(node)
return [self.node(node_name) for node_name in node.out_nodes]
def add_tensor(self, tensor):
self._tensors[tensor.name] = tensor
def tensor(self, name):
return self._tensors.get(name, None)
def param_tensor(self, name):
for node in self.all_nodes():
for _, tensor in node.op.params.items():
if tensor.name == name:
return tensor
def add_end_tensor(self, tensor):
self._end_tensors.append(tensor)
def __repr__(self):
return f"Graph(name={self.name})"
def __str__(self):
return json.dumps(self.description(), indent=2, separators=(',', ': '))
def description(self):
graph_des = {}
graph_des['graph_name'] = f"{self.__class__.__name__}"
graph_des['nodes'] = []
for n in sorted(self.nodes, key=lambda n: n.idx):
graph_des['nodes'].append(n.description())
return graph_des
def set_node_id(self, index, node):
node.idx = index
self._nodes_by_id[index] = node
def create_subgraph_from_nodeset(cls, origin_graph, nodeset, graph_name):
"""
    Create a subgraph from a nodeset belonging to the origin graph.
"""
assert len(nodeset) >= 2
sorted_nodeset = origin_graph.top_sort_nodeset(nodeset)
for node in sorted_nodeset:
node.remove_from_list()
subgraph = cls(graph_name)
sorted_nodeset[0].owning_graph = subgraph
sorted_nodeset[-1].owning_graph = subgraph
block = Block(subgraph, None, sorted_nodeset[0], sorted_nodeset[-1])
subgraph.set_top_block(block)
if len(sorted_nodeset) > 2:
for node in sorted_nodeset[1:-1]:
node.owning_graph = subgraph
subgraph.append_node(node)
return subgraph
def top_sort_nodeset(nodeset: Sequence[Node]) -> List[Node]:
sorted_nodeset = sorted(nodeset, key=lambda n: n.topo_position)
return sorted_nodeset
def get_topological_graph_nodes_list(self):
nodes_list = [node for node in self.nodes]
return Graph.top_sort_nodeset(nodes_list)
def name(self):
return self._name
def name(self, name):
self._name = name
def nodes(self):
return self._top_block.nodes
def reverse_nodes(self):
return self._top_block.reverse_nodes
def tensors(self):
for tensor in self._tensors.values():
yield tensor
# TODO: Remove
def end_tensors(self):
return [tensor for tensor in self.return_node.in_tensors]
def inputs(self):
return [node for node in self.all_nodes() if not node.in_nodes]
def outputs(self):
return [node for node in self.all_nodes() if not node.out_nodes]
def op_types(self):
return {node.op.type for node in self.all_nodes()}
def append_node(self, node):
self._top_block.append_node(node)
def add_param_name(self, param_name):
if param_name not in self._param_names:
self._param_names.append(param_name)
def param_names(self):
return list(self._param_names)
def block(self):
return self._top_block
def is_tensor_in_graph(self, tensor_name):
return True if tensor_name in self._tensors else False
def update_node_idx(self, node, index):
self._nodes_by_id[index] = node
def clear_node_id_map(self):
self._nodes_by_id.clear()
def remove_tensor(self, tensor):
self._tensors.pop(tensor.name)
if tensor.name in self._param_names:
self._param_names.remove(tensor.name)
def insert_node_between_nodes(self, new_node, parent_node, child_node):
assert parent_node.in_node_list() and child_node.in_node_list()
assert (parent_node.owning_graph == child_node.owning_graph
and parent_node.owning_block == child_node.owning_block)
new_node.owning_block = parent_node.owning_block
new_node.owning_graph = parent_node.owning_graph
tensor = Tensor(name=new_node.name, node=new_node)
new_node.add_out_tensor(tensor)
out_tensor = None
offset = None
for out in parent_node.out_tensors:
for use in out.uses:
if use.user is child_node:
out_tensor = out
offset = use.offset
break
#out_tensor.replace_uses_with(new_node.out_tensors[0])
child_node.replace_input_at(offset, new_node.out_tensors[0])
new_node.add_in_tensor(out_tensor)
new_node.insert_after(parent_node)
def set_top_block(self, block):
self._top_block = block
def add_block(self, block):
self._blocks.append(block)
def all_blocks(self):
return self._blocks
def all_nodes(self):
for _, node in self._nodes_by_name.items():
yield node
def head_node(self):
return self._top_block.input_node
def return_node(self):
return self._top_block.return_node
def clean_tensors_data(self):
for tensor in self.tensors:
tensor.clean_data()
def assign_node_topological_name(self, prefix="", suffix=""):
count = itertools.count(0)
illegal_char_regex = re.compile('[^0-9a-zA-Z_]+')
def _assgin_nodes(nodes):
sorted_nodes = self.top_sort_nodeset(list(nodes))
for n in sorted_nodes:
if n.blocks:
for block in n.blocks:
_assgin_nodes(block.nodes)
else:
candidate = illegal_char_regex.sub("_", n.op.type)
n.normalized_name = f"{prefix}{candidate}_{next(count)}{suffix}"
_assgin_nodes(self.nodes)
def simple_description(self):
"""
Only describe op type topological info.
"""
def get_node_simple_info(node):
node_des = {}
node_des['op'] = node.op.type
node_des["input_ops"] = [node.owning_graph.node(inode).op.type for inode in node.in_nodes]
node_des["output_ops"] = [node.owning_graph.node(onode).op.type for onode in node.out_nodes]
if node.blocks:
for i, block in enumerate(node.blocks):
node_des[f'block_{i}'] = []
for n in self.top_sort_nodeset(list(block.nodes)):
node_des[f'block_{i}'].append(get_node_simple_info(n))
return node_des
graph_des = {}
graph_des['nodes'] = []
for n in self.top_sort_nodeset(list(self.nodes)):
graph_des['nodes'].append(get_node_simple_info(n))
graph_str = json.dumps(graph_des, indent=2, separators=(',', ': '))
return graph_str
def get_md5(self):
import hashlib
graph_str = self.simple_description()
md = hashlib.md5()
md.update(graph_str.encode("utf-8"))
return md.hexdigest()
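# Hedged usage sketch (not in the original source): simple_description() flattens the graph
# into a JSON string of op types and their topological neighbours, and get_md5() hashes that
# string, so two graphs with identical op-level topology share a digest. A cheap
# structural-change check could therefore be:
def _sketch_topology_changed(graph_a, graph_b):
  return graph_a.get_md5() != graph_b.get_md5()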
class Node(NodeBase):
"""A node contains an op and its input and output tensor.
"""
def __init__(self, name: str,
op: Optional[str] = None,
dtype: Optional[str] = None,
in_quant_part: Optional[bool] = False):
super().__init__()
self._name = name
self._op = op
self._dtype = dtype
self._idx = -1
self._scope_name = ""
self._source_range = ""
self._normalized_name = ""
self._in_tensors = []
self._out_tensors = []
self._in_nodes = []
self._out_nodes = []
self._blocks = []
self._is_quantizable = in_quant_part
self._is_merged = False
self._transpose_in_order = None
self._transpose_out_order = None
self._topo_position = 0
self._block = None
self._graph = None
self._neighbor_nodes = [None, None]
self._target_device = None
def __repr__(self):
return f"Node(name={self.name}, id={self.idx}, op_type={self.op.type}, quant_state={self.in_quant_part})"
def __str__(self):
return json.dumps(self.description(), indent=2, separators=(',', ': '))
def __deepcopy__(self, memo):
raise NotImplementedError("Deep copy is prohibited, use `clone_from` instead.")
def clone_from(self, src_node, local_map):
tmp_attrs = src_node.op._attrs
tmp_params = src_node.op._params
tmp_configs = src_node.op._configs
src_node.op._params = copy.copy(tmp_params)
src_node.op._attrs = copy.copy(tmp_attrs)
src_node.op._configs = copy.copy(tmp_configs)
self.op = copy.copy(src_node.op)
self.op._export_attr_and_param()
src_node.op._attrs = tmp_attrs
src_node.op._params = tmp_params
src_node.op._configs = tmp_configs
self.op.clone_from(src_node.op, local_map)
def scope_name(self):
return self._scope_name
def scope_name(self, name):
self._scope_name = name
def description(self):
node_des = {}
node_des['name'] = self._name
node_des['scope_name'] = self._scope_name
node_des['idx'] = self._idx
node_des['dtype'] = self._dtype
node_des['enable_quant'] = self._is_quantizable
node_des['in_nodes'] = [i for i in self.in_nodes]
node_des['out_nodes'] = [o for o in self.out_nodes]
node_des['in_tensors'] = [it.description() for it in self.in_tensors]
node_des['out_tensors'] = [ot.description() for ot in self.out_tensors]
node_des['op'] = self._op.description()
if self._blocks:
for i, block in enumerate(self._blocks):
node_des[f'block_{i}'] = []
for n in sorted(block.nodes, key=lambda n: n.idx):
node_des[f'block_{i}'].append(n.description())
return node_des
def clean_connections(self):
self._in_nodes = []
self._out_nodes = []
def add_in_node(self, node_name: str):
if node_name not in self._in_nodes:
self._in_nodes.append(node_name)
def add_out_node(self, node_name: str):
if node_name not in self._out_nodes:
self._out_nodes.append(node_name)
def in_tensors(self):
return self._in_tensors
def out_tensors(self):
return self._out_tensors
def in_nodes(self):
nodes = []
for tensor in self.in_tensors:
if tensor.node is not None:
nodes.append(tensor.node.name)
return nodes
def out_nodes(self):
nodes = []
for out in self.out_tensors:
for use in out.uses:
nodes.append(use.user.name)
return nodes
def node_attr(self, key):
return self._op.get_attr(key)
def set_node_attr(self, key, value):
if all([val is None for val in self._op._attr_value_mem[key]]):
self._op.set_attr(key, value)
else:
self._op.update_attr(key, value)
def node_config(self, key):
return self._op.get_config(key)
def set_node_config(self, key, value):
self._op.set_config(key, value)
def has_bound_params(self):
return self._op.has_native_params()
def op_type(self):
return self.op.type
def name(self):
return self._name
def name(self, value):
self._name = value
def idx(self):
return self._idx
def idx(self, index):
self._idx = index
self.owning_graph.update_node_idx(self, index)
def op(self):
return self._op
def op(self, op):
self._op = op
def dtype(self):
return self._dtype
# @property
# def alias(self):
# return self._alias
def in_quant_part(self) -> bool:
return self._is_quantizable
def in_quant_part(self, quant_state: bool) -> None:
self._is_quantizable = quant_state
def module(self):
return self._module()
def module(self, module):
self._module = weakref.ref(module)
def blocks(self):
return self._blocks
def add_block(self, block):
self._blocks.append(block)
def has_custom_op(self):
return isinstance(self.op, CustomOp)
def get_attr_val(self, attr_name):
attr = self.node_attr(attr_name)
return attr.data if isinstance(attr, Tensor) else attr
def merged(self):
return self._is_merged
def merged(self, flag):
self._is_merged = flag
def transpose_in_order(self):
return self._transpose_in_order
def transpose_in_order(self, order):
self._transpose_in_order = order
def transpose_out_order(self):
return self._transpose_out_order
def transpose_out_order(self, order):
self._transpose_out_order = order
def set_node_attr_tensor_value(self, old_tensor, new_tensor):
for attr_name, attr_value in self.op.attrs.items():
if attr_value.value is old_tensor:
self.set_node_attr(attr_name, new_tensor)
def destroy(self):
if len(self.blocks) > 0:
raise RuntimeError("Can't destroy if or loop node.")
while len(self.out_tensors) > 0:
self.remove_output(len(self.out_tensors) - 1)
self.remove_all_inputs()
if self.in_node_list():
self.remove_from_list()
self.owning_graph.free_node(self)
def remove_output(self, i):
assert i < len(self.out_tensors)
assert len(self.out_tensors[i].uses) == 0
output = self.out_tensors.pop(i)
self.owning_graph.remove_tensor(output)
for output_offset in range(i, len(self.out_tensors)):
self.out_tensors[output_offset].offset -= 1
def replace_input_at(self, i, new_tensor):
old_tensor = self.in_tensors[i]
if old_tensor is new_tensor:
return
self.in_tensors[i] = new_tensor
uses = [u for u in old_tensor.uses]
attr_uses = [attr_u for attr_u in old_tensor.attr_uses]
for u in uses:
if u.user is self:
new_tensor.uses.append(u)
old_tensor.uses.remove(u)
for attr_u in attr_uses:
if attr_u.user is self.op:
old_tensor.replace_attr_with_new_tensor_v2(attr_u, new_tensor)
def remove_input(self, i):
self.drop_input(i)
for j in range(i + 1, len(self._in_tensors)):
it = self.find_use_for_input(j)
it.offset -= 1
self._in_tensors.pop(i)
def remove_all_inputs(self):
for i in range(len(self.in_tensors)):
self.drop_input(i)
self.in_tensors.clear()
def drop_input(self, i):
assert i < len(self.in_tensors)
input_value = self.in_tensors[i]
use_it = self.find_use_for_input(i)
input_value.uses.remove(use_it)
self.in_tensors[i] = None
return input_value
def find_use_for_input(self, i):
use_it = None
for use in self.in_tensors[i].uses:
if use.offset == i and use.user is self:
use_it = use
assert use_it is not None
return use_it
def owning_block(self):
return self._block
def owning_block(self, block):
self._block = block
def owning_graph(self):
return self._graph
def owning_graph(self, graph):
self._graph = graph
if self._graph:
self._graph.add_node(self)
def topo_position(self):
return self._topo_position
def topo_position(self, pos):
self._topo_position = pos
def insert_before(self, node):
assert node.in_node_list()
self.insert_after(node.prev_node)
def insert_after(self, node):
assert not self.in_node_list() and node.in_node_list()
assert node.owning_block is not None
self._block = node.owning_block
next_node = node.next_node
node.next_node = self
self.prev_node = node
self.next_node = next_node
next_node.prev_node = self
self.update_topo_position()
def update_topo_position(self):
is_first_node = self.prev_node is self.owning_block.input_node
is_last_node = self.next_node is self.owning_block.return_node
prev_pos = self.prev_node.topo_position
next_pos = self.next_node.topo_position
if is_last_node:
if is_first_node:
self.topo_position = MID_POSITION
return
if prev_pos >= (POSITION_UPPER_BOUND - APPEND_INTERVAL):
self.owning_block.reindex_topo()
return
self.topo_position = prev_pos + APPEND_INTERVAL
elif is_first_node:
if next_pos <= (POSITION_LOWER_BOUND + APPEND_INTERVAL):
self.owning_block.reindex_topo()
return
self.topo_position = next_pos - APPEND_INTERVAL
else:
pos_between = prev_pos + (next_pos - prev_pos) / 2
if pos_between == prev_pos:
self.owning_block.reindex_topo()
return
self.topo_position = pos_between
def next_node(self):
return self._neighbor_nodes[1]
def next_node(self, node):
self._neighbor_nodes[1] = node
def prev_node(self):
return self._neighbor_nodes[0]
def prev_node(self, node):
self._neighbor_nodes[0] = node
def in_node_list(self):
if self.next_node is None:
assert self.prev_node is None
return self.next_node is not None
def remove_from_list(self):
assert self.in_node_list()
if self.owning_block.input_node is self:
self.owning_block.input_node = self.next_node
self.owning_block = None
next_node = self.next_node
prev_node = self.prev_node
prev_node.next_node = next_node
next_node.prev_node = prev_node
self.next_node = None
self.prev_node = None
def add_in_tensor(self, tensor):
tensor.uses.append(Use(self, len(self.in_tensors)))
self._in_tensors.append(tensor)
self.owning_graph.add_tensor(tensor)
def add_out_tensor(self, tensor):
tensor.offset = len(self.out_tensors)
self._out_tensors.append(tensor)
tensor.node = self
self.owning_graph.add_tensor(tensor)
def target_device(self):
return self._target_device
def target_device(self, device):
self._target_device = device
def scope_name(self):
return self._scope_name
def scope_name(self, scope_name):
self._scope_name = scope_name
def source_range(self):
return self._source_range
def source_range(self, source_range):
self._source_range = source_range
def normalized_name(self):
return self._normalized_name
def normalized_name(self, name):
self._normalized_name = name
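# --- Hedged usage sketch (not part of the original source). It shows how node
# connectivity is derived from tensor uses rather than stored edge lists; it assumes
# the plain accessors above are exposed as properties, as in the original module,
# and that `graph` is an already-constructed Graph instance.
def _sketch_node_wiring(graph):
    producer = Node("conv1")
    consumer = Node("relu1")
    producer.owning_graph = graph   # registers the node with the graph
    consumer.owning_graph = graph
    t = Tensor(name="conv1_out")
    producer.add_out_tensor(t)      # t.node now points back to producer
    consumer.add_in_tensor(t)       # t.uses gains Use(consumer, offset=0)
    assert consumer.in_nodes == ["conv1"]   # derived by walking t.node
    assert producer.out_nodes == ["relu1"]  # derived by walking t.uses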
class Tensor(object):
"""A wrapper of np.ndarray used in two ways:
- The outputs of an operation.
- The parameters of an operation.
In the former case, you can use `tensor.node` to get the node that
outputs this tensor.
In the latter case, `tensor.node` is None.
For getting raw ndarray, call `tensor.data`.
"""
def __init__(self,
name=None,
shape=None,
dtype=None,
device=None,
requires_grad=None,
data=None,
node=None,
layout=None):
self._node = weakref.ref(node) if node else node
self._name = name
self._shape = shape
self._data = data
self._dtype_map = {
np.dtype('float64'): 'float64',
np.dtype('float32'): 'float32',
np.dtype('float16'): 'float16',
np.dtype('complex64'): 'complex64',
np.dtype('int64'): 'int64',
np.dtype('int32'): 'int32',
np.dtype('int16'): 'int16',
np.dtype('int8'): 'int8',
np.dtype('bool'): 'bool',
np.dtype('uint8'): 'uint8'
}
if dtype in self._dtype_map:
self._dtype = self._dtype_map[dtype]
else:
self._dtype = dtype
self._device = device
self._requires_grad = requires_grad
self._offset = 0
self._uses = []
self._attr_uses = []
self._device = device
def __deepcopy__(self, memo):
raise NotImplementedError("Deep copy is prohibited, use `clone_from` instead.")
def clone_from(self, src_tensor):
self._shape = src_tensor._shape
self._data = copy.deepcopy(src_tensor._data)
self._dtype = src_tensor._dtype
self._device = src_tensor._device
self._requires_grad = src_tensor._requires_grad
def from_ndarray(self, data):
if not isinstance(data, np.ndarray):
raise TypeError("'data' must be a numpy ndarray")
self._data = np.copy(data)
self._dtype = self._dtype_map[self._data.dtype]
self._shape = list(self._data.shape)
def from_tensor(self, tensor):
self._dtype = tensor.dtype
self._shape = tensor.shape
def from_des(self, shape, dtype):
self._shape = shape
self._dtype = dtype
def transpose(self, axes=None):
trans_data = None
if self._data is not None:
trans_data = self._data.transpose(axes)
trans_data = np.ascontiguousarray(trans_data)
trans_shape = list(trans_data.shape)
else:
trans_shape = [self._shape[i] for i in axes]
self._data = trans_data
self._shape = trans_shape
def clean_data(self):
self._data = None
def __str__(self):
return "Tensor: {}(shape={}, dtype={})".format(
self._name if self._name else "", self._shape, self._dtype)
def description(self):
desp = {}
desp['name'] = self._name
desp['shape'] = self._shape
desp['dtype'] = self._dtype
desp['node'] = self.node.name if self.node else None
return desp
def is_real_tensor(self):
return self.is_complete_tensor() or self.dtype == "tensor"
def is_list_type(self):
return "list" in self.dtype
def is_complete_tensor(self) -> bool:
# A complete tensor does not need to hold real data; shape and dtype are enough.
return bool(self.shape and self.dtype)
def is_param_tensor(self) -> bool:
return self._node is None
def shape(self):
return self._shape
def shape(self, shape):
self._shape = shape
def ndim(self):
return len(self._shape) if self._shape else None
def dtype(self):
return self._dtype
def dtype(self, dtype):
self._dtype = dtype
def data(self):
return self._data
def data(self, value):
if isinstance(value, np.ndarray):
self.from_ndarray(value)
elif isinstance(value, Tensor):
self.from_tensor(value)
elif isinstance(value, (int, float, bool)):
#raise ValueError(f"Accept [int, float, bool] type data, but {type(value)} is given")
self._data = value
self._shape = []
def node(self):
return self._node() if self._node is not None else None
def node(self, value):
self._node = weakref.ref(value) if value else value
def name(self):
return self._name
def name(self, name):
self._name = name
def device(self):
return self._device
def device(self, device):
self._device = device
def requires_grad(self):
return self._requires_grad
def requires_grad(self, need_grad):
self._requires_grad = need_grad
def offset(self):
return self._offset
def offset(self, offset):
self._offset = offset
def uses(self):
return self._uses
def owning_graph(self):
return self.node.owning_graph
def attr_uses(self):
return self._attr_uses
def replace_first_use_with(self, new_tensor):
assert self.owning_graph is new_tensor.owning_graph
u = self.uses[0]
u.user.in_tensors[u.offset] = new_tensor
new_tensor.uses.append(u)
self.uses.pop(0)
def replace_uses_with(self, new_tensor):
assert self is not new_tensor
while len(self.uses) > 0:
self.replace_first_use_with(new_tensor)
self.replace_attr_uses_with(new_tensor)
def replace_attr_with_new_tensor_v2(self, attr_use, new_tensor):
def _replace(attr_name, attr_value):
if isinstance(attr_value, list):
new_attr_value = []
for value in attr_value:
if self is value:
new_value = _replace(attr_name, value)
new_attr_value.append(new_value)
elif isinstance(value, (tuple, list)):
new_value = _replace(attr_name, value)
new_attr_value.append(new_value)
else:
new_attr_value.append(value)
return new_attr_value
elif isinstance(attr_value, tuple):
new_attr_value = []
for value in list(attr_value):
if self is value:
new_value = _replace(attr_name, value)
new_attr_value.append(new_value)
elif isinstance(value, (tuple, list)):
new_value = _replace(attr_name, value)
new_attr_value.append(new_value)
else:
new_attr_value.append(value)
new_attr_value = tuple(new_attr_value)
return new_attr_value
else:
if self is not attr_value:
if attr_name == attr_use.attr_name:
self.attr_uses.remove(attr_use)
return attr_value
else:
if attr_use not in new_tensor.attr_uses:
new_tensor.attr_uses.append(attr_use)
self.attr_uses.remove(attr_use)
return new_tensor
if isinstance(attr_use.attr_name, str):
attr_value = attr_use.user.get_config(attr_use.attr_name)
else:
attr_value = attr_use.user.get_attr(attr_use.attr_name)
new_attr_value = _replace(attr_use.attr_name, attr_value)
if isinstance(attr_use.attr_name, str):
attr_use.user.set_config(attr_use.attr_name, new_attr_value)
else:
attr_use.user.update_attr(attr_use.attr_name, new_attr_value)
def replace_attr_with_new_tensor(self, attr_use, new_tensor):
def _replace(attr_name, attr_value):
if isinstance(attr_value, list):
if self in attr_value:
for i in range(len(attr_value)):
if attr_value[i] is self:
attr_value[i] = new_tensor
if attr_use not in new_tensor.attr_uses:
new_tensor.attr_uses.append(attr_use)
self.attr_uses.remove(attr_use)
return
else:
for val in attr_value:
_replace(attr_name, val)
if self is not attr_value:
if attr_name == attr_use.attr_name:
self.attr_uses.remove(attr_use)
return
if isinstance(attr_name, str):
attr_use.user.set_config(attr_name, new_tensor)
else:
attr_use.user.update_attr(attr_name, new_tensor)
if isinstance(attr_use.attr_name, str):
attr_value = attr_use.user.get_config(attr_use.attr_name)
else:
attr_value = attr_use.user.get_attr(attr_use.attr_name)
_replace(attr_use.attr_name, attr_value)
def replace_first_attr_use_with(self, new_tensor):
attr_use = self.attr_uses[0]
self.replace_attr_with_new_tensor_v2(attr_use, new_tensor)
def replace_attr_uses_with(self, new_tensor):
while len(self.attr_uses) > 0:
self.replace_first_attr_use_with(new_tensor)
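# --- Hedged usage sketch (not part of the original source): parameter-style tensors
# carry a raw ndarray and have no producing node. Assumes shape/dtype/node are exposed
# as properties, as in the original module.
def _sketch_param_tensor():
    import numpy as np
    w = Tensor(name="conv1.weight")
    w.from_ndarray(np.zeros((8, 3, 3, 3), dtype=np.float32))
    assert w.node is None                   # no producing node -> parameter tensor
    assert w.shape == [8, 3, 3, 3] and w.dtype == "float32"
    w.transpose(axes=(2, 3, 1, 0))          # e.g. OIHW -> HWIO; data kept contiguous
    assert w.shape == [3, 3, 3, 8]
    w.clean_data()                          # drop the ndarray, keep shape/dtype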
def modify_batchnorm(graph: Graph, node: Node, expanding_desc: Mapping[str, StructuredExpanding]):
node_expanding = expanding_desc[node.name]
assert isinstance(node_expanding, BatchNormStructuredExpanding), \
"node_expanding must be an instance of BatchNormStructuredExpanding"
input_expanding = expanding_desc[node.in_nodes[0]]
node_expanding.in_dim = input_expanding.out_dim
node_expanding.out_dim = input_expanding.out_dim
node.op.attr["out_dim"] = input_expanding.out_dim
for insert in input_expanding.out_inserts:
node_expanding.add_moving_mean_insert(
DataInsert(insert.position, insert.added_num_channels, Tensor(data=np.zeros(insert.added_num_channels))))
node_expanding.add_moving_var_insert(
DataInsert(insert.position, insert.added_num_channels, Tensor(data=np.ones(insert.added_num_channels))))
node_expanding.add_beta_insert(
DataInsert(insert.position, insert.added_num_channels, Tensor(data=np.zeros(insert.added_num_channels))))
node_expanding.add_gamma_insert(
DataInsert(insert.position, insert.added_num_channels, Tensor(data=np.ones(insert.added_num_channels)))) | null |
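# --- Hedged note (not part of the original source). Worked example of what
# modify_batchnorm produces: if the producing conv reports out_dim == 20 with one
# out-insert DataInsert(position=16, added_num_channels=4), the BatchNorm expanding
# gets in_dim == out_dim == 20, node.op.attr["out_dim"] is set to 20, and each of
# moving_mean/beta receives a zero-filled DataInsert(16, 4) while moving_var/gamma
# receive a ones-filled DataInsert(16, 4), so the inserted channels act as identity.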
23,166 | from typing import Dict, List, Mapping, Optional, Sequence, Union
from nndct_shared.expanding.spec import BatchNormStructuredExpanding, InstanceNormStructuredExpanding, \
DataInsert, GenericStructuredExpanding, StructuredExpanding, WeightedNodeStructuredExpanding
from nndct_shared.nndct_graph.base_graph import Graph
from nndct_shared.nndct_graph.base_node import Node
from nndct_shared.nndct_graph.base_tensor import Tensor
from nndct_shared.utils import registry
from nndct_shared.base.key_names import NNDCT_OP as OpTypes
from nndct_shared.pruning.pruning_lib import is_depthwise_conv, _DISALLOW_PRUNED_INPUT_OPS, find_prunable_ancestor, CONV_OPS
import copy
import itertools
import json
import re
import sys
import weakref
import numpy as np
class DataInsert(object):
def __init__(self, position: int = 0, added_num_channels: int = 0, added_data: Tensor = None) -> None:
self._position: int = position
self._added_num_channels: int = added_num_channels
self._added_data: Tensor = added_data
def position(self) -> int:
return self._position
def added_num_channels(self) -> int:
return self._added_num_channels
def added_data(self) -> Tensor:
return self._added_data
def added_data(self, data: Tensor) -> None:
self._added_data = data
class StructuredExpanding(object):
def __init__(self, node_name: str) -> None:
self._node_name: str = node_name
self._in_dim: int = 0
self._out_dim: int = 0
def node_name(self) -> str:
return self._node_name
def in_dim(self) -> int:
return self._in_dim
def in_dim(self, v: int) -> None:
self._in_dim = v
def out_dim(self) -> int:
return self._out_dim
def out_dim(self, v: int) -> None:
self._out_dim = v
def added_out_channel(self) -> int:
raise NotImplementedError("method added_out_channel is not implemented")
def added_in_channel(self) -> int:
raise NotImplementedError("method added_in_channel is not implemented")
def out_inserts(self) -> List[DataInsert]:
raise NotImplementedError("method out_inserts is not implemented")
class WeightedNodeStructuredExpanding(StructuredExpanding):
def __init__(self, node_name: str) -> None:
super().__init__(node_name)
self._weight_out_inserts: List[DataInsert] = []
self._weight_in_inserts: List[DataInsert] = []
self._bias_inserts: List[DataInsert] = []
def added_out_channel(self) -> int:
ret = 0
for insert in self._weight_out_inserts:
ret += insert.added_num_channels
return ret
def added_in_channel(self) -> int:
ret = 0
for insert in self._weight_in_inserts:
ret += insert.added_num_channels
return ret
def weight_out_inserts(self) -> List[DataInsert]:
return self._weight_out_inserts
def weight_out_inserts(self, v: List[DataInsert]) -> None:
self._weight_out_inserts = v
def weight_in_inserts(self) -> List[DataInsert]:
return self._weight_in_inserts
def weight_in_inserts(self, v: List[DataInsert]) -> None:
self._weight_in_inserts = v
def bias_inserts(self) -> List[DataInsert]:
return self._bias_inserts
def bias_inserts(self, v: List[DataInsert]) -> None:
self._bias_inserts = v
def out_inserts(self) -> List[DataInsert]:
return self._weight_out_inserts
def add_weight_out_insert(self, weight_insert: DataInsert):
self._weight_out_inserts.append(weight_insert)
def add_weight_in_insert(self, weight_insert: DataInsert):
self._weight_in_inserts.append(weight_insert)
def add_bias_insert(self, bias_insert: DataInsert):
self._bias_inserts.append(bias_insert)
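# --- Hedged usage sketch (not part of the original source): added_out_channel and
# added_in_channel just sum the channel counts of the recorded inserts. Assumes these
# accessors are exposed as properties, as in the original module.
def _sketch_weighted_expanding():
    exp = WeightedNodeStructuredExpanding("conv1")
    exp.add_weight_out_insert(DataInsert(position=8, added_num_channels=4))
    exp.add_weight_out_insert(DataInsert(position=24, added_num_channels=4))
    exp.add_weight_in_insert(DataInsert(position=0, added_num_channels=2))
    assert exp.added_out_channel == 8   # 4 + 4 appended output channels
    assert exp.added_in_channel == 2
    assert len(exp.out_inserts) == 2    # out_inserts aliases the weight-out inserts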
class Graph(GraphBase):
""" Graph object of NNDCT, contain list of NndctNodes.
That will be used for topology or export to XGraph"""
def __init__(self, graph_name=None):
super(Graph, self).__init__()
self._name = graph_name or 'NndctGraph'
self._nodes_by_name = {}
self._nodes_by_id = {}
self._end_tensors = []
self._copy_tensor_map = {}
self._tensors = {}
self._blocks = []
self._param_names = []
self._top_block = None
def __contains__(self, node_or_name: Union[str, Node]) -> bool:
if isinstance(node_or_name, str):
return node_or_name in self._nodes_by_name
else:
return node_or_name.name in self._nodes_by_name
def __deepcopy__(self, memo):
raise NotImplementedError("Deep copy is prohibited, use `clone_from` instead.")
def clone(self):
graph = self.__class__(self.name)
graph.clone_from(self)
return graph
def clone_from(self, src_graph):
local_map = {}
converted_nodes = []
head_node = self.create_node_from(src_graph.head_node, local_map, converted_nodes)
return_node = self.create_node_from(src_graph.return_node, local_map, converted_nodes)
top_block = Block(self, None, head_node, return_node)
self.set_top_block(top_block)
self._top_block.clone_from(src_graph.block, local_map, converted_nodes)
def create_node_from(self, src_node, local_map, converted_nodes):
node = Node(src_node.name, dtype=src_node.dtype, in_quant_part=src_node.in_quant_part)
node.owning_graph = self
node.idx = src_node.idx
node.scope_name = src_node.scope_name
node.source_range = src_node.source_range
node.target_device = src_node.target_device
node.normalized_name = src_node.normalized_name
converted_nodes.append(src_node.name)
for out in src_node.out_tensors:
if out.name in local_map:
node.add_out_tensor(local_map[out.name])
else:
tensor = Tensor(name=out.name)
tensor.clone_from(out)
local_map[out.name] = tensor
node.add_out_tensor(tensor)
for inp in src_node.in_tensors:
if inp.name in local_map:
node.add_in_tensor(local_map[inp.name])
else:
tensor = Tensor(name=inp.name)
tensor.clone_from(inp)
local_map[inp.name] = tensor
node.add_in_tensor(tensor)
node.clone_from(src_node, local_map)
for src_block in src_node.blocks:
head_node = self.create_node_from(src_block.input_node, local_map, converted_nodes)
return_node = self.create_node_from(src_block.return_node, local_map, converted_nodes)
block = Block(self, node, head_node, return_node)
block.clone_from(src_block, local_map, converted_nodes)
node.add_block(block)
return node
def node(self, name):
"""Return node with the specified name"""
return self._nodes_by_name.get(name, None)
def get_node_by_idx(self, idx):
node = self._nodes_by_id.get(idx, None)
assert node is not None
return node
def get_input_nodes(self):
input_nodes = []
for node in self.nodes:
if (len(self.parents(node)) == 0) and \
(node.op.type==NNDCT_OP.INPUT or node.op.type==NNDCT_OP.TUPLE_INPUT):
input_nodes.append(node)
return input_nodes
def get_input_tensors(self, input_args):
input_tensors = []
graph_name = self.name
input_nodes = self.get_input_nodes()
for idx in range(len(input_args)):
#input_node_name = graph_name + "::input_" + str(idx)
#input_node = self.node(input_node_name)
input_node = input_nodes[idx]
input_tensor = input_node.out_tensors[0]
if input_node.op.type == NNDCT_OP.INPUT:
input_tensors.append(input_tensor.name)
elif input_node.op.type == NNDCT_OP.TUPLE_INPUT:
for index in range(len(input_args[idx])):
input_tensor_name = input_tensor.name + '.' + str(index)
input_tensors.append(input_tensor_name)
return input_tensors
def get_return_tensors(self):
return_tensors = []
for tensor in self.return_node.in_tensors:
return_tensors.append(tensor.name)
return return_tensors
def add_node(self, node: Node) -> None:
if node.name in self._nodes_by_name:
return
if node.idx in self._nodes_by_id and node is not self._nodes_by_id[node.idx]:
raise RuntimeError(f"The id `{node.idx}` of {node.name} has been added into graph")
if node.idx == -1:
# if not self._nodes_by_id:
# node._idx = 0
# else:
# node._idx = max([node.idx for node in self.all_nodes()]) + 1
node._idx = -sys.maxsize + len(list(self.all_nodes()))
self._nodes_by_name[node.name] = node
self._nodes_by_id[node.idx] = node
def free_node(self, node):
node.owning_graph = None
self._nodes_by_name.pop(node.name)
self._nodes_by_id.pop(node.idx)
def remove_node(self, node):
assert node.in_tensors
assert len(node.out_tensors) == 1
out_tensor = node.out_tensors[0]
inp_tensor = node.in_tensors[0]
out_tensor.replace_uses_with(inp_tensor)
node.destroy()
def remove_node_by_types(self, node_types: List[str]) -> Dict[str, str]:
if any([node_type in self.op_types for node_type in node_types]):
nodes_to_remove = []
for node in self.nodes:
if node.op.type in node_types:
nodes_to_remove.append(node)
for node in nodes_to_remove:
self.remove_node(node)
def find_nodes_by_types(self, node_types: List[NNDCT_OP]):
matched_nodes = []
for node in self.nodes:
if node.op.type in node_types:
matched_nodes.append(node)
return matched_nodes
def reconnect_nodes(self):
self._nodes_by_id.clear()
for idx, node in enumerate(self.nodes):
node.idx = idx
self._nodes_by_id[idx] = node
node.clean_connections()
self.connect_nodes()
def connect_nodes(self):
for nodeA in self.nodes:
for input_tensor in nodeA.in_tensors:
for nodeB in self.nodes:
if nodeB is not nodeA and input_tensor in nodeB.out_tensors:
#nodeB.outputs.add(input_tensor.node.name)
nodeB.add_out_node(nodeA.name)
nodeA.add_in_node(input_tensor.node.name)
def parents(self, node: Union[Node, str]) -> List[Node]:
if isinstance(node, str):
node = self.node(node)
return [self.node(node_name) for node_name in node.in_nodes]
def children(self, node: Union[Node, str]) -> List[Node]:
if isinstance(node, str):
node = self.node(node)
return [self.node(node_name) for node_name in node.out_nodes]
def add_tensor(self, tensor):
self._tensors[tensor.name] = tensor
def tensor(self, name):
return self._tensors.get(name, None)
def param_tensor(self, name):
for node in self.all_nodes():
for _, tensor in node.op.params.items():
if tensor.name == name:
return tensor
def add_end_tensor(self, tensor):
self._end_tensors.append(tensor)
def __repr__(self):
return f"Graph(name={self.name})"
def __str__(self):
return json.dumps(self.description(), indent=2, separators=(',', ': '))
def description(self):
graph_des = {}
graph_des['graph_name'] = f"{self.__class__.__name__}"
graph_des['nodes'] = []
for n in sorted(self.nodes, key=lambda n: n.idx):
graph_des['nodes'].append(n.description())
return graph_des
def set_node_id(self, index, node):
node.idx = index
self._nodes_by_id[index] = node
def create_subgraph_from_nodeset(cls, origin_graph, nodeset, graph_name):
"""
create a subgraph from nodeset belong to origin graph
"""
assert len(nodeset) >= 2
sorted_nodeset = origin_graph.top_sort_nodeset(nodeset)
for node in sorted_nodeset:
node.remove_from_list()
subgraph = cls(graph_name)
sorted_nodeset[0].owning_graph = subgraph
sorted_nodeset[-1].owning_graph = subgraph
block = Block(subgraph, None, sorted_nodeset[0], sorted_nodeset[-1])
subgraph.set_top_block(block)
if len(sorted_nodeset) > 2:
for node in sorted_nodeset[1:-1]:
node.owning_graph = subgraph
subgraph.append_node(node)
return subgraph
def top_sort_nodeset(nodeset: Sequence[Node]) -> List[Node]:
sorted_nodeset = sorted(nodeset, key=lambda n: n.topo_position)
return sorted_nodeset
def get_topological_graph_nodes_list(self):
nodes_list = [node for node in self.nodes]
return Graph.top_sort_nodeset(nodes_list)
def name(self):
return self._name
def name(self, name):
self._name = name
def nodes(self):
return self._top_block.nodes
def reverse_nodes(self):
return self._top_block.reverse_nodes
def tensors(self):
for tensor in self._tensors.values():
yield tensor
# TODO: Remove
def end_tensors(self):
return [tensor for tensor in self.return_node.in_tensors]
def inputs(self):
return [node for node in self.all_nodes() if not node.in_nodes]
def outputs(self):
return [node for node in self.all_nodes() if not node.out_nodes]
def op_types(self):
return {node.op.type for node in self.all_nodes()}
def append_node(self, node):
self._top_block.append_node(node)
def add_param_name(self, param_name):
if param_name not in self._param_names:
self._param_names.append(param_name)
def param_names(self):
return list(self._param_names)
def block(self):
return self._top_block
def is_tensor_in_graph(self, tensor_name):
return tensor_name in self._tensors
def update_node_idx(self, node, index):
self._nodes_by_id[index] = node
def clear_node_id_map(self):
self._nodes_by_id.clear()
def remove_tensor(self, tensor):
self._tensors.pop(tensor.name)
if tensor.name in self._param_names:
self._param_names.remove(tensor.name)
def insert_node_between_nodes(self, new_node, parent_node, child_node):
assert parent_node.in_node_list() and child_node.in_node_list()
assert (parent_node.owning_graph == child_node.owning_graph
and parent_node.owning_block == child_node.owning_block)
new_node.owning_block = parent_node.owning_block
new_node.owning_graph = parent_node.owning_graph
tensor = Tensor(name=new_node.name, node=new_node)
new_node.add_out_tensor(tensor)
out_tensor = None
offset = None
for out in parent_node.out_tensors:
for use in out.uses:
if use.user is child_node:
out_tensor = out
offset = use.offset
break
#out_tensor.replace_uses_with(new_node.out_tensors[0])
child_node.replace_input_at(offset, new_node.out_tensors[0])
new_node.add_in_tensor(out_tensor)
new_node.insert_after(parent_node)
def set_top_block(self, block):
self._top_block = block
def add_block(self, block):
self._blocks.append(block)
def all_blocks(self):
return self._blocks
def all_nodes(self):
for _, node in self._nodes_by_name.items():
yield node
def head_node(self):
return self._top_block.input_node
def return_node(self):
return self._top_block.return_node
def clean_tensors_data(self):
for tensor in self.tensors:
tensor.clean_data()
def assign_node_topological_name(self, prefix="", suffix=""):
count = itertools.count(0)
illegal_char_regex = re.compile('[^0-9a-zA-Z_]+')
def _assign_nodes(nodes):
sorted_nodes = self.top_sort_nodeset(list(nodes))
for n in sorted_nodes:
if n.blocks:
for block in n.blocks:
_assign_nodes(block.nodes)
else:
candidate = illegal_char_regex.sub("_", n.op.type)
n.normalized_name = f"{prefix}{candidate}_{next(count)}{suffix}"
_assign_nodes(self.nodes)
def simple_description(self):
"""
Only describe op type topological info.
"""
def get_node_simple_info(node):
node_des = {}
node_des['op'] = node.op.type
node_des["input_ops"] = [node.owning_graph.node(inode).op.type for inode in node.in_nodes]
node_des["output_ops"] = [node.owning_graph.node(onode).op.type for onode in node.out_nodes]
if node.blocks:
for i, block in enumerate(node.blocks):
node_des[f'block_{i}'] = []
for n in self.top_sort_nodeset(list(block.nodes)):
node_des[f'block_{i}'].append(get_node_simple_info(n))
return node_des
graph_des = {}
graph_des['nodes'] = []
for n in self.top_sort_nodeset(list(self.nodes)):
graph_des['nodes'].append(get_node_simple_info(n))
graph_str = json.dumps(graph_des, indent=2, separators=(',', ': '))
return graph_str
def get_md5(self):
import hashlib
graph_str = self.simple_description()
md = hashlib.md5()
md.update(graph_str.encode("utf-8"))
return md.hexdigest()
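# --- Hedged usage sketch (not part of the original source): get_md5 fingerprints only
# the op-type topology reported by simple_description(), so two graphs with the same
# structure but different node names or parameter values produce the same digest.
# `build_graph` is a hypothetical callable standing in for whatever constructs a Graph.
def _sketch_graph_fingerprint(build_graph):
    g1 = build_graph()
    g2 = build_graph()
    assert g1.get_md5() == g2.get_md5()    # same op-type topology -> same digest
    print(g1.simple_description())         # JSON with 'op', 'input_ops', 'output_ops' per node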
class Node(NodeBase):
"""A node contains an op and its input and output tensor.
"""
def __init__(self, name: str,
op: Optional[str] = None,
dtype: Optional[str] = None,
in_quant_part: Optional[bool] = False):
super().__init__()
self._name = name
self._op = op
self._dtype = dtype
self._idx = -1
self._scope_name = ""
self._source_range = ""
self._normalized_name = ""
self._in_tensors = []
self._out_tensors = []
self._in_nodes = []
self._out_nodes = []
self._blocks = []
self._is_quantizable = in_quant_part
self._is_merged = False
self._transpose_in_order = None
self._transpose_out_order = None
self._topo_position = 0
self._block = None
self._graph = None
self._neighbor_nodes = [None, None]
self._target_device = None
def __repr__(self):
return f"Node(name={self.name}, id={self.idx}, op_type={self.op.type}, quant_state={self.in_quant_part})"
def __str__(self):
return json.dumps(self.description(), indent=2, separators=(',', ': '))
def __deepcopy__(self, memo):
raise NotImplementedError("Deep copy is prohibited, use `clone_from` instead.")
def clone_from(self, src_node, local_map):
tmp_attrs = src_node.op._attrs
tmp_params = src_node.op._params
tmp_configs = src_node.op._configs
src_node.op._params = copy.copy(tmp_params)
src_node.op._attrs = copy.copy(tmp_attrs)
src_node.op._configs = copy.copy(tmp_configs)
self.op = copy.copy(src_node.op)
self.op._export_attr_and_param()
src_node.op._attrs = tmp_attrs
src_node.op._params = tmp_params
src_node.op._configs = tmp_configs
self.op.clone_from(src_node.op, local_map)
def scope_name(self):
return self._scope_name
def scope_name(self, name):
self._scope_name = name
def description(self):
node_des = {}
node_des['name'] = self._name
node_des['scope_name'] = self._scope_name
node_des['idx'] = self._idx
node_des['dtype'] = self._dtype
node_des['enable_quant'] = self._is_quantizable
node_des['in_nodes'] = [i for i in self.in_nodes]
node_des['out_nodes'] = [o for o in self.out_nodes]
node_des['in_tensors'] = [it.description() for it in self.in_tensors]
node_des['out_tensors'] = [ot.description() for ot in self.out_tensors]
node_des['op'] = self._op.description()
if self._blocks:
for i, block in enumerate(self._blocks):
node_des[f'block_{i}'] = []
for n in sorted(block.nodes, key=lambda n: n.idx):
node_des[f'block_{i}'].append(n.description())
return node_des
def clean_connections(self):
self._in_nodes = []
self._out_nodes = []
def add_in_node(self, node_name: str):
if node_name not in self._in_nodes:
self._in_nodes.append(node_name)
def add_out_node(self, node_name: str):
if node_name not in self._out_nodes:
self._out_nodes.append(node_name)
def in_tensors(self):
return self._in_tensors
def out_tensors(self):
return self._out_tensors
def in_nodes(self):
nodes = []
for tensor in self.in_tensors:
if tensor.node is not None:
nodes.append(tensor.node.name)
return nodes
def out_nodes(self):
nodes = []
for out in self.out_tensors:
for use in out.uses:
nodes.append(use.user.name)
return nodes
def node_attr(self, key):
return self._op.get_attr(key)
def set_node_attr(self, key, value):
if all([val is None for val in self._op._attr_value_mem[key]]):
self._op.set_attr(key, value)
else:
self._op.update_attr(key, value)
def node_config(self, key):
return self._op.get_config(key)
def set_node_config(self, key, value):
self._op.set_config(key, value)
def has_bound_params(self):
return self._op.has_native_params()
def op_type(self):
return self.op.type
def name(self):
return self._name
def name(self, value):
self._name = value
def idx(self):
return self._idx
def idx(self, index):
self._idx = index
self.owning_graph.update_node_idx(self, index)
def op(self):
return self._op
def op(self, op):
self._op = op
def dtype(self):
return self._dtype
# @property
# def alias(self):
# return self._alias
def in_quant_part(self) -> bool:
return self._is_quantizable
def in_quant_part(self, quant_state: bool) -> None:
self._is_quantizable = quant_state
def module(self):
return self._module()
def module(self, module):
self._module = weakref.ref(module)
def blocks(self):
return self._blocks
def add_block(self, block):
self._blocks.append(block)
def has_custom_op(self):
return isinstance(self.op, CustomOp)
def get_attr_val(self, attr_name):
attr = self.node_attr(attr_name)
return attr.data if isinstance(attr, Tensor) else attr
def merged(self):
return self._is_merged
def merged(self, flag):
self._is_merged = flag
def transpose_in_order(self):
return self._transpose_in_order
def transpose_in_order(self, order):
self._transpose_in_order = order
def transpose_out_order(self):
return self._transpose_out_order
def transpose_out_order(self, order):
self._transpose_out_order = order
def set_node_attr_tensor_value(self, old_tensor, new_tensor):
for attr_name, attr_value in self.op.attrs.items():
if attr_value.value is old_tensor:
self.set_node_attr(attr_name, new_tensor)
def destroy(self):
if len(self.blocks) > 0:
raise RuntimeError("Can't destroy if or loop node.")
while len(self.out_tensors) > 0:
self.remove_output(len(self.out_tensors) - 1)
self.remove_all_inputs()
if self.in_node_list():
self.remove_from_list()
self.owning_graph.free_node(self)
def remove_output(self, i):
assert i < len(self.out_tensors)
assert len(self.out_tensors[i].uses) == 0
output = self.out_tensors.pop(i)
self.owning_graph.remove_tensor(output)
for output_offset in range(i, len(self.out_tensors)):
self.out_tensors[output_offset].offset -= 1
def replace_input_at(self, i, new_tensor):
old_tensor = self.in_tensors[i]
if old_tensor is new_tensor:
return
self.in_tensors[i] = new_tensor
uses = [u for u in old_tensor.uses]
attr_uses = [attr_u for attr_u in old_tensor.attr_uses]
for u in uses:
if u.user is self:
new_tensor.uses.append(u)
old_tensor.uses.remove(u)
for attr_u in attr_uses:
if attr_u.user is self.op:
old_tensor.replace_attr_with_new_tensor_v2(attr_u, new_tensor)
def remove_input(self, i):
self.drop_input(i)
for j in range(i + 1, len(self._in_tensors)):
it = self.find_use_for_input(j)
it.offset -= 1
self._in_tensors.pop(i)
def remove_all_inputs(self):
for i in range(len(self.in_tensors)):
self.drop_input(i)
self.in_tensors.clear()
def drop_input(self, i):
assert i < len(self.in_tensors)
input_value = self.in_tensors[i]
use_it = self.find_use_for_input(i)
input_value.uses.remove(use_it)
self.in_tensors[i] = None
return input_value
def find_use_for_input(self, i):
use_it = None
for use in self.in_tensors[i].uses:
if use.offset == i and use.user is self:
use_it = use
assert use_it is not None
return use_it
def owning_block(self):
return self._block
def owning_block(self, block):
self._block = block
def owning_graph(self):
return self._graph
def owning_graph(self, graph):
self._graph = graph
if self._graph:
self._graph.add_node(self)
def topo_position(self):
return self._topo_position
def topo_position(self, pos):
self._topo_position = pos
def insert_before(self, node):
assert node.in_node_list()
self.insert_after(node.prev_node)
def insert_after(self, node):
assert not self.in_node_list() and node.in_node_list()
assert node.owning_block is not None
self._block = node.owning_block
next_node = node.next_node
node.next_node = self
self.prev_node = node
self.next_node = next_node
next_node.prev_node = self
self.update_topo_position()
def update_topo_position(self):
is_first_node = self.prev_node is self.owning_block.input_node
is_last_node = self.next_node is self.owning_block.return_node
prev_pos = self.prev_node.topo_position
next_pos = self.next_node.topo_position
if is_last_node:
if is_first_node:
self.topo_position = MID_POSITION
return
if prev_pos >= (POSITION_UPPER_BOUND - APPEND_INTERVAL):
self.owning_block.reindex_topo()
return
self.topo_position = prev_pos + APPEND_INTERVAL
elif is_first_node:
if next_pos <= (POSITION_LOWER_BOUND + APPEND_INTERVAL):
self.owning_block.reindex_topo()
return
self.topo_position = next_pos - APPEND_INTERVAL
else:
pos_between = prev_pos + (next_pos - prev_pos) / 2
if pos_between == prev_pos:
self.owning_block.reindex_topo()
return
self.topo_position = pos_between
def next_node(self):
return self._neighbor_nodes[1]
def next_node(self, node):
self._neighbor_nodes[1] = node
def prev_node(self):
return self._neighbor_nodes[0]
def prev_node(self, node):
self._neighbor_nodes[0] = node
def in_node_list(self):
if self.next_node is None:
assert self.prev_node is None
return self.next_node is not None
def remove_from_list(self):
assert self.in_node_list()
if self.owning_block.input_node is self:
self.owning_block.input_node = self.next_node
self.owning_block = None
next_node = self.next_node
prev_node = self.prev_node
prev_node.next_node = next_node
next_node.prev_node = prev_node
self.next_node = None
self.prev_node = None
def add_in_tensor(self, tensor):
tensor.uses.append(Use(self, len(self.in_tensors)))
self._in_tensors.append(tensor)
self.owning_graph.add_tensor(tensor)
def add_out_tensor(self, tensor):
tensor.offset = len(self.out_tensors)
self._out_tensors.append(tensor)
tensor.node = self
self.owning_graph.add_tensor(tensor)
def target_device(self):
return self._target_device
def target_device(self, device):
self._target_device = device
def scope_name(self):
return self._scope_name
def scope_name(self, scope_name):
self._scope_name = scope_name
def source_range(self):
return self._source_range
def source_range(self, source_range):
self._source_range = source_range
def normalized_name(self):
return self._normalized_name
def normalized_name(self, name):
self._normalized_name = name
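# --- Hedged sketch (not part of the original source): Graph.remove_node bypasses a
# single-input, single-output node by redirecting every use of its output tensor back
# to its input tensor, after which destroy() unhooks it from the tensor graph, the node
# list and the owning graph. `graph` and `identity_node` are hypothetical, pre-built
# objects; plain accessors such as `name` are assumed to be properties, as in the
# original module.
def _sketch_bypass_node(graph, identity_node):
    graph.remove_node(identity_node)        # consumers now read identity_node's input tensor
    assert identity_node.name not in graph  # Graph.__contains__ checks membership by name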
def modify_dense(graph: Graph, node: Node, expanding_desc: Mapping[str, StructuredExpanding]):
node_expanding = expanding_desc[node.name]
assert isinstance(node_expanding, WeightedNodeStructuredExpanding), \
"node_expanding must be an instance of WeightedNodeStructuredExpanding"
input_expanding = expanding_desc[node.in_nodes[0]]
original_input_channels = input_expanding.out_dim - input_expanding.added_out_channel
spatial_size = node.op.attr["in_dim"] // original_input_channels
data_format = graph.data_format if hasattr(
graph, 'data_format') else 'channels_first'
if data_format == 'channels_last':
for weight_insert in input_expanding.out_inserts:
for i in range(spatial_size):
node_expanding.add_weight_in_insert(
DataInsert(weight_insert.position + i * original_input_channels, weight_insert.added_num_channels))
else:
for weight_insert in input_expanding.out_inserts:
node_expanding.add_weight_in_insert(
DataInsert(weight_insert.position * spatial_size, weight_insert.added_num_channels * spatial_size))
node.op.attr["in_dim"] += node_expanding.added_in_channel | null |
23,167 | from typing import Dict, List, Mapping, Optional, Sequence, Union
from nndct_shared.expanding.spec import BatchNormStructuredExpanding, InstanceNormStructuredExpanding, \
DataInsert, GenericStructuredExpanding, StructuredExpanding, WeightedNodeStructuredExpanding
from nndct_shared.nndct_graph.base_graph import Graph
from nndct_shared.nndct_graph.base_node import Node
from nndct_shared.nndct_graph.base_tensor import Tensor
from nndct_shared.utils import registry
from nndct_shared.base.key_names import NNDCT_OP as OpTypes
from nndct_shared.pruning.pruning_lib import is_depthwise_conv, _DISALLOW_PRUNED_INPUT_OPS, find_prunable_ancestor, CONV_OPS
import copy
import itertools
import json
import re
import sys
import weakref
import numpy as np
class DataInsert(object):
def __init__(self, position: int = 0, added_num_channels: int = 0, added_data: Tensor = None) -> None:
self._position: int = position
self._added_num_channels: int = added_num_channels
self._added_data: Tensor = added_data
def position(self) -> int:
return self._position
def added_num_channels(self) -> int:
return self._added_num_channels
def added_data(self) -> Tensor:
return self._added_data
def added_data(self, data: Tensor) -> None:
self._added_data = data
class StructuredExpanding(object):
def __init__(self, node_name: str) -> None:
self._node_name: str = node_name
self._in_dim: int = 0
self._out_dim: int = 0
def node_name(self) -> str:
return self._node_name
def in_dim(self) -> int:
return self._in_dim
def in_dim(self, v: int) -> None:
self._in_dim = v
def out_dim(self) -> int:
return self._out_dim
def out_dim(self, v: int) -> None:
self._out_dim = v
def added_out_channel(self) -> int:
raise NotImplementedError("method added_out_channel is not implemented")
def added_in_channel(self) -> int:
raise NotImplementedError("method added_in_channel is not implemented")
def out_inserts(self) -> List[DataInsert]:
raise NotImplementedError("method out_inserts is not implemented")
class GenericStructuredExpanding(StructuredExpanding):
def __init__(self, node_name: str) -> None:
super().__init__(node_name)
self._inserts: List[DataInsert] = []
def added_out_channel(self) -> int:
ret = 0
for insert in self._inserts:
ret += insert.added_num_channels
return ret
def added_in_channel(self) -> int:
return self.added_out_channel
def out_inserts(self) -> List[DataInsert]:
return self._inserts
def add_insert(self, insert: DataInsert):
self._inserts.append(insert)
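# --- Hedged usage sketch (not part of the original source): a generic pass-through
# expanding keeps input and output channel counts tied together, so the same inserts
# answer both added_out_channel and added_in_channel. Assumes these accessors are
# exposed as properties, as in the original module.
def _sketch_generic_expanding():
    exp = GenericStructuredExpanding("relu1")
    exp.add_insert(DataInsert(position=16, added_num_channels=4))
    assert exp.added_out_channel == 4
    assert exp.added_in_channel == 4    # delegates to added_out_channel
    assert len(exp.out_inserts) == 1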
class Graph(GraphBase):
""" Graph object of NNDCT, contain list of NndctNodes.
That will be used for topology or export to XGraph"""
def __init__(self, graph_name=None):
super(Graph, self).__init__()
self._name = graph_name or 'NndctGraph'
self._nodes_by_name = {}
self._nodes_by_id = {}
self._end_tensors = []
self._copy_tensor_map = {}
self._tensors = {}
self._blocks = []
self._param_names = []
self._top_block = None
def __contains__(self, node_or_name: Union[str, Node]) -> bool:
if isinstance(node_or_name, str):
return node_or_name in self._nodes_by_name
else:
return node_or_name.name in self._nodes_by_name
def __deepcopy__(self, memo):
raise NotImplementedError("Deep copy is prohibited, use `clone_from` instead.")
def clone(self):
graph = self.__class__(self.name)
graph.clone_from(self)
return graph
def clone_from(self, src_graph):
local_map = {}
converted_nodes = []
head_node = self.create_node_from(src_graph.head_node, local_map, converted_nodes)
return_node = self.create_node_from(src_graph.return_node, local_map, converted_nodes)
top_block = Block(self, None, head_node, return_node)
self.set_top_block(top_block)
self._top_block.clone_from(src_graph.block, local_map, converted_nodes)
def create_node_from(self, src_node, local_map, converted_nodes):
node = Node(src_node.name, dtype=src_node.dtype, in_quant_part=src_node.in_quant_part)
node.owning_graph = self
node.idx = src_node.idx
node.scope_name = src_node.scope_name
node.source_range = src_node.source_range
node.target_device = src_node.target_device
node.normalized_name = src_node.normalized_name
converted_nodes.append(src_node.name)
for out in src_node.out_tensors:
if out.name in local_map:
node.add_out_tensor(local_map[out.name])
else:
tensor = Tensor(name=out.name)
tensor.clone_from(out)
local_map[out.name] = tensor
node.add_out_tensor(tensor)
for inp in src_node.in_tensors:
if inp.name in local_map:
node.add_in_tensor(local_map[inp.name])
else:
tensor = Tensor(name=inp.name)
tensor.clone_from(inp)
local_map[inp.name] = tensor
node.add_in_tensor(tensor)
node.clone_from(src_node, local_map)
for src_block in src_node.blocks:
head_node = self.create_node_from(src_block.input_node, local_map, converted_nodes)
return_node = self.create_node_from(src_block.return_node, local_map, converted_nodes)
block = Block(self, node, head_node, return_node)
block.clone_from(src_block, local_map, converted_nodes)
node.add_block(block)
return node
def node(self, name):
"""Return node with the specified name"""
return self._nodes_by_name.get(name, None)
def get_node_by_idx(self, idx):
node = self._nodes_by_id.get(idx, None)
assert node is not None
return node
def get_input_nodes(self):
input_nodes = []
for node in self.nodes:
if (len(self.parents(node)) == 0) and \
(node.op.type==NNDCT_OP.INPUT or node.op.type==NNDCT_OP.TUPLE_INPUT):
input_nodes.append(node)
return input_nodes
def get_input_tensors(self, input_args):
input_tensors = []
graph_name = self.name
input_nodes = self.get_input_nodes()
for idx in range(len(input_args)):
#input_node_name = graph_name + "::input_" + str(idx)
#input_node = self.node(input_node_name)
input_node = input_nodes[idx]
input_tensor = input_node.out_tensors[0]
if input_node.op.type == NNDCT_OP.INPUT:
input_tensors.append(input_tensor.name)
elif input_node.op.type == NNDCT_OP.TUPLE_INPUT:
for index in range(len(input_args[idx])):
input_tensor_name = input_tensor.name + '.' + str(index)
input_tensors.append(input_tensor_name)
return input_tensors
def get_return_tensors(self):
return_tensors = []
for tensor in self.return_node.in_tensors:
return_tensors.append(tensor.name)
return return_tensors
def add_node(self, node: Node) -> None:
if node.name in self._nodes_by_name:
return
if node.idx in self._nodes_by_id and node is not self._nodes_by_id[node.idx]:
raise RuntimeError(f"The id `{node.idx}` of {node.name} has been added into graph")
if node.idx == -1:
# if not self._nodes_by_id:
# node._idx = 0
# else:
# node._idx = max([node.idx for node in self.all_nodes()]) + 1
node._idx = -sys.maxsize + len(list(self.all_nodes()))
self._nodes_by_name[node.name] = node
self._nodes_by_id[node.idx] = node
def free_node(self, node):
node.owning_graph = None
self._nodes_by_name.pop(node.name)
self._nodes_by_id.pop(node.idx)
def remove_node(self, node):
assert node.in_tensors
assert len(node.out_tensors) == 1
out_tensor = node.out_tensors[0]
inp_tensor = node.in_tensors[0]
out_tensor.replace_uses_with(inp_tensor)
node.destroy()
def remove_node_by_types(self, node_types: List[str]) -> Dict[str, str]:
if any([node_type in self.op_types for node_type in node_types]):
nodes_to_remove = []
for node in self.nodes:
if node.op.type in node_types:
nodes_to_remove.append(node)
for node in nodes_to_remove:
self.remove_node(node)
def find_nodes_by_types(self, node_types: List[NNDCT_OP]):
matched_nodes = []
for node in self.nodes:
if node.op.type in node_types:
matched_nodes.append(node)
return matched_nodes
def reconnect_nodes(self):
self._nodes_by_id.clear()
for idx, node in enumerate(self.nodes):
node.idx = idx
self._nodes_by_id[idx] = node
node.clean_connections()
self.connect_nodes()
def connect_nodes(self):
for nodeA in self.nodes:
for input_tensor in nodeA.in_tensors:
for nodeB in self.nodes:
if nodeB is not nodeA and input_tensor in nodeB.out_tensors:
#nodeB.outputs.add(input_tensor.node.name)
nodeB.add_out_node(nodeA.name)
nodeA.add_in_node(input_tensor.node.name)
def parents(self, node: Union[Node, str]) -> List[Node]:
if isinstance(node, str):
node = self.node(node)
return [self.node(node_name) for node_name in node.in_nodes]
def children(self, node: Union[Node, str]) -> List[Node]:
if isinstance(node, str):
node = self.node(node)
return [self.node(node_name) for node_name in node.out_nodes]
def add_tensor(self, tensor):
self._tensors[tensor.name] = tensor
def tensor(self, name):
return self._tensors.get(name, None)
def param_tensor(self, name):
for node in self.all_nodes():
for _, tensor in node.op.params.items():
if tensor.name == name:
return tensor
def add_end_tensor(self, tensor):
self._end_tensors.append(tensor)
def __repr__(self):
return f"Graph(name={self.name})"
def __str__(self):
return json.dumps(self.description(), indent=2, separators=(',', ': '))
def description(self):
graph_des = {}
graph_des['graph_name'] = f"{self.__class__.__name__}"
graph_des['nodes'] = []
for n in sorted(self.nodes, key=lambda n: n.idx):
graph_des['nodes'].append(n.description())
return graph_des
def set_node_id(self, index, node):
node.idx = index
self._nodes_by_id[index] = node
def create_subgraph_from_nodeset(cls, origin_graph, nodeset, graph_name):
"""
create a subgraph from nodeset belong to origin graph
"""
assert len(nodeset) >= 2
sorted_nodeset = origin_graph.top_sort_nodeset(nodeset)
for node in sorted_nodeset:
node.remove_from_list()
subgraph = cls(graph_name)
sorted_nodeset[0].owning_graph = subgraph
sorted_nodeset[-1].owning_graph = subgraph
block = Block(subgraph, None, sorted_nodeset[0], sorted_nodeset[-1])
subgraph.set_top_block(block)
if len(sorted_nodeset) > 2:
for node in sorted_nodeset[1:-1]:
node.owning_graph = subgraph
subgraph.append_node(node)
return subgraph
def top_sort_nodeset(nodeset: Sequence[Node]) -> List[Node]:
sorted_nodeset = sorted(nodeset, key=lambda n: n.topo_position)
return sorted_nodeset
def get_topological_graph_nodes_list(self):
nodes_list = [node for node in self.nodes]
return Graph.top_sort_nodeset(nodes_list)
def name(self):
return self._name
def name(self, name):
self._name = name
def nodes(self):
return self._top_block.nodes
def reverse_nodes(self):
return self._top_block.reverse_nodes
def tensors(self):
for tensor in self._tensors.values():
yield tensor
# TODO: Remove
def end_tensors(self):
return [tensor for tensor in self.return_node.in_tensors]
def inputs(self):
return [node for node in self.all_nodes() if not node.in_nodes]
def outputs(self):
return [node for node in self.all_nodes() if not node.out_nodes]
def op_types(self):
return {node.op.type for node in self.all_nodes()}
def append_node(self, node):
self._top_block.append_node(node)
def add_param_name(self, param_name):
if param_name not in self._param_names:
self._param_names.append(param_name)
def param_names(self):
return list(self._param_names)
def block(self):
return self._top_block
def is_tensor_in_graph(self, tensor_name):
return tensor_name in self._tensors
def update_node_idx(self, node, index):
self._nodes_by_id[index] = node
def clear_node_id_map(self):
self._nodes_by_id.clear()
def remove_tensor(self, tensor):
self._tensors.pop(tensor.name)
if tensor.name in self._param_names:
self._param_names.remove(tensor.name)
def insert_node_between_nodes(self, new_node, parent_node, child_node):
assert parent_node.in_node_list() and child_node.in_node_list()
assert (parent_node.owning_graph == child_node.owning_graph
and parent_node.owning_block == child_node.owning_block)
new_node.owning_block = parent_node.owning_block
new_node.owning_graph = parent_node.owning_graph
tensor = Tensor(name=new_node.name, node=new_node)
new_node.add_out_tensor(tensor)
out_tensor = None
offset = None
for out in parent_node.out_tensors:
for use in out.uses:
if use.user is child_node:
out_tensor = out
offset = use.offset
break
#out_tensor.replace_uses_with(new_node.out_tensors[0])
child_node.replace_input_at(offset, new_node.out_tensors[0])
new_node.add_in_tensor(out_tensor)
new_node.insert_after(parent_node)
def set_top_block(self, block):
self._top_block = block
def add_block(self, block):
self._blocks.append(block)
def all_blocks(self):
return self._blocks
def all_nodes(self):
for _, node in self._nodes_by_name.items():
yield node
def head_node(self):
return self._top_block.input_node
def return_node(self):
return self._top_block.return_node
def clean_tensors_data(self):
for tensor in self.tensors:
tensor.clean_data()
def assign_node_topological_name(self, prefix="", suffix=""):
count = itertools.count(0)
illegal_char_regex = re.compile('[^0-9a-zA-Z_]+')
def _assign_nodes(nodes):
sorted_nodes = self.top_sort_nodeset(list(nodes))
for n in sorted_nodes:
if n.blocks:
for block in n.blocks:
_assign_nodes(block.nodes)
else:
candidate = illegal_char_regex.sub("_", n.op.type)
n.normalized_name = f"{prefix}{candidate}_{next(count)}{suffix}"
_assign_nodes(self.nodes)
def simple_description(self):
"""
Only describe op type topological info.
"""
def get_node_simple_info(node):
node_des = {}
node_des['op'] = node.op.type
node_des["input_ops"] = [node.owning_graph.node(inode).op.type for inode in node.in_nodes]
node_des["output_ops"] = [node.owning_graph.node(onode).op.type for onode in node.out_nodes]
if node.blocks:
for i, block in enumerate(node.blocks):
node_des[f'block_{i}'] = []
for n in self.top_sort_nodeset(list(block.nodes)):
node_des[f'block_{i}'].append(get_node_simple_info(n))
return node_des
graph_des = {}
graph_des['nodes'] = []
for n in self.top_sort_nodeset(list(self.nodes)):
graph_des['nodes'].append(get_node_simple_info(n))
graph_str = json.dumps(graph_des, indent=2, separators=(',', ': '))
return graph_str
def get_md5(self):
import hashlib
graph_str = self.simple_description()
md = hashlib.md5()
md.update(graph_str.encode("utf-8"))
return md.hexdigest()
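# --- Hedged usage sketch (not part of the original source): insert_node_between_nodes
# splices a new node onto the edge between a parent and one of its children, rewiring
# only that child's use of the parent's output tensor. `graph`, `conv` and `relu` are
# hypothetical, pre-built objects; `make_identity_node` stands in for whatever creates
# the node to insert, and plain accessors like `name`/`in_nodes` are assumed to be
# properties, as in the original module.
def _sketch_insert_between(graph, conv, relu, make_identity_node):
    new_node = make_identity_node()
    graph.insert_node_between_nodes(new_node, parent_node=conv, child_node=relu)
    assert relu.in_nodes == [new_node.name]   # the child now consumes the new node's tensor
    assert conv.name in new_node.in_nodes     # the new node consumes the parent's tensor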
class Node(NodeBase):
"""A node contains an op and its input and output tensor.
"""
def __init__(self, name: str,
op: Optional[str] = None,
dtype: Optional[str] = None,
in_quant_part: Optional[bool] = False):
super().__init__()
self._name = name
self._op = op
self._dtype = dtype
self._idx = -1
self._scope_name = ""
self._source_range = ""
self._normalized_name = ""
self._in_tensors = []
self._out_tensors = []
self._in_nodes = []
self._out_nodes = []
self._blocks = []
self._is_quantizable = in_quant_part
self._is_merged = False
self._transpose_in_order = None
self._transpose_out_order = None
self._topo_position = 0
self._block = None
self._graph = None
self._neighbor_nodes = [None, None]
self._target_device = None
def __repr__(self):
return f"Node(name={self.name}, id={self.idx}, op_type={self.op.type}, quant_state={self.in_quant_part})"
def __str__(self):
return json.dumps(self.description(), indent=2, separators=(',', ': '))
def __deepcopy__(self, memo):
raise NotImplementedError("Deep copy is prohibited, use `clone_from` instead.")
def clone_from(self, src_node, local_map):
tmp_attrs = src_node.op._attrs
tmp_params = src_node.op._params
tmp_configs = src_node.op._configs
src_node.op._params = copy.copy(tmp_params)
src_node.op._attrs = copy.copy(tmp_attrs)
src_node.op._configs = copy.copy(tmp_configs)
self.op = copy.copy(src_node.op)
self.op._export_attr_and_param()
src_node.op._attrs = tmp_attrs
src_node.op._params = tmp_params
src_node.op._configs = tmp_configs
self.op.clone_from(src_node.op, local_map)
def scope_name(self):
return self._scope_name
def scope_name(self, name):
self._scope_name = name
def description(self):
node_des = {}
node_des['name'] = self._name
node_des['scope_name'] = self._scope_name
node_des['idx'] = self._idx
node_des['dtype'] = self._dtype
node_des['enable_quant'] = self._is_quantizable
node_des['in_nodes'] = [i for i in self.in_nodes]
node_des['out_nodes'] = [o for o in self.out_nodes]
node_des['in_tensors'] = [it.description() for it in self.in_tensors]
node_des['out_tensors'] = [ot.description() for ot in self.out_tensors]
node_des['op'] = self._op.description()
if self._blocks:
for i, block in enumerate(self._blocks):
node_des[f'block_{i}'] = []
for n in sorted(block.nodes, key=lambda n: n.idx):
node_des[f'block_{i}'].append(n.description())
return node_des
def clean_connections(self):
self._in_nodes = []
self._out_nodes = []
def add_in_node(self, node_name: str):
if node_name not in self._in_nodes:
self._in_nodes.append(node_name)
def add_out_node(self, node_name: str):
if node_name not in self._out_nodes:
self._out_nodes.append(node_name)
def in_tensors(self):
return self._in_tensors
def out_tensors(self):
return self._out_tensors
def in_nodes(self):
nodes = []
for tensor in self.in_tensors:
if tensor.node is not None:
nodes.append(tensor.node.name)
return nodes
def out_nodes(self):
nodes = []
for out in self.out_tensors:
for use in out.uses:
nodes.append(use.user.name)
return nodes
def node_attr(self, key):
return self._op.get_attr(key)
def set_node_attr(self, key, value):
if all([val is None for val in self._op._attr_value_mem[key]]):
self._op.set_attr(key, value)
else:
self._op.update_attr(key, value)
def node_config(self, key):
return self._op.get_config(key)
def set_node_config(self, key, value):
self._op.set_config(key, value)
def has_bound_params(self):
return self._op.has_native_params()
def op_type(self):
return self.op.type
def name(self):
return self._name
def name(self, value):
self._name = value
def idx(self):
return self._idx
def idx(self, index):
self._idx = index
self.owning_graph.update_node_idx(self, index)
def op(self):
return self._op
def op(self, op):
self._op = op
def dtype(self):
return self._dtype
# @property
# def alias(self):
# return self._alias
def in_quant_part(self) -> bool:
return self._is_quantizable
def in_quant_part(self, quant_state: bool) -> None:
self._is_quantizable = quant_state
def module(self):
return self._module()
def module(self, module):
self._module = weakref.ref(module)
def blocks(self):
return self._blocks
def add_block(self, block):
self._blocks.append(block)
def has_custom_op(self):
return isinstance(self.op, CustomOp)
def get_attr_val(self, attr_name):
attr = self.node_attr(attr_name)
return attr.data if isinstance(attr, Tensor) else attr
def merged(self):
return self._is_merged
def merged(self, flag):
self._is_merged = flag
def transpose_in_order(self):
return self._transpose_in_order
def transpose_in_order(self, order):
self._transpose_in_order = order
def transpose_out_order(self):
return self._transpose_out_order
def transpose_out_order(self, order):
self._transpose_out_order = order
def set_node_attr_tensor_value(self, old_tensor, new_tensor):
for attr_name, attr_value in self.op.attrs.items():
if attr_value.value is old_tensor:
self.set_node_attr(attr_name, new_tensor)
def destroy(self):
if len(self.blocks) > 0:
raise RuntimeError("Can't destroy if or loop node.")
while len(self.out_tensors) > 0:
self.remove_output(len(self.out_tensors) - 1)
self.remove_all_inputs()
if self.in_node_list():
self.remove_from_list()
self.owning_graph.free_node(self)
def remove_output(self, i):
assert i < len(self.out_tensors)
assert len(self.out_tensors[i].uses) == 0
output = self.out_tensors.pop(i)
self.owning_graph.remove_tensor(output)
for output_offset in range(i, len(self.out_tensors)):
self.out_tensors[output_offset].offset -= 1
def replace_input_at(self, i, new_tensor):
old_tensor = self.in_tensors[i]
if old_tensor is new_tensor:
return
self.in_tensors[i] = new_tensor
uses = [u for u in old_tensor.uses]
attr_uses = [attr_u for attr_u in old_tensor.attr_uses]
for u in uses:
if u.user is self:
new_tensor.uses.append(u)
old_tensor.uses.remove(u)
for attr_u in attr_uses:
if attr_u.user is self.op:
old_tensor.replace_attr_with_new_tensor_v2(attr_u, new_tensor)
def remove_input(self, i):
self.drop_input(i)
for j in range(i + 1, len(self._in_tensors)):
it = self.find_use_for_input(j)
it.offset -= 1
self._in_tensors.pop(i)
def remove_all_inputs(self):
for i in range(len(self.in_tensors)):
self.drop_input(i)
self.in_tensors.clear()
def drop_input(self, i):
assert i < len(self.in_tensors)
input_value = self.in_tensors[i]
use_it = self.find_use_for_input(i)
input_value.uses.remove(use_it)
self.in_tensors[i] = None
return input_value
def find_use_for_input(self, i):
use_it = None
for use in self.in_tensors[i].uses:
if use.offset == i and use.user is self:
use_it = use
assert use_it is not None
return use_it
def owning_block(self):
return self._block
def owning_block(self, block):
self._block = block
def owning_graph(self):
return self._graph
def owning_graph(self, graph):
self._graph = graph
if self._graph:
self._graph.add_node(self)
def topo_position(self):
return self._topo_position
def topo_position(self, pos):
self._topo_position = pos
def insert_before(self, node):
assert node.in_node_list()
self.insert_after(node.prev_node)
def insert_after(self, node):
assert not self.in_node_list() and node.in_node_list()
assert node.owning_block is not None
self._block = node.owning_block
next_node = node.next_node
node.next_node = self
self.prev_node = node
self.next_node = next_node
next_node.prev_node = self
self.update_topo_position()
def update_topo_position(self):
is_first_node = self.prev_node is self.owning_block.input_node
is_last_node = self.next_node is self.owning_block.return_node
prev_pos = self.prev_node.topo_position
next_pos = self.next_node.topo_position
if is_last_node:
if is_first_node:
self.topo_position = MID_POSITION
return
if prev_pos >= (POSITION_UPPER_BOUND - APPEND_INTERVAL):
self.owning_block.reindex_topo()
return
self.topo_position = prev_pos + APPEND_INTERVAL
elif is_first_node:
if next_pos <= (POSITION_LOWER_BOUND + APPEND_INTERVAL):
self.owning_block.reindex_topo()
return
self.topo_position = next_pos - APPEND_INTERVAL
else:
pos_between = prev_pos + (next_pos - prev_pos) / 2
if pos_between == prev_pos:
self.owning_block.reindex_topo()
return
self.topo_position = pos_between
def next_node(self):
return self._neighbor_nodes[1]
def next_node(self, node):
self._neighbor_nodes[1] = node
def prev_node(self):
return self._neighbor_nodes[0]
def prev_node(self, node):
self._neighbor_nodes[0] = node
def in_node_list(self):
if self.next_node is None:
assert self.prev_node is None
return self.next_node is not None
def remove_from_list(self):
assert self.in_node_list()
if self.owning_block.input_node is self:
self.owning_block.input_node = self.next_node
self.owning_block = None
next_node = self.next_node
prev_node = self.prev_node
prev_node.next_node = next_node
next_node.prev_node = prev_node
self.next_node = None
self.prev_node = None
def add_in_tensor(self, tensor):
tensor.uses.append(Use(self, len(self.in_tensors)))
self._in_tensors.append(tensor)
self.owning_graph.add_tensor(tensor)
def add_out_tensor(self, tensor):
tensor.offset = len(self.out_tensors)
self._out_tensors.append(tensor)
tensor.node = self
self.owning_graph.add_tensor(tensor)
def target_device(self):
return self._target_device
def target_device(self, device):
self._target_device = device
def scope_name(self):
return self._scope_name
def scope_name(self, scope_name):
self._scope_name = scope_name
def source_range(self):
return self._source_range
def source_range(self, source_range):
self._source_range = source_range
def normalized_name(self):
return self._normalized_name
def normalized_name(self, name):
self._normalized_name = name
def modify_concat(graph: Graph, node: Node, expanding_desc: Mapping[str, StructuredExpanding]):
offset = 0
out_dim = 0
node_expanding = expanding_desc[node.name]
assert isinstance(node_expanding, GenericStructuredExpanding), \
"Variable node_expanding here has to be instance of GenericStructuredExpanding"
  for in_node_name in node.in_nodes:
    input_expanding = expanding_desc[in_node_name]
out_dim += input_expanding.out_dim
for weight_insert in input_expanding.out_inserts:
node_expanding.add_insert(
DataInsert(offset + weight_insert.position, weight_insert.added_num_channels))
offset += input_expanding.out_dim - input_expanding.added_out_channel
node_expanding.out_dim = out_dim | null |
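A minimal standalone sketch of the offset arithmetic in modify_concat above: each input's insert positions are shifted by the number of original (pre-expansion) channels of the inputs that precede it along the concat axis. The helper name and the tuple layout below are illustrative only, not part of the library API.

def concat_inserts(inputs):
  # inputs: list of (out_dim, added_out_channel, [(position, added_num_channels), ...])
  offset, out_dim, merged = 0, 0, []
  for dim, added, inserts in inputs:
    out_dim += dim
    for pos, num in inserts:
      merged.append((offset + pos, num))
    offset += dim - added          # advance by the input's original channel count
  return out_dim, merged

# First input expanded 4 -> 6 (2 channels added at position 4),
# second expanded 3 -> 4 (1 channel added at position 1).
print(concat_inserts([(6, 2, [(4, 2)]), (4, 1, [(1, 1)])]))   # (10, [(4, 2), (5, 1)])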
23,168 | from typing import Mapping
from nndct_shared.expanding.spec import BatchNormStructuredExpanding, InstanceNormStructuredExpanding, \
DataInsert, GenericStructuredExpanding, StructuredExpanding, WeightedNodeStructuredExpanding
from nndct_shared.nndct_graph.base_graph import Graph
from nndct_shared.nndct_graph.base_node import Node
from nndct_shared.nndct_graph.base_tensor import Tensor
from nndct_shared.utils import registry
from nndct_shared.base.key_names import NNDCT_OP as OpTypes
from nndct_shared.pruning.pruning_lib import is_depthwise_conv, _DISALLOW_PRUNED_INPUT_OPS, find_prunable_ancestor, CONV_OPS
import numpy as np
class StructuredExpanding(object):
def __init__(self, node_name: str) -> None:
def node_name(self) -> str:
def in_dim(self) -> int:
def in_dim(self, v: int) -> None:
def out_dim(self) -> int:
def out_dim(self, v: int) -> None:
def added_out_channel(self) -> int:
def added_in_channel(self) -> int:
def out_inserts(self) -> List[DataInsert]:
class Graph(GraphBase):
def __init__(self, graph_name=None):
def __contains__(self, node_or_name: Union[str, Node]) -> bool:
def __deepcopy__(self, memo):
def clone(self):
def clone_from(self, src_graph):
def create_node_from(self, src_node, local_map, converted_nodes):
def node(self, name):
def get_node_by_idx(self, idx):
def get_input_nodes(self):
def get_input_tensors(self, input_args):
def get_return_tensors(self):
def add_node(self, node: Node) -> None:
def free_node(self, node):
def remove_node(self, node):
def remove_node_by_types(self, node_types: List[str]) -> Dict[str, str]:
def find_nodes_by_types(self, node_types: List[NNDCT_OP]):
def reconnect_nodes(self):
def connect_nodes(self):
def parents(self, node: Union[Node, str]) -> List[Node]:
def children(self, node: Union[Node, str]) -> List[Node]:
def add_tensor(self, tensor):
def tensor(self, name):
def param_tensor(self, name):
def add_end_tensor(self, tensor):
def __repr__(self):
def __str__(self):
def description(self):
def set_node_id(self, index, node):
def create_subgraph_from_nodeset(cls, origin_graph, nodeset, graph_name):
def top_sort_nodeset(nodeset: Sequence[Node]) -> List[Node]:
def get_topological_graph_nodes_list(self):
def name(self):
def name(self, name):
def nodes(self):
def reverse_nodes(self):
def tensors(self):
def end_tensors(self):
def inputs(self):
def outputs(self):
def op_types(self):
def append_node(self, node):
def add_param_name(self, param_name):
def param_names(self):
def block(self):
def is_tensor_in_graph(self, tensor_name):
def update_node_idx(self, node, index):
def clear_node_id_map(self):
def remove_tensor(self, tensor):
def insert_node_between_nodes(self, new_node, parent_node, child_node):
def set_top_block(self, block):
def add_block(self, block):
def all_blocks(self):
def all_nodes(self):
def head_node(self):
def return_node(self):
def clean_tensors_data(self):
def assign_node_topological_name(self, prefix="", suffix=""):
def _assgin_nodes(nodes):
def simple_description(self):
def get_node_simple_info(node):
def get_md5(self):
class Node(NodeBase):
def __init__(self, name: str,
op: Optional[str] = None,
dtype: Optional[str] = None,
in_quant_part: Optional[bool] = False):
def __repr__(self):
def __str__(self):
def __deepcopy__(self, memo):
def clone_from(self, src_node, local_map):
def scope_name(self):
def scope_name(self, name):
def description(self):
def clean_connections(self):
def add_in_node(self, node_name: str):
def add_out_node(self, node_name: str):
def in_tensors(self):
def out_tensors(self):
def in_nodes(self):
def out_nodes(self):
def node_attr(self, key):
def set_node_attr(self, key, value):
def node_config(self, key):
def set_node_config(self, key, value):
def has_bound_params(self):
def op_type(self):
def name(self):
def name(self, value):
def idx(self):
def idx(self, index):
def op(self):
def op(self, op):
def dtype(self):
def in_quant_part(self) -> bool:
def in_quant_part(self, quant_state: bool) -> None:
def module(self):
def module(self, module):
def blocks(self):
def add_block(self, block):
def has_custom_op(self):
def get_attr_val(self, attr_name):
def merged(self):
def merged(self, flag):
def transpose_in_order(self):
def transpose_in_order(self, order):
def transpose_out_order(self):
def transpose_out_order(self, order):
def set_node_attr_tensor_value(self, old_tensor, new_tensor):
def destroy(self):
def remove_output(self, i):
def replace_input_at(self, i, new_tensor):
def remove_input(self, i):
def remove_all_inputs(self):
def drop_input(self, i):
def find_use_for_input(self, i):
def owning_block(self):
def owning_block(self, block):
def owning_graph(self):
def owning_graph(self, graph):
def topo_position(self):
def topo_position(self, pos):
def insert_before(self, node):
def insert_after(self, node):
def update_topo_position(self):
def next_node(self):
def next_node(self, node):
def prev_node(self):
def prev_node(self, node):
def in_node_list(self):
def remove_from_list(self):
def add_in_tensor(self, tensor):
def add_out_tensor(self, tensor):
def target_device(self):
def target_device(self, device):
def scope_name(self):
def scope_name(self, scope_name):
def source_range(self):
def source_range(self, source_range):
def normalized_name(self):
def normalized_name(self, name):
CONV_OPS = [
OpTypes.CONV2D, OpTypes.CONVTRANSPOSE2D, OpTypes.CONV3D,
OpTypes.CONVTRANSPOSE3D, OpTypes.SEPARABLECONV2D
]
def find_prunable_ancestor(graph, node, target_ops=CONV_OPS):
def raise_if_has_pruned_input(graph: Graph, node: Node, expanding_desc: Mapping[str, StructuredExpanding]):
for node_name in node.in_nodes:
input_pruning = expanding_desc[node_name]
if input_pruning.added_out_channel > 0:
input_node = graph.node(node_name)
if input_node.op.type in CONV_OPS:
prunable_node = input_node
else:
prunable_node = find_prunable_ancestor(graph, input_node)
raise RuntimeError(('Operation "{}" cannot take expanded tensor as input, '
'please exclude node "{}" from pruning.').format(
node.op.type, prunable_node.name)) | null |
23,169 | import numpy as np
import math
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP
from nndct_shared.utils import NndctOption
from nndct_shared.algorithms import breadth_first_search_handler
from .quant_ops import normal_quant_neuron
def kernel_need_quant(quantizer, node):
if NndctOption.nndct_quant_off.value:
return False
elif quantizer is None:
return False
else:
return quantizer.configer.is_node_quantizable(node, lstm=quantizer.lstm) | null |
23,170 | import numpy as np
import math
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP
from nndct_shared.utils import NndctOption
from nndct_shared.algorithms import breadth_first_search_handler
from .quant_ops import normal_quant_neuron
def normal_quant_neuron(data,
maxamps=[[32768], [2048]],
strides=[-1],
round_method=2,
keep_scale=True,
name='',
quantizer=None,
on_gpu=True,
as_int=False):
def quantize_data2int(data, bn, fp, method=2):
return normal_quant_neuron(
data, maxamps=[[2**(bn - 1)], [2**fp]], round_method=method, as_int=True) | null |
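A rough standalone illustration of what the bn/fp arguments of quantize_data2int request, assuming the usual symmetric fixed-point convention (bn = bit width, fp = fraction bits). This is not the library's normal_quant_neuron, whose body is not shown here.

import numpy as np

def fixed_point_int(data, bn, fp):
  step = 2.0 ** -fp                                   # quantization step = 1 / 2**fp
  q = np.round(np.asarray(data) / step)               # scale and round to the integer grid
  return np.clip(q, -2 ** (bn - 1), 2 ** (bn - 1) - 1).astype(np.int64)

print(fixed_point_int([0.5, -1.26, 100.0], bn=8, fp=4))   # [  8 -20 127]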
23,171 | import numpy as np
import math
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP
from nndct_shared.utils import NndctOption
from nndct_shared.algorithms import breadth_first_search_handler
from .quant_ops import normal_quant_neuron
def is_quant_end_point(graph, node, quant_types):
if len(graph.parents(node.name)) == 0:
return False
__QuantNodes = []
def __check_end(node_name):
if graph.node(node_name).op.type in quant_types:
__QuantNodes.append(node_name)
def __children_names(node_name):
for c in graph.children(node_name):
if len(__QuantNodes) >= 1:
break
yield c.name
breadth_first_search_handler(
node.name, generator=__children_names, handler=__check_end)
return len(__QuantNodes) == 0 | null |
23,172 | import numpy as np
import math
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP
from nndct_shared.utils import NndctOption
from nndct_shared.algorithms import breadth_first_search_handler
from .quant_ops import normal_quant_neuron
def get_flows_and_info(quant_mode,
quantizer,
node_name=None,
params=None,
inputs=None):
node = quantizer.configer.get_Nndctnode(node_name, params, inputs)
return None, quantizer.configer.quant_input_names(
node, inputs, params), (quantizer.configer.quant_output(node).name, True) | null |
23,173 | import numpy as np
import math
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP
from nndct_shared.utils import NndctOption
from nndct_shared.algorithms import breadth_first_search_handler
from .quant_ops import normal_quant_neuron
def maybe_get_quantizer(quantizer=None):
quantizer = quantizer or GLOBAL_MAP.get_ele(NNDCT_KEYS.QUANTIZER)
if quantizer:
return quantizer.quant_mode, quantizer
else:
return GLOBAL_MAP.get_ele(NNDCT_KEYS.QUANT_MODE), None
def quantize_tensors(tensors, node, tensor_names=None, tensor_type='output', method=None):
quant_mode, quantizer = maybe_get_quantizer()
if quantizer is None:
return tensors
elif tensor_type != 'output' and (not node.in_quant_part):
return tensors
# custom op output may need quantization for its following node
elif not node.in_quant_part and not node.op.is_custom_op:
return tensors
qtensors = []
if quant_mode in [1, 3]:
qfunc = quantizer.calibrate
elif quant_mode == 2:
qfunc = quantizer.quantize
tname = node.name
datatype = 'int'
for idx in range(len(tensors)):
if tensor_type == 'param':
tname = tensor_names[idx]
index = 0
else:
index = idx
if (quantizer.need_quantize_tensor(tname, tensor_type)):
if NndctOption.nndct_only_int_quant.value is False:
datatype = quantizer.get_quant_dtype(tname, tensor_type) if tensor_type=='param' else \
quantizer.get_quant_dtype(node.name, tensor_type)
qtensors.append(qfunc(
tensors[idx],
tname,
node,
tensor_type,
index,
method=method,
datatype=datatype))
else:
qtensors.append(tensors[idx])
return qtensors | null |
23,174 | import numpy as np
import math
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP
from nndct_shared.utils import NndctOption
from nndct_shared.algorithms import breadth_first_search_handler
from .quant_ops import normal_quant_neuron
def maybe_get_quantizer(quantizer=None):
quantizer = quantizer or GLOBAL_MAP.get_ele(NNDCT_KEYS.QUANTIZER)
if quantizer:
return quantizer.quant_mode, quantizer
else:
return GLOBAL_MAP.get_ele(NNDCT_KEYS.QUANT_MODE), None
def quant_reluk_params(node, channel_max):
quant_mode, quantizer = maybe_get_quantizer()
# ignore parameters quantization if the node is not to be quantized
#print('---- quant o: {}, in quant part:{}'.format(node.name, node.in_quant_part))
if not node.in_quant_part or quantizer is None:
return channel_max
if quantizer.need_quantize_tensor(node.name, 'output'):
#print('---- quant o: {}'.format(node.name))
output_name = node.name
#print('qmode = %d, q_end: %d activation: %s' %
# (quant_mode, is_quant_end, output_name))
if quant_mode == 2:
datatype = 'int'
if NndctOption.nndct_only_int_quant.value is False:
datatype = quantizer.get_quant_dtype(node.name, tensor_type='output')
channel_max = quantizer.quantize(
channel_max, output_name, node, tensor_type='output', datatype=datatype)
return channel_max | null |
23,175 | import numpy as np
import math
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP
from nndct_shared.utils import NndctOption
from nndct_shared.algorithms import breadth_first_search_handler
from .quant_ops import normal_quant_neuron
def maybe_get_quantizer(quantizer=None):
quantizer = quantizer or GLOBAL_MAP.get_ele(NNDCT_KEYS.QUANTIZER)
if quantizer:
return quantizer.quant_mode, quantizer
else:
return GLOBAL_MAP.get_ele(NNDCT_KEYS.QUANT_MODE), None
def quant_channel_scale_params(node, channel_scale):
quant_mode, quantizer = maybe_get_quantizer()
# ignore parameters quantization if the node is not to be quantized
#print('---- quant o: {}, in quant part:{}'.format(node.name, node.in_quant_part))
if not node.in_quant_part or quantizer is None:
return channel_scale
if quantizer.need_quantize_tensor(node.name, 'output'):
#print('---- quant o: {}'.format(node.name))
output_name = node.name
#print('qmode = %d, q_end: %d activation: %s' %
# (quant_mode, is_quant_end, output_name))
if quant_mode == 2:
datatype = 'int'
if NndctOption.nndct_only_int_quant.value is False:
datatype = quantizer.get_quant_dtype(node.name, tensor_type='output')
channel_scale = quantizer.quantize(
channel_scale, output_name, node, tensor_type='output', datatype=datatype)
return channel_scale | null |
23,176 | import numpy as np
import math
def max(data, name='', quantizer=None):
return data.max() | null |
23,177 | import numpy as np
import math
def min(data, name='', quantizer=None):
return data.min() | null |
23,178 | import numpy as np
import math
def quant_diff_s(data, bitwidth, range, round_method=2, name='',
quantizer=None):
raise NotImplementedError("please implement the diffs operation") | null |
23,179 | import numpy as np
import math
def nonlin(data, alpha, signed):
if signed:
return np.clip(data, -alpha, alpha)
else:
return np.clip(data, 0, alpha) | null |
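A quick usage check of nonlin's clipping behaviour (assumes a numpy array input).

import numpy as np
x = np.array([-2.0, -0.3, 0.7, 5.0])
print(nonlin(x, alpha=1.0, signed=True))    # [-1.  -0.3  0.7  1. ]
print(nonlin(x, alpha=1.0, signed=False))   # [0.  0.  0.7 1. ]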
23,180 | import numpy as np
import math
def pact_quant_neuron(data,
bitw,
bita,
alpha_init_value=None,
signed=False,
trainable=True,
warmup=False,
name='',
tensor_type='act',
quantizer=None):
raise NotImplementedError("please implement the pact_quant_neuron operation") | null |
23,181 | import numpy as np
import math
def graffitist_quant_neuron(data, bn, fp, method=2, name=''):
raise NotImplementedError(
"please implement the lowbit_quant_neuron operation") | null |
23,182 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from nndct_shared.base.key_names import NNDCT_OP
from nndct_shared.base.key_names import FrameworkType
from nndct_shared.nndct_graph import base_tensor
class DataFormatMap(object):
"""A dict mapping of framework and op type to its data format.
"""
_blob_format_map = {
FrameworkType.NNDCT: {
2: "NH",
3: "NLC",
4: "NHWC",
5: "NHWDC"
},
FrameworkType.TORCH: {
2: "NH",
3: "NCL",
4: "NCHW",
5: "NCDHW"
},
# TF format generated in runtime.
}
_parameter_format_map = {
FrameworkType.NNDCT: {
2: "OI",
3: "OLI",
4: "OHWI",
5: "OHWDI"
},
FrameworkType.TORCH: {
2: "OI",
3: "OIL",
4: "OIHW",
5: "OIDHW"
},
FrameworkType.TENSORFLOW: {
2: "IO",
3: "LIO",
4: "HWIO",
5: "DHWIO",
}
}
def blob_format(cls, framework_type, ndim):
if framework_type not in cls._blob_format_map:
raise KeyError(
"Framework type '{}' not supported now.".format(framework_type))
return cls._blob_format_map[framework_type][ndim]
def param_format(cls, framework_type, ndim):
if framework_type not in cls._parameter_format_map:
raise KeyError(
"Framework type '{}' not supported now.".format(framework_type))
return cls._parameter_format_map[framework_type][ndim]
def layout_transformer(src_layout, dst_layout):
assert len(src_layout) == len(dst_layout)
axes = []
for axis in dst_layout:
axes.append(src_layout.index(axis))
return tuple(axes)
def param_layout_transformer(src_framework, dst_framework, ndim):
src_format = DataFormatMap.param_format(src_framework, ndim)
dst_format = DataFormatMap.param_format(dst_framework, ndim)
return layout_transformer(src_format, dst_format) | null |
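A worked example of layout_transformer: the returned tuple is the axes argument you would pass to numpy.transpose to go from the source layout to the destination layout.

import numpy as np
print(layout_transformer("NCHW", "NHWC"))   # (0, 2, 3, 1)
print(layout_transformer("OIHW", "HWIO"))   # (2, 3, 1, 0)
w = np.zeros((16, 3, 5, 5))                 # an OIHW conv kernel
print(np.transpose(w, layout_transformer("OIHW", "HWIO")).shape)   # (5, 5, 3, 16)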
23,183 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from nndct_shared.base.key_names import NNDCT_OP
from nndct_shared.base.key_names import FrameworkType
from nndct_shared.nndct_graph import base_tensor
class DataFormatMap(object):
"""A dict mapping of framework and op type to its data format.
"""
_blob_format_map = {
FrameworkType.NNDCT: {
2: "NH",
3: "NLC",
4: "NHWC",
5: "NHWDC"
},
FrameworkType.TORCH: {
2: "NH",
3: "NCL",
4: "NCHW",
5: "NCDHW"
},
# TF format generated in runtime.
}
_parameter_format_map = {
FrameworkType.NNDCT: {
2: "OI",
3: "OLI",
4: "OHWI",
5: "OHWDI"
},
FrameworkType.TORCH: {
2: "OI",
3: "OIL",
4: "OIHW",
5: "OIDHW"
},
FrameworkType.TENSORFLOW: {
2: "IO",
3: "LIO",
4: "HWIO",
5: "DHWIO",
}
}
def blob_format(cls, framework_type, ndim):
if framework_type not in cls._blob_format_map:
raise KeyError(
"Framework type '{}' not supported now.".format(framework_type))
return cls._blob_format_map[framework_type][ndim]
def param_format(cls, framework_type, ndim):
if framework_type not in cls._parameter_format_map:
raise KeyError(
"Framework type '{}' not supported now.".format(framework_type))
return cls._parameter_format_map[framework_type][ndim]
def layout_transformer(src_layout, dst_layout):
assert len(src_layout) == len(dst_layout)
axes = []
for axis in dst_layout:
axes.append(src_layout.index(axis))
return tuple(axes)
def convert_blob_tensor_format(tensor: base_tensor.Tensor, src_framework: str,
dst_framework: str) -> base_tensor.Tensor:
if not isinstance(tensor, base_tensor.Tensor):
raise TypeError("'tensor' must be Tensor, but given {}".format(
type(tensor)))
if not tensor.is_complete_tensor():
return tensor
if src_framework == dst_framework:
return tensor
if tensor.ndim not in DataFormatMap._blob_format_map[src_framework].keys():
return tensor
src_layout = DataFormatMap.blob_format(src_framework, tensor.ndim)
dst_layout = DataFormatMap.blob_format(dst_framework, tensor.ndim)
if src_layout == dst_layout:
return tensor
tensor.transpose(layout_transformer(src_layout, dst_layout))
return tensor | null |
23,184 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from nndct_shared.base.key_names import NNDCT_OP
from nndct_shared.base.key_names import FrameworkType
from nndct_shared.nndct_graph import base_tensor
class DataFormat(object):
channel_first = "channel first"
channel_last = "channel_last"
The provided code snippet includes necessary dependencies for implementing the `transformed_axis` function. Write a Python function `def transformed_axis(src: str, dst: str, ndim: int, dim: int) -> int` to solve the following problem:
NC* -> N*C/ N*C ->NC*
Here is the function:
def transformed_axis(src: str, dst: str, ndim: int, dim: int) -> int:
"""NC* -> N*C/ N*C ->NC*"""
if ndim is None or ndim not in [4, 5] or src == dst:
return dim
# NCHW -> NHWC / NHWC - > NCHW
if ndim == 4:
if src == DataFormat.channel_first and dst == DataFormat.channel_last:
return dim + [0, 2, -1, -1][dim]
elif src == DataFormat.channel_last and dst == DataFormat.channel_first:
return dim + [0, 1, 1, -2][dim]
# NCDHW -> NHWDC / NHWDC -> NCDHW
elif ndim == 5:
if src == DataFormat.channel_first and dst == DataFormat.channel_last:
return dim + [0, 3, 1, -2, -2][dim]
elif src == DataFormat.channel_last and dst == DataFormat.channel_first:
return dim + [0, 2, 2, -1, -3][dim] | NC* -> N*C/ N*C ->NC* |
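A worked example for the 4-d case: going from channel-first (NCHW) to channel-last (NHWC), the channel axis 1 maps to axis 3 and the spatial axes shift down by one.

for d in range(4):
  print(d, "->", transformed_axis(DataFormat.channel_first, DataFormat.channel_last, 4, d))
# 0 -> 0, 1 -> 3, 2 -> 1, 3 -> 2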
23,185 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from nndct_shared.base.key_names import NNDCT_OP
from nndct_shared.base.key_names import FrameworkType
from nndct_shared.nndct_graph import base_tensor
def permute_data(data, order):
if order is None or (not isinstance(data, np.ndarray)):
return data
if len(order) != data.ndim:
raise RuntimeError("The data dimensions should consistent with length of order")
return np.transpose(data, order) | null |
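Example usage: with order (0, 2, 3, 1) an NCHW batch becomes NHWC.

import numpy as np
x = np.zeros((8, 3, 32, 32))                 # NCHW
print(permute_data(x, (0, 2, 3, 1)).shape)   # (8, 32, 32, 3)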
23,186 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from nndct_shared.base.key_names import NNDCT_OP
from nndct_shared.base.key_names import FrameworkType
from nndct_shared.nndct_graph import base_tensor
def permute_axes(axes, order):
if order is None:
return axes
if len(axes) != len(order):
raise RuntimeError("The data shape should consistent with length of order")
new_axes = [None] * len(axes)
for i, j in enumerate(order):
new_axes[i] = axes[j]
return new_axes | null |
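permute_axes applies the same reordering to a plain shape list that permute_data applies to array axes, for example:

print(permute_axes([8, 3, 32, 32], (0, 2, 3, 1)))   # [8, 32, 32, 3]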
23,187 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from nndct_shared.base.key_names import NNDCT_OP
from nndct_shared.base.key_names import FrameworkType
from nndct_shared.nndct_graph import base_tensor
def combine_orders(order1, order2):
new_order = len(order1) * [None]
for i in range(len(order1)):
t_i = order1.index(i)
new_order[i] = order2.index(t_i)
return new_order | null |
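A quick sanity check of combine_orders: composing the NCHW->NHWC transpose order with the NHWC->NCHW order yields the identity order.

print(combine_orders([0, 2, 3, 1], [0, 3, 1, 2]))   # [0, 1, 2, 3]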
23,188 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging as _logging
import os as _os
import sys as _sys
import time as _time
import traceback as _traceback
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import WARN
from logging import NOTSET
import threading
from .option_list import NndctOption
from nndct_shared.base import SingletonMeta
from .msg_code import QError, QWarning
def _log_prefix(level, timestamp=None, file_and_line=None):
"""Generate a nndct logline prefix."""
# pylint: disable=global-variable-not-assigned
global _level_names
# pylint: enable=global-variable-not-assigned
# Record current time
now = timestamp or _time.time()
now_tuple = _time.localtime(now)
now_microsecond = int(1e6 * (now % 1.0))
(filename, line) = file_and_line or _get_file_and_line()
basename = _os.path.basename(filename)
# Severity string
severity = 'I'
if level in _level_names:
severity = _level_names[level][0]
s = '%c%02d%02d %02d:%02d:%02d.%06d %s:%d]' % (
severity,
now_tuple[1], # month
now_tuple[2], # day
now_tuple[3], # hour
now_tuple[4], # min
now_tuple[5], # sec
now_microsecond,
basename,
line)
return s
def get_logger(name=None, level=None, file_name=None, only2file=False):
"""Return logger instance."""
# global _logger
# Use double-checked locking to avoid taking lock unnecessarily.
# if _logger:
# return _logger
_logger_lock.acquire()
try:
# Scope the TensorFlow logger to not conflict with users' loggers.
logger = _logging.getLogger(name)
if level:
logger.setLevel(_logging.INFO)
else:
logger.setLevel(1)
# Override findCaller on the logger to skip internal helper functions
logger.findCaller = _logger_find_caller
# Don't further configure the TensorFlow logger if the root logger is
# already configured. This prevents double logging in those cases.
if not _logging.getLogger().handlers:
# Determine whether we are in an interactive environment
_interactive = False
try:
# This is only defined in interactive shells.
if _sys.ps1:
_interactive = True
except AttributeError:
# Even now, we may be in an interactive shell with `python -i`.
_interactive = _sys.flags.interactive
# If we are in an interactive environment (like Jupyter), set loglevel
# to INFO and pipe the output to stdout.
if _interactive:
#logger.setLevel(INFO)
_logging_target = _sys.stdout
else:
_logging_target = _sys.stderr
if not only2file and all([not isinstance(hdler, _logging.StreamHandler) for hdler in logger.handlers]):
# Add the output handler.
_handler = _logging.StreamHandler(_logging_target)
# _handler.setFormatter(_logging.Formatter(_logging_fmt, None))
logger.addHandler(_handler)
if file_name is not None:
_file_handler = _logging.FileHandler(file_name)
logger.addHandler(_file_handler)
return logger
finally:
_logger_lock.release()
def warn(msg, *args, **kwargs):
extra = {'nndct_prefix': _log_prefix(WARN)}
get_logger().warning(msg, *args, extra=extra, **kwargs) | null |
23,189 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging as _logging
import os as _os
import sys as _sys
import time as _time
import traceback as _traceback
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import WARN
from logging import NOTSET
import threading
from .option_list import NndctOption
from nndct_shared.base import SingletonMeta
from .msg_code import QError, QWarning
def _log_prefix(level, timestamp=None, file_and_line=None):
"""Generate a nndct logline prefix."""
# pylint: disable=global-variable-not-assigned
global _level_names
# pylint: enable=global-variable-not-assigned
# Record current time
now = timestamp or _time.time()
now_tuple = _time.localtime(now)
now_microsecond = int(1e6 * (now % 1.0))
(filename, line) = file_and_line or _get_file_and_line()
basename = _os.path.basename(filename)
# Severity string
severity = 'I'
if level in _level_names:
severity = _level_names[level][0]
s = '%c%02d%02d %02d:%02d:%02d.%06d %s:%d]' % (
severity,
now_tuple[1], # month
now_tuple[2], # day
now_tuple[3], # hour
now_tuple[4], # min
now_tuple[5], # sec
now_microsecond,
basename,
line)
return s
def get_logger(name=None, level=None, file_name=None, only2file=False):
"""Return logger instance."""
# global _logger
# Use double-checked locking to avoid taking lock unnecessarily.
# if _logger:
# return _logger
_logger_lock.acquire()
try:
# Scope the TensorFlow logger to not conflict with users' loggers.
logger = _logging.getLogger(name)
if level:
logger.setLevel(_logging.INFO)
else:
logger.setLevel(1)
# Override findCaller on the logger to skip internal helper functions
logger.findCaller = _logger_find_caller
# Don't further configure the TensorFlow logger if the root logger is
# already configured. This prevents double logging in those cases.
if not _logging.getLogger().handlers:
# Determine whether we are in an interactive environment
_interactive = False
try:
# This is only defined in interactive shells.
if _sys.ps1:
_interactive = True
except AttributeError:
# Even now, we may be in an interactive shell with `python -i`.
_interactive = _sys.flags.interactive
# If we are in an interactive environment (like Jupyter), set loglevel
# to INFO and pipe the output to stdout.
if _interactive:
#logger.setLevel(INFO)
_logging_target = _sys.stdout
else:
_logging_target = _sys.stderr
if not only2file and all([not isinstance(hdler, _logging.StreamHandler) for hdler in logger.handlers]):
# Add the output handler.
_handler = _logging.StreamHandler(_logging_target)
# _handler.setFormatter(_logging.Formatter(_logging_fmt, None))
logger.addHandler(_handler)
if file_name is not None:
_file_handler = _logging.FileHandler(file_name)
logger.addHandler(_file_handler)
return logger
finally:
_logger_lock.release()
def fatal(msg, *args, **kwargs):
extra = {'nndct_prefix': _log_prefix(FATAL)}
get_logger().fatal(msg, *args, extra=extra, **kwargs) | null |
23,190 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging as _logging
import os as _os
import sys as _sys
import time as _time
import traceback as _traceback
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import WARN
from logging import NOTSET
import threading
from .option_list import NndctOption
from nndct_shared.base import SingletonMeta
from .msg_code import QError, QWarning
def get_logger(name=None, level=None, file_name=None, only2file=False):
"""Return logger instance."""
# global _logger
# Use double-checked locking to avoid taking lock unnecessarily.
# if _logger:
# return _logger
_logger_lock.acquire()
try:
# Scope the TensorFlow logger to not conflict with users' loggers.
logger = _logging.getLogger(name)
if level:
logger.setLevel(_logging.INFO)
else:
logger.setLevel(1)
# Override findCaller on the logger to skip internal helper functions
logger.findCaller = _logger_find_caller
# Don't further configure the TensorFlow logger if the root logger is
# already configured. This prevents double logging in those cases.
if not _logging.getLogger().handlers:
# Determine whether we are in an interactive environment
_interactive = False
try:
# This is only defined in interactive shells.
if _sys.ps1:
_interactive = True
except AttributeError:
# Even now, we may be in an interactive shell with `python -i`.
_interactive = _sys.flags.interactive
# If we are in an interactive environment (like Jupyter), set loglevel
# to INFO and pipe the output to stdout.
if _interactive:
#logger.setLevel(INFO)
_logging_target = _sys.stdout
else:
_logging_target = _sys.stderr
if not only2file and all([not isinstance(hdler, _logging.StreamHandler) for hdler in logger.handlers]):
# Add the output handler.
_handler = _logging.StreamHandler(_logging_target)
# _handler.setFormatter(_logging.Formatter(_logging_fmt, None))
logger.addHandler(_handler)
if file_name is not None:
_file_handler = _logging.FileHandler(file_name)
logger.addHandler(_file_handler)
return logger
finally:
_logger_lock.release()
The provided code snippet includes necessary dependencies for implementing the `get_verbosity` function. Write a Python function `def get_verbosity()` to solve the following problem:
Return how much logging output will be produced.
Here is the function:
def get_verbosity():
"""Return how much logging output will be produced."""
return get_logger().getEffectiveLevel() | Return how much logging output will be produced. |
23,191 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging as _logging
import os as _os
import sys as _sys
import time as _time
import traceback as _traceback
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import WARN
from logging import NOTSET
import threading
from .option_list import NndctOption
from nndct_shared.base import SingletonMeta
from .msg_code import QError, QWarning
def get_logger(name=None, level=None, file_name=None, only2file=False):
"""Return logger instance."""
# global _logger
# Use double-checked locking to avoid taking lock unnecessarily.
# if _logger:
# return _logger
_logger_lock.acquire()
try:
# Scope the TensorFlow logger to not conflict with users' loggers.
logger = _logging.getLogger(name)
if level:
logger.setLevel(_logging.INFO)
else:
logger.setLevel(1)
# Override findCaller on the logger to skip internal helper functions
logger.findCaller = _logger_find_caller
# Don't further configure the TensorFlow logger if the root logger is
# already configured. This prevents double logging in those cases.
if not _logging.getLogger().handlers:
# Determine whether we are in an interactive environment
_interactive = False
try:
# This is only defined in interactive shells.
if _sys.ps1:
_interactive = True
except AttributeError:
# Even now, we may be in an interactive shell with `python -i`.
_interactive = _sys.flags.interactive
# If we are in an interactive environment (like Jupyter), set loglevel
# to INFO and pipe the output to stdout.
if _interactive:
#logger.setLevel(INFO)
_logging_target = _sys.stdout
else:
_logging_target = _sys.stderr
if not only2file and all([not isinstance(hdler, _logging.StreamHandler) for hdler in logger.handlers]):
# Add the output handler.
_handler = _logging.StreamHandler(_logging_target)
# _handler.setFormatter(_logging.Formatter(_logging_fmt, None))
logger.addHandler(_handler)
if file_name is not None:
_file_handler = _logging.FileHandler(file_name)
logger.addHandler(_file_handler)
return logger
finally:
_logger_lock.release()
The provided code snippet includes necessary dependencies for implementing the `set_verbosity` function. Write a Python function `def set_verbosity(v)` to solve the following problem:
Sets the threshold for what messages will be logged.
Here is the function:
def set_verbosity(v):
"""Sets the threshold for what messages will be logged."""
get_logger().setLevel(v) | Sets the threshold for what messages will be logged. |
23,192 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging as _logging
import os as _os
import sys as _sys
import time as _time
import traceback as _traceback
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import WARN
from logging import NOTSET
import threading
from .option_list import NndctOption
from nndct_shared.base import SingletonMeta
from .msg_code import QError, QWarning
def log(level, msg, *args, **kwargs):
def min_vlog_level():
def vlog(level, msg, *args, **kwargs):
if level <= min_vlog_level():
log(level, msg, *args, **kwargs) | null |
23,193 | def set_kwargs_or_defaults(obj, kwargs, default_attrs=None, keys=None):
if default_attrs:
keys = keys or default_attrs.keys()
attrs_dict = get_kwargs_or_defaults(kwargs, default_attrs, keys)
else:
keys = keys or kwargs.keys()
attrs_dict = kwargs
for k, v in attrs_dict.items():
setattr(obj, k, v)
def nndct_pre_processing(init_func):
def wrapper(obj, *args, **kwargs):
if hasattr(obj, 'default_kwargs'):
set_kwargs_or_defaults(obj, kwargs, obj.default_kwargs)
if hasattr(obj, 'indexed_title'):
assert isinstance(obj.indexed_title, list)
for key in obj.indexed_title:
setattr(obj, 'POS_' + key.upper(), obj.indexed_title.index(key))
if hasattr(obj, 'default_commanders'):
for k, v in obj.default_commanders.items():
k.register(obj, v)
init_func(obj, *args, **kwargs)
return wrapper | null |
23,194 |
def not_implement(func):
def wrapper(obj, *args, **kwargs):
func(obj, *args, **kwargs)
raise NotImplemented("{} {}".format(obj, func.__name__))
return wrapper | null |
23,195 | import numpy as np
from nndct_shared.base import NNDCT_OP
def get_batchnorm_params(param_list, param_getter, center=True, scale=True):
#order: gamma,beta,mean,var
if all(param_getter(p) is not None for p in param_list):
param_shape = param_getter(param_list[-1]).shape
bn_params = []
if center and scale:
bn_params = [param_getter(p) for p in param_list]
elif center:
bn_params = [np.ones(param_shape),
param_getter(param_list[0])
] + [param_getter(p) for p in param_list[-2:]]
elif scale:
    bn_params = [param_getter(param_list[0]),
np.zeros(param_shape)
] + [param_getter(p) for p in param_list[-2:]]
if len(bn_params) == 2:
#no mean and var
bn_params.extend([np.zeros(param_shape), np.ones(param_shape)])
else:
bn_params = [None] * 4
assert len(
bn_params
) == 4, "batch norm should has 4 variables: gamma, beta, mean, var, please check!"
return bn_params | null |
23,196 | import numpy as np
from nndct_shared.base import NNDCT_OP
def get_batchnorm_param_names(param_list, center=True, scale=True):
if center and scale:
assert len(
param_list) == 4, "expect 4 parameters names, got " + str(param_list)
return {
'gamma': param_list[0],
'beta': param_list[1],
'mean': param_list[2],
'var': param_list[3]
}
elif center:
assert len(
param_list) == 3, "expect 3 parameters names, got " + str(param_list)
return {'beta': param_list[0], 'mean': param_list[1], 'var': param_list[2]}
elif scale:
assert len(
param_list) == 3, "expect 3 parameters names, got " + str(param_list)
return {'gamma': param_list[0], 'mean': param_list[1], 'var': param_list[2]} | null |
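Example with hypothetical PyTorch-style parameter names (center and scale both enabled):

names = get_batchnorm_param_names(['bn.weight', 'bn.bias', 'bn.running_mean', 'bn.running_var'])
print(names['gamma'], names['var'])   # bn.weight bn.running_var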
23,197 | import numpy as np
from nndct_shared.base import NNDCT_OP
def get_in_out_channel_idx(ndim, optype, data_formats):
#TODO: same shape with different format, is this possible?
if ndim == 1:
return 0, 0
if optype == NNDCT_OP.CONV2D:
if data_formats[optype] == 'HWIO':
in_idx, out_idx = 2, 3
elif data_formats[optype] == 'OIHW':
in_idx, out_idx = 1, 0
else:
raise Exception("data format of conv2d kernel {} is not supported".format(
data_formats[NNDCT_OP.CONV2D]))
elif optype == NNDCT_OP.DEPTHWISE_CONV2D:
if data_formats[optype] == 'HWIO':
in_idx, out_idx = 2, 2
elif data_formats[optype] == 'OIHW':
in_idx, out_idx = 1, 1
else:
raise Exception(
"data format of depthwise_conv2d kernel {} is not supported".format(
data_formats[NNDCT_OP.CONV2D]))
elif optype in [NNDCT_OP.DENSE, NNDCT_OP.BASIC_LSTM]:
if data_formats[optype] == 'IO':
in_idx, out_idx = 0, 1
elif data_formats[optype] == 'OI':
in_idx, out_idx = 1, 0
else:
raise Exception("data format of 2 dim mat {} is not supported".format(
          data_formats[optype]))
else:
raise Exception("unexpected optype: " + str(optype))
return in_idx, out_idx
def get_tensor_out_dim(tensor, optype, data_formats):
_, out_idx = get_in_out_channel_idx(tensor.ndim, optype, data_formats)
return tensor.shape[out_idx] | null |
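Example lookup with a hypothetical data_formats mapping: for an OIHW conv2d kernel the input-channel axis is 1 and the output-channel axis is 0.

data_formats = {NNDCT_OP.CONV2D: 'OIHW'}
print(get_in_out_channel_idx(4, NNDCT_OP.CONV2D, data_formats))   # (1, 0)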
23,198 | import numpy as np
from nndct_shared.base import NNDCT_OP
def get_in_out_channel_idx(ndim, optype, data_formats):
#TODO: same shape with different format, is this possible?
if ndim == 1:
return 0, 0
if optype == NNDCT_OP.CONV2D:
if data_formats[optype] == 'HWIO':
in_idx, out_idx = 2, 3
elif data_formats[optype] == 'OIHW':
in_idx, out_idx = 1, 0
else:
raise Exception("data format of conv2d kernel {} is not supported".format(
data_formats[NNDCT_OP.CONV2D]))
elif optype == NNDCT_OP.DEPTHWISE_CONV2D:
if data_formats[optype] == 'HWIO':
in_idx, out_idx = 2, 2
elif data_formats[optype] == 'OIHW':
in_idx, out_idx = 1, 1
else:
raise Exception(
"data format of depthwise_conv2d kernel {} is not supported".format(
data_formats[NNDCT_OP.CONV2D]))
elif optype in [NNDCT_OP.DENSE, NNDCT_OP.BASIC_LSTM]:
if data_formats[optype] == 'IO':
in_idx, out_idx = 0, 1
elif data_formats[optype] == 'OI':
in_idx, out_idx = 1, 0
else:
raise Exception("data format of 2 dim mat {} is not supported".format(
          data_formats[optype]))
else:
raise Exception("unexpected optype: " + str(optype))
return in_idx, out_idx
def get_tensor_in_dim(tensor, optype, data_formats):
in_idx, _ = get_in_out_channel_idx(tensor.ndim, optype, data_formats)
return tensor.shape[in_idx] | null |
23,199 | import numpy as np
from nndct_shared.base import NNDCT_OP
def delete_in_out_channel_indexs(data,
in_idx=None,
out_idx=None,
in_channel_array=None,
out_channel_array=None):
if in_idx is not None and in_channel_array is not None and not (
in_idx == out_idx and out_channel_array is not None):
data = np.delete(data, in_channel_array, axis=in_idx)
if out_idx is not None and out_channel_array is not None:
data = np.delete(data, out_channel_array, axis=out_idx)
return data | null |
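Example: pruning output channels 1 and 3 from a small (out, in) weight matrix.

import numpy as np
w = np.arange(8).reshape(4, 2)          # 4 output channels, 2 input channels
print(delete_in_out_channel_indexs(w, in_idx=1, out_idx=0, out_channel_array=np.array([1, 3])))
# [[0 1]
#  [4 5]]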
23,200 | import numpy as np
from nndct_shared.base import NNDCT_OP
def insert_in_out_channel_indexs(data,
in_idx=None,
out_idx=None,
in_channel_array=None,
out_channel_array=None):
if in_idx is not None and in_channel_array is not None and not (
in_idx == out_idx and out_channel_array is not None):
for pos in sorted(in_channel_array.tolist()):
data = np.insert(data, pos, 0, axis=in_idx)
if out_idx is not None and out_channel_array is not None:
for pos in sorted(out_channel_array.tolist()):
data = np.insert(data, pos, 0, axis=out_idx)
return data | null |
23,201 | import numpy as np
from nndct_shared.base import NNDCT_OP
def expand_in_out_channel_indexs(data,
in_idx=None,
out_idx=None,
in_channel_array=None,
out_channel_array=None):
# assert len(data.shape) in [1,2,4], 'unexpected param data shape'
in_dim = None
out_dim = None
if in_channel_array is not None and in_idx is not None and not (
in_idx == out_idx and out_channel_array is not None):
in_dim = data.shape[in_idx] + len(in_channel_array)
if out_idx is not None and out_channel_array is not None:
out_dim = data.shape[out_idx] + len(out_channel_array)
assert in_dim is not None or out_dim is not None
expand_shape = [0] * len(data.shape)
expand_idxs = [0] * len(data.shape)
for idx, dim in enumerate(data.shape):
if in_dim is not None and idx == in_idx:
expand_shape[idx] = in_dim
idx_in_channel = sorted(
np.array(list(set(range(in_dim)) - set(in_channel_array))))
expand_idxs[idx] = idx_in_channel
elif out_dim is not None and idx == out_idx:
expand_shape[idx] = out_dim
idx_out_channel = sorted(
np.array(list(set(range(out_dim)) - set(out_channel_array))))
expand_idxs[idx] = idx_out_channel
else:
expand_shape[idx] = dim
expand_idxs[idx] = np.array(range(dim))
expand_data = np.zeros(expand_shape, dtype=data.dtype)
expand_data[np.ix_(*expand_idxs)] = data
return expand_data | null |
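Example: re-inserting zero rows at the output-channel positions (1 and 3) that were pruned away, restoring the original shape.

import numpy as np
w = np.array([[0., 1.], [4., 5.]])      # pruned weight, 2 remaining output channels
print(expand_in_out_channel_indexs(w, out_idx=0, out_channel_array=np.array([1, 3])))
# [[0. 1.]
#  [0. 0.]
#  [4. 5.]
#  [0. 0.]]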
23,202 | from typing import TypeVar, NoReturn, Optional, Iterator, List
from .option_list import NndctOption
from .option_def import Option, T
class NndctOption(object):
nndct_help = Option(name="help", dtype=bool, default=False, action="store_true",
help="list all api usage description")
nndct_quant_off = Option(name="quant_off", dtype=bool, default=False, action="store_true",
help="disable quantization flow")
nndct_option_list = Option(name="option_list", dtype=bool, default=False, action="store_true",
help="list all the options in nndct")
nndct_parse_debug = Option(name="parse_debug", dtype=int, default=0, env="NNDCT_PARSE_DEBUG",
help="logging graph, 1: torch raw graph, 2: nndct graph 3: nndct quant graph")
nndct_logging_level = Option(name="logging_level", dtype=int, default=0, help="logging level")
nndct_quant_mode = Option(name="quant_mode", dtype=int, default=0,
help="quant mode, 1:calibration, 2:quantization")
nndct_dump_float_format = Option(name="dump_float_format", dtype=int, default=0,
help="deploy check data format, 0: bin, 1: txt")
nndct_record_slow_mode = Option(name="record_slow_mode", dtype=bool, default=False, action="store_true",
help="record outputs every iteration")
nndct_quant_opt = Option(name="quant_opt", dtype=int, default=3, help="quant opt level")
nndct_relu6_replace = Option(name="relu6_replace", dtype=str, default='relu', help="relu6 replace operator")
nndct_equalization = Option(name="equalization", dtype=bool, default=True, action="store_true",
help="enable weights equalization")
# nndct_wes = Option(name="weights_equalizing_shift", dtype=bool, default=False, action="store_true",
# help="enable weights equalizing shift")
# nndct_wes_in_cle = Option(name="weights_equalizing_shift in cle", dtype=bool, default=False, action="store_true",
# help="enable weights equalizing shift in cle")
nndct_param_corr = Option(name="param_corr", dtype=bool, default=True, action="store_true",
help="enable parameter correction")
nndct_param_corr_rate = Option(name="param_corr_rate", dtype=float, default=0.05, help="parameter correction rate")
nndct_cv_app = Option(name="cv_app", dtype=bool, default=True, action="store_true", help="cv application")
nndct_finetune_lr_factor = Option(name="finetune_lr_factor", dtype=float, default=0.01, help="finetune learning rate factor")
nndct_partition_mode = Option(name="partition_mode", dtype=int, default=0,
help="0: quant stub controled. 1: custom op controled")
nndct_stat = Option(name="stat", dtype=int, default=0, help="quantizer statistic level")
nndct_jit_script_mode = Option(name="jit_script_mode", dtype=bool, default=False, action="store_true", help="enable torch script parser")
nndct_diffs_mode = Option(name="diffs_mode", dtype=str, default='mse', help="diffs_mode: mse, maxmin")
nndct_ft_mode = Option(name="ft_mode", dtype=int, default=1, help="1: mix mode 0: cache mode")
nndct_visualize = Option(name="visualize", dtype=bool, default=False, action="store_true", help="visualize tensors")
nndct_dump_no_quant_part = Option(name="dump_no_quant_part", dtype=bool, default=False, action="store_true", help="dump no quantized nodes")
nndct_max_fix_position = Option(name="max_fix_position", dtype=int, default=12, help="maximum of fix position")
nndct_use_torch_quantizer = Option(name="use_torch_quantizer", dtype=bool, default=False, action="store_true", help="enable torch quantizer")
nndct_jit_trace = Option(name="jit_trace", dtype=bool, default=False, action="store_true", env="NNDCT_JIT_TRACE", help="parse graph from script tracing")
nndct_jit_script = Option(name="jit_script", dtype=bool, default=False, action="store_true", help="parse graph from script")
nndct_calib_histogram_bins = Option(name="calib_histogram_bins", dtype=int, default=2048, help="calibration histogram bins number")
nndct_mse_start_bin = Option(name="mse_start_bin", dtype=int, default=1536, help="mse calibration method start bin")
nndct_mse_stride = Option(name="mse_stride", dtype=int, default=16, help="mse calibration method stride")
nndct_entropy_start_bin = Option(name="entropy_start_bin", dtype=int, default=1536, help="entropy calibration method start bin")
nndct_entropy_stride = Option(name="entropy_stride", dtype=int, default=16, help="entropy calibration method stride")
nndct_convert_relu6_to_relu = Option(name="convert_relu6_to_relu", dtype=bool, default=False, help="convert relu6 to relu")
nndct_convert_sigmoid_to_hsigmoid = Option(name="convert_sigmoid_to_hsigmoid", dtype=bool, default=False, action="store_true", help="convert sigmoid to hsigmoid")
nndct_convert_silu_to_hswish = Option(name="convert_silu_to_hswish", dtype=bool, default=False, action="store_true", help="convert silu to hswish")
nndct_keep_first_last_layer_accuracy = Option(name="keep_first_last_layer_accuracy", dtype=bool, default=False, help="keep accuracy of first and last layer")
nndct_keep_add_layer_accuracy = Option(name="keep_add_layer_accuracy", dtype=bool, default=False, help="keep accuracy of add layer")
nndct_avg_pool_approximate = Option(name="avg_pool_approximate", dtype=bool, default=True, action="store_true", help="enable average pooling approximate for dpu")
nndct_leaky_relu_approximate = Option(name="leaky_relu_approximate", dtype=bool, default=True, action="store_true", help="enable leaky relu approximate for dpu")
nndct_conv_bn_merge = Option(name="conv_bn_merge", dtype=bool, default=True, action="store_true", help="enable conv and bn merge")
nndct_input_quant_only = Option(name="input_quant_only", dtype=bool, default=False, action="store_false", help="only quantize the input")
nndct_tensorrt_strategy = Option(name="tensorrt_strategy", dtype=bool, default=False, action="store_true", help="use quantization strategy as tensorrt")
nndct_tensorrt_quant_algo = Option(name="tensorrt_quant_algo", dtype=bool, default=False, action="store_true", help="use tensorrt quantization algorithm")
nndct_calibration_local = Option(name="calibration_local", dtype=bool, default=True, action="store_true", help="calibration in local batch data")
nndct_change_concat_input_fix = Option(name="change_concat_input_fix", dtype=bool, default=False, action="store_true", help="change concat input nodes fix point to be the same as concat output node")
nndct_change_pool_input_fix = Option(name="change_pool_input_fix", dtype=bool, default=False, action="store_true", help="change pooling input nodes fix point to be the same as their output node")
nndct_change_add_input_fix = Option(name="change_add_input_fix", dtype=bool, default=False, action="store_true", help="change add input nodes fix point to be the identical")
nndct_insert_concat_input_fix = Option(name="insert_concat_input_fix", dtype=bool, default=False, action="store_true", help="insert concat input nodes fix point to be the same as concat output node")
nndct_export_jit = Option(name="export_jit", dtype=bool, default=False, action="store_true", env="NNDCT_EXPORT_JIT", help="export quant script by inserting fixneuron")
nndct_deploy_check = Option(name="deploy_check", dtype=bool, default=False, action="store_true", help="dump deploy data in forward process")
nndct_input_check = Option(name="input_check", dtype=bool, default=False, action="store_true", help="dump input float data in forward process")
nndct_op_tanh_sigmoid_mode = Option(name="tanh_sigmoid_mode", dtype=str, default='quant_input_output', help="Tanh/sigmoid quantization mode: quant_input_output, table_look_up, simulation, aie2_lut_16bw")
nndct_op_softmax_mode = Option(name="softmax_mode", dtype=str, default='quant_input_output', help="Softmax quantization mode: quant_input_output, hardware_pl, liyi, aie2_lut_16bw, bert_8bw, ipu_8bw")
nndct_op_logsoftmax_mode = Option(name="logsoftmax_mode", dtype=str, default='quant_input_output', help="Logsoftmax quantization mode: quant_input_output, aie2_lut_16bw")
nndct_op_gelu_mode = Option(name="gelu_mode", dtype=str, default='quant_input_output', help="GELU quantization mode: quant_input_output, dynamic_table")
nndct_op_layernorm_mode = Option(name="layernorm_mode", dtype=str, default='quant_input_output', help="Layernorm quantization mode: quant_input_output, aie2_16bw, bert_8bw")
nndct_ip_asr = Option(name="ip_asr", dtype=bool, default=False, action="store_true", help="asr quant method")
nndct_ip_v70_bert = Option(name="ip_v70_bert", dtype=bool, default=False, action="store_true", help="bert v70 quant method")
nndct_ip_v70_bert_qat = Option(name="ip_v70_bert_qat", dtype=bool, default=False, action="store_true", help="bert v70 quant method")
nndct_use_old_inspector = Option(name="use_old_inspector", dtype=bool, default=False, action="store_true", env="NNDCT_USE_OLD_INSPECTOR", help="switch to old inspector")
nndct_calib_before_finetune = Option(name="calib_before_finetune", dtype=bool, default=False, action="store_true", help="calibration before fast finetune")
nndct_inspect_debug = Option(name="inspect_debug", dtype=bool, default=False, action="store_true", env="NNDCT_INSPECT_DEBUG", help="turn on inspector")
nndct_op_instancenorm_mode = Option(name="instancenorm_mode", dtype=str, default='quant_input_output', help="Instancenorm quantization mode: quant_input_output, ipu_8bw")
nndct_op_groupnorm_mode = Option(name="groupnorm_mode", dtype=str, default='quant_input_output', help="Groupnorm quantization mode: quant_input_output, ipu_8bw")
nndct_native_onnx = Option(name="native_onnx", dtype=bool, default=False, action="store_true", help="export native quant-dequant onnx models")
nndct_inspect_test = Option(name="inspect_test", dtype=bool, default=False, action="store_true", env="NNDCT_INSPECT_TEST", help="embed target related test in torch quantizer")
nndct_target = Option(name="target", dtype=str, default="", env="NNDCT_TARGET", help="target name")
nndct_traversal_graph_mode = Option(name="nndct_traversal_graph_mode", dtype=int, default=0, env="NNDCT_GRAPH_SEARCH",help="0: auto, 1:recursion, 2: iteration")
nndct_op_sqrt_mode = Option(name="sqrt_mode", dtype=str, default='quant_input_output', help="sqrt quantization mode: quant_input_output, ipu_8bw")
nndct_onnx_opset_version = Option(name="onnx_opset_version", dtype=int, default=-1, help="opset_version of dumped onnx graph")
nndct_only_int_quant = Option(name="only_int_quant", dtype=bool, default=True, help="only int datatype quantization included")
nndct_gemm88 = Option(name="gemm88", dtype=bool, default=False, action="store_true", help="only quant gemm88 and matmul88")
nndct_pooling_split_mode = Option(name="nndct_pooling_split_mode", dtype=bool, default=False, help="default big pooling will split to small pooling")
nndct_fx_mode = Option(name="fx_mode", dtype=bool, default=False, action="store_true", env="NNDCT_FX_MODE", help="turn on fx mode")
class Option(object):
"""NNDCT option definition.
Attribute:
name(str): option name
dtype(str, int, float, bool): option type
default(T): default value of option
action(str): 'store_true' / 'store_false' only work when dtype is 'bool' [default=None]
help(str): description of option [default=None]
framework(str): 'torch' / 'tensorflow' / 'all' [default='all']
Raises:
DefineOptionError
"""
def __init__(self, name: str, dtype: type, default: T, action: Optional[str] = None, framework: str = "all", help: Optional[str] = None, env=None):
self._name = _OPTION_PREFFIX + name
self._dtype = dtype
self._default = default
self._action = action
self._framework = framework
self._help = help
self._env = env
try:
self._check_attribute_validataion_()
except DefineOptionError as e:
print(e)
_sys.exit(1)
def __str__(self):
return f"""--{self._name} : {self._help} (default={self._default})"""
def _check_attribute_validataion_(self):
if self._dtype not in [str, int, float, bool]:
raise DefineOptionError(self._name, msg=r"The dtype should be 'int/float/bool/string'.")
    if self._action not in [None, "store_true", "store_false"]:
      raise DefineOptionError(self._name, msg=r"The action value should be 'store_true' / 'store_false'.")
    if self._framework not in ["tensorflow", "torch", "all"]:
      raise DefineOptionError(self._name, msg=r"The framework should be 'tensorflow' / 'torch' / 'all'.")
    if type(self._default) != self._dtype:
      raise DefineOptionError(self._name, msg=r"The default value type should be the same as dtype.")
if self._dtype != bool and self._action is not None:
raise DefineOptionError(self._name, msg=r"The action is only valid for bool type option.")
def get_env_value(self):
if self._dtype == str:
return os.getenv(self._env, default=self._default)
elif self._dtype in [int, float]:
data = os.getenv(self._env, default=self._default)
return self._dtype(data)
elif self._dtype == bool:
data = os.getenv(self._env)
if data is None:
return self._default
else:
return {"true": True,
"false": False,
"0": False}.get(data.lower(), True)
  @property
  def dtype(self):
    return self._dtype
  @property
  def action(self):
    return self._action
  @property
  def framework(self):
    return self._framework
  @property
  def value(self):
    if hasattr(self, '_value'):
      return self._value
    elif self._env is not None:
      return self.get_env_value()
    else:
      return self._default
  @value.setter
  def value(self, value):
    if value is None:
      self._value = True if self._action == "store_true" else False
    else:
      self._value = value
def get_all_options() -> Iterator:
for _, option in NndctOption.__dict__.items():
if isinstance(option, Option):
yield option | null |
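A minimal usage sketch for the Option class and get_all_options above; the option name "demo_flag" is hypothetical and the printed prefix depends on the module's _OPTION_PREFFIX constant, which is assumed here:

opt = Option(name="demo_flag", dtype=bool, default=False, action="store_true",
             help="hypothetical flag, for illustration only")
print(opt)            # --<prefix>demo_flag : hypothetical flag, for illustration only (default=False)
opt.value = None      # with action="store_true", passing the bare flag resolves to True
assert opt.value is True
for registered in get_all_options():
  print(registered)   # prints every Option attribute defined on NndctOption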
23,203 | from typing import TypeVar, NoReturn, Optional, Iterator, List
from .option_list import NndctOption
from .option_def import Option, T
class NndctOption(object):
nndct_help = Option(name="help", dtype=bool, default=False, action="store_true",
help="list all api usage description")
nndct_quant_off = Option(name="quant_off", dtype=bool, default=False, action="store_true",
help="disable quantization flow")
nndct_option_list = Option(name="option_list", dtype=bool, default=False, action="store_true",
help="list all the options in nndct")
nndct_parse_debug = Option(name="parse_debug", dtype=int, default=0, env="NNDCT_PARSE_DEBUG",
                             help="logging graph, 1: torch raw graph, 2: nndct graph, 3: nndct quant graph")
nndct_logging_level = Option(name="logging_level", dtype=int, default=0, help="logging level")
nndct_quant_mode = Option(name="quant_mode", dtype=int, default=0,
help="quant mode, 1:calibration, 2:quantization")
nndct_dump_float_format = Option(name="dump_float_format", dtype=int, default=0,
help="deploy check data format, 0: bin, 1: txt")
nndct_record_slow_mode = Option(name="record_slow_mode", dtype=bool, default=False, action="store_true",
help="record outputs every iteration")
nndct_quant_opt = Option(name="quant_opt", dtype=int, default=3, help="quant opt level")
nndct_relu6_replace = Option(name="relu6_replace", dtype=str, default='relu', help="relu6 replace operator")
nndct_equalization = Option(name="equalization", dtype=bool, default=True, action="store_true",
help="enable weights equalization")
# nndct_wes = Option(name="weights_equalizing_shift", dtype=bool, default=False, action="store_true",
# help="enable weights equalizing shift")
# nndct_wes_in_cle = Option(name="weights_equalizing_shift in cle", dtype=bool, default=False, action="store_true",
# help="enable weights equalizing shift in cle")
nndct_param_corr = Option(name="param_corr", dtype=bool, default=True, action="store_true",
help="enable parameter correction")
nndct_param_corr_rate = Option(name="param_corr_rate", dtype=float, default=0.05, help="parameter correction rate")
nndct_cv_app = Option(name="cv_app", dtype=bool, default=True, action="store_true", help="cv application")
nndct_finetune_lr_factor = Option(name="finetune_lr_factor", dtype=float, default=0.01, help="finetune learning rate factor")
nndct_partition_mode = Option(name="partition_mode", dtype=int, default=0,
help="0: quant stub controled. 1: custom op controled")
nndct_stat = Option(name="stat", dtype=int, default=0, help="quantizer statistic level")
nndct_jit_script_mode = Option(name="jit_script_mode", dtype=bool, default=False, action="store_true", help="enable torch script parser")
nndct_diffs_mode = Option(name="diffs_mode", dtype=str, default='mse', help="diffs_mode: mse, maxmin")
  nndct_ft_mode = Option(name="ft_mode", dtype=int, default=1, help="1: mix mode, 0: cache mode")
nndct_visualize = Option(name="visualize", dtype=bool, default=False, action="store_true", help="visualize tensors")
nndct_dump_no_quant_part = Option(name="dump_no_quant_part", dtype=bool, default=False, action="store_true", help="dump no quantized nodes")
nndct_max_fix_position = Option(name="max_fix_position", dtype=int, default=12, help="maximum of fix position")
nndct_use_torch_quantizer = Option(name="use_torch_quantizer", dtype=bool, default=False, action="store_true", help="enable torch quantizer")
nndct_jit_trace = Option(name="jit_trace", dtype=bool, default=False, action="store_true", env="NNDCT_JIT_TRACE", help="parse graph from script tracing")
nndct_jit_script = Option(name="jit_script", dtype=bool, default=False, action="store_true", help="parse graph from script")
nndct_calib_histogram_bins = Option(name="calib_histogram_bins", dtype=int, default=2048, help="calibration histogram bins number")
nndct_mse_start_bin = Option(name="mse_start_bin", dtype=int, default=1536, help="mse calibration method start bin")
nndct_mse_stride = Option(name="mse_stride", dtype=int, default=16, help="mse calibration method stride")
nndct_entropy_start_bin = Option(name="entropy_start_bin", dtype=int, default=1536, help="entropy calibration method start bin")
nndct_entropy_stride = Option(name="entropy_stride", dtype=int, default=16, help="entropy calibration method stride")
nndct_convert_relu6_to_relu = Option(name="convert_relu6_to_relu", dtype=bool, default=False, help="convert relu6 to relu")
nndct_convert_sigmoid_to_hsigmoid = Option(name="convert_sigmoid_to_hsigmoid", dtype=bool, default=False, action="store_true", help="convert sigmoid to hsigmoid")
nndct_convert_silu_to_hswish = Option(name="convert_silu_to_hswish", dtype=bool, default=False, action="store_true", help="convert silu to hswish")
nndct_keep_first_last_layer_accuracy = Option(name="keep_first_last_layer_accuracy", dtype=bool, default=False, help="keep accuracy of first and last layer")
nndct_keep_add_layer_accuracy = Option(name="keep_add_layer_accuracy", dtype=bool, default=False, help="keep accuracy of add layer")
nndct_avg_pool_approximate = Option(name="avg_pool_approximate", dtype=bool, default=True, action="store_true", help="enable average pooling approximate for dpu")
nndct_leaky_relu_approximate = Option(name="leaky_relu_approximate", dtype=bool, default=True, action="store_true", help="enable leaky relu approximate for dpu")
nndct_conv_bn_merge = Option(name="conv_bn_merge", dtype=bool, default=True, action="store_true", help="enable conv and bn merge")
nndct_input_quant_only = Option(name="input_quant_only", dtype=bool, default=False, action="store_false", help="only quantize the input")
nndct_tensorrt_strategy = Option(name="tensorrt_strategy", dtype=bool, default=False, action="store_true", help="use quantization strategy as tensorrt")
nndct_tensorrt_quant_algo = Option(name="tensorrt_quant_algo", dtype=bool, default=False, action="store_true", help="use tensorrt quantization algorithm")
nndct_calibration_local = Option(name="calibration_local", dtype=bool, default=True, action="store_true", help="calibration in local batch data")
nndct_change_concat_input_fix = Option(name="change_concat_input_fix", dtype=bool, default=False, action="store_true", help="change concat input nodes fix point to be the same as concat output node")
nndct_change_pool_input_fix = Option(name="change_pool_input_fix", dtype=bool, default=False, action="store_true", help="change pooling input nodes fix point to be the same as their output node")
  nndct_change_add_input_fix = Option(name="change_add_input_fix", dtype=bool, default=False, action="store_true", help="change add input nodes fix point to be identical")
nndct_insert_concat_input_fix = Option(name="insert_concat_input_fix", dtype=bool, default=False, action="store_true", help="insert concat input nodes fix point to be the same as concat output node")
nndct_export_jit = Option(name="export_jit", dtype=bool, default=False, action="store_true", env="NNDCT_EXPORT_JIT", help="export quant script by inserting fixneuron")
nndct_deploy_check = Option(name="deploy_check", dtype=bool, default=False, action="store_true", help="dump deploy data in forward process")
nndct_input_check = Option(name="input_check", dtype=bool, default=False, action="store_true", help="dump input float data in forward process")
nndct_op_tanh_sigmoid_mode = Option(name="tanh_sigmoid_mode", dtype=str, default='quant_input_output', help="Tanh/sigmoid quantization mode: quant_input_output, table_look_up, simulation, aie2_lut_16bw")
nndct_op_softmax_mode = Option(name="softmax_mode", dtype=str, default='quant_input_output', help="Softmax quantization mode: quant_input_output, hardware_pl, liyi, aie2_lut_16bw, bert_8bw, ipu_8bw")
nndct_op_logsoftmax_mode = Option(name="logsoftmax_mode", dtype=str, default='quant_input_output', help="Logsoftmax quantization mode: quant_input_output, aie2_lut_16bw")
nndct_op_gelu_mode = Option(name="gelu_mode", dtype=str, default='quant_input_output', help="GELU quantization mode: quant_input_output, dynamic_table")
nndct_op_layernorm_mode = Option(name="layernorm_mode", dtype=str, default='quant_input_output', help="Layernorm quantization mode: quant_input_output, aie2_16bw, bert_8bw")
nndct_ip_asr = Option(name="ip_asr", dtype=bool, default=False, action="store_true", help="asr quant method")
nndct_ip_v70_bert = Option(name="ip_v70_bert", dtype=bool, default=False, action="store_true", help="bert v70 quant method")
nndct_ip_v70_bert_qat = Option(name="ip_v70_bert_qat", dtype=bool, default=False, action="store_true", help="bert v70 quant method")
nndct_use_old_inspector = Option(name="use_old_inspector", dtype=bool, default=False, action="store_true", env="NNDCT_USE_OLD_INSPECTOR", help="switch to old inspector")
nndct_calib_before_finetune = Option(name="calib_before_finetune", dtype=bool, default=False, action="store_true", help="calibration before fast finetune")
nndct_inspect_debug = Option(name="inspect_debug", dtype=bool, default=False, action="store_true", env="NNDCT_INSPECT_DEBUG", help="turn on inspector")
nndct_op_instancenorm_mode = Option(name="instancenorm_mode", dtype=str, default='quant_input_output', help="Instancenorm quantization mode: quant_input_output, ipu_8bw")
nndct_op_groupnorm_mode = Option(name="groupnorm_mode", dtype=str, default='quant_input_output', help="Groupnorm quantization mode: quant_input_output, ipu_8bw")
nndct_native_onnx = Option(name="native_onnx", dtype=bool, default=False, action="store_true", help="export native quant-dequant onnx models")
nndct_inspect_test = Option(name="inspect_test", dtype=bool, default=False, action="store_true", env="NNDCT_INSPECT_TEST", help="embed target related test in torch quantizer")
nndct_target = Option(name="target", dtype=str, default="", env="NNDCT_TARGET", help="target name")
  nndct_traversal_graph_mode = Option(name="nndct_traversal_graph_mode", dtype=int, default=0, env="NNDCT_GRAPH_SEARCH", help="0: auto, 1: recursion, 2: iteration")
nndct_op_sqrt_mode = Option(name="sqrt_mode", dtype=str, default='quant_input_output', help="sqrt quantization mode: quant_input_output, ipu_8bw")
nndct_onnx_opset_version = Option(name="onnx_opset_version", dtype=int, default=-1, help="opset_version of dumped onnx graph")
nndct_only_int_quant = Option(name="only_int_quant", dtype=bool, default=True, help="only int datatype quantization included")
nndct_gemm88 = Option(name="gemm88", dtype=bool, default=False, action="store_true", help="only quant gemm88 and matmul88")
  nndct_pooling_split_mode = Option(name="nndct_pooling_split_mode", dtype=bool, default=False, help="split big pooling into small poolings by default")
nndct_fx_mode = Option(name="fx_mode", dtype=bool, default=False, action="store_true", env="NNDCT_FX_MODE", help="turn on fx mode")
class Option(object):
"""NNDCT option definition.
Attribute:
name(str): option name
dtype(str, int, float, bool): option type
default(T): default value of option
action(str): 'store_true' / 'store_false' only work when dtype is 'bool' [default=None]
help(str): description of option [default=None]
framework(str): 'torch' / 'tensorflow' / 'all' [default='all']
Raises:
DefineOptionError
"""
def __init__(self, name: str, dtype: type, default: T, action: Optional[str] = None, framework: str = "all", help: Optional[str] = None, env=None):
self._name = _OPTION_PREFFIX + name
self._dtype = dtype
self._default = default
self._action = action
self._framework = framework
self._help = help
self._env = env
try:
self._check_attribute_validataion_()
except DefineOptionError as e:
print(e)
_sys.exit(1)
def __str__(self):
return f"""--{self._name} : {self._help} (default={self._default})"""
def _check_attribute_validataion_(self):
if self._dtype not in [str, int, float, bool]:
raise DefineOptionError(self._name, msg=r"The dtype should be 'int/float/bool/string'.")
    if self._action not in [None, "store_true", "store_false"]:
      raise DefineOptionError(self._name, msg=r"The action value should be 'store_true' / 'store_false'.")
    if self._framework not in ["tensorflow", "torch", "all"]:
      raise DefineOptionError(self._name, msg=r"The framework should be 'tensorflow' / 'torch' / 'all'.")
    if type(self._default) != self._dtype:
      raise DefineOptionError(self._name, msg=r"The default value type should be the same as dtype.")
if self._dtype != bool and self._action is not None:
raise DefineOptionError(self._name, msg=r"The action is only valid for bool type option.")
def get_env_value(self):
if self._dtype == str:
return os.getenv(self._env, default=self._default)
elif self._dtype in [int, float]:
data = os.getenv(self._env, default=self._default)
return self._dtype(data)
elif self._dtype == bool:
data = os.getenv(self._env)
if data is None:
return self._default
else:
return {"true": True,
"false": False,
"0": False}.get(data.lower(), True)
  @property
  def dtype(self):
    return self._dtype
  @property
  def action(self):
    return self._action
  @property
  def framework(self):
    return self._framework
  @property
  def value(self):
    if hasattr(self, '_value'):
      return self._value
    elif self._env is not None:
      return self.get_env_value()
    else:
      return self._default
  @value.setter
  def value(self, value):
    if value is None:
      self._value = True if self._action == "store_true" else False
    else:
      self._value = value
def add_valid_nndct_option(argv: List[str], option: str, cmd_position: int, framework: str)-> List[str]:
def _set_nndct_option(option_name: str, option_value: str) -> bool:
def _get_option_by_name() -> Optional[Option]:
return NndctOption.__dict__.get(option_name, None)
option = _get_option_by_name()
if option is None: return False
if option.framework != framework and option.framework != 'all': return False
if option.dtype == bool:
if option_value is None and option.action is None:
return False
elif option_value:
if option_value not in ["True", "False"]:
return False
option_value = True if option_value == "True" else False
option.value = option_value
return True
else:
option.value = option_value
return True
else:
try:
option_value = option.dtype(option_value)
except ValueError:
return False
else:
option.value = option_value
return True
def _is_valid_option():
return option.startswith("--")
remove_item = []
if not _is_valid_option(): return remove_item
try:
equal_symbol_idx = option.index("=")
except ValueError:
remove_next_cmd = False
option_name = option[2:]
if cmd_position == len(argv)-1:
option_value = None
elif argv[cmd_position + 1].startswith("--") or argv[cmd_position + 1].startswith("-"):
option_value = None
else:
option_value = argv[cmd_position + 1]
remove_next_cmd = True
if _set_nndct_option(option_name, option_value):
remove_item.append(option)
if remove_next_cmd: remove_item.append(option_value)
else:
if equal_symbol_idx == len(option)-1: return remove_item
option_name = option[2:equal_symbol_idx]
option_value = option[equal_symbol_idx+1:]
if _set_nndct_option(option_name, option_value):
remove_item.append(option)
return remove_item | null |
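A sketch of how add_valid_nndct_option could strip NNDCT options out of a command line; the flag spelling (--nndct_quant_mode, matching the NndctOption attribute name) and the consuming loop are assumptions, not code from the module:

argv = ["train.py", "--nndct_quant_mode", "1", "--lr", "0.01"]
removed = []
for pos, arg in enumerate(argv):
  removed.extend(add_valid_nndct_option(argv, arg, pos, framework="torch"))
argv = [a for a in argv if a not in removed]
# argv should now be ["train.py", "--lr", "0.01"], and NndctOption.nndct_quant_mode.value should be 1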
23,204 | from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP, NNDCT_DEBUG_LVL
from .log import nndct_debug_print
def get_split_sym(model_type):
if model_type == 'Nndct':
return '_'
elif model_type in ['tensorflow', 'tf-keras']:
return '/'
elif model_type == 'torch':
return '.'
raise Exception("can not find split symbol for model_type " + model_type) | null |
23,205 | from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP, NNDCT_DEBUG_LVL
from .log import nndct_debug_print
def default_scoped_name(obj):
if isinstance(obj, str):
name = obj
else:
name = obj.name
return '/'.join(name.split('/')[:-1]), name.split('/')[-1] | null |
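A quick sketch of the split performed above, on made-up names:

default_scoped_name('block1/conv1/weight')   # ('block1/conv1', 'weight')
default_scoped_name('weight')                # ('', 'weight')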
23,206 | from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP, NNDCT_DEBUG_LVL
from .log import nndct_debug_print
def get_default_name(obj):
if isinstance(obj, str):
return obj
name = getattr(obj, 'name', None)
if not name:
raise Exception("{} has no attribute name, please check!".format(obj))
return name.split(':')[0] | null |
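A short sketch; SimpleNamespace stands in for any tensor-like object carrying a name attribute (an assumption, not a type used by this module):

from types import SimpleNamespace
get_default_name('conv1/kernel:0')                        # strings are returned unchanged
get_default_name(SimpleNamespace(name='conv1/kernel:0'))  # 'conv1/kernel'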
23,207 | from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP, NNDCT_DEBUG_LVL
from .log import nndct_debug_print
def default_legal_name(name):
return name.replace('.', 'DOT').replace('/', 'SPL') | null |
23,208 | from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP, NNDCT_DEBUG_LVL
from .log import nndct_debug_print
def derive_scope_name(name, scope, split_sym, offset=0):
name_list = name.split(split_sym)
for idx in range(len(name_list)):
if name_list[idx].startswith(scope):
base_idx = idx
break
return split_sym.join(name_list[:base_idx - offset]) | null |
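A sketch of the scope derivation above, with a made-up name and scope:

derive_scope_name('model/block1/conv/weight', scope='conv', split_sym='/')            # 'model/block1'
derive_scope_name('model/block1/conv/weight', scope='conv', split_sym='/', offset=1)  # 'model'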
23,209 | from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP, NNDCT_DEBUG_LVL
from .log import nndct_debug_print
def reverse_default_legal_name(name):
return name.replace('DOT', '.').replace('SPL', '/') | null |
23,210 | from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP, NNDCT_DEBUG_LVL
from .log import nndct_debug_print
def remove_suffix(obj, suffix):
if obj is None:
return obj
if suffix is None or suffix == '':
return obj
if isinstance(suffix, str):
if isinstance(obj, str) and len(suffix) > 0 and obj.endswith(suffix):
obj = obj[:-len(suffix)]
elif isinstance(obj, dict):
obj = {k: remove_suffix(v, suffix) for k, v in obj.items()}
elif isinstance(obj, list):
obj = [remove_suffix(item, suffix) for item in obj]
return obj
elif isinstance(suffix, list):
for suf in suffix:
obj = remove_suffix(obj, suf)
return obj
else:
raise Exception('suffix {} is not string or list!'.format(suffix)) | null |
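A few illustrative calls covering the string, dict, list and multi-suffix paths:

remove_suffix('conv1_quant', '_quant')                         # 'conv1'
remove_suffix({'w': 'w_fix', 'b': ['b_fix', 'b']}, '_fix')     # {'w': 'w', 'b': ['b', 'b']}
remove_suffix('conv1_fix_quant', ['_quant', '_fix'])           # 'conv1'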
23,211 | from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP, NNDCT_DEBUG_LVL
from .log import nndct_debug_print
def remove_trans_scp_prefix(name, scp=None):
def scoped_untrans_name(name, scp):
org_name = remove_trans_scp_prefix(name, scp)
return scp + org_name | null |
23,212 | from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP, NNDCT_DEBUG_LVL
from .log import nndct_debug_print
def remove_prefix(obj, prefix):
def scoped_trans_name(name, scp):
org_name = remove_prefix(name, scp)
if org_name.startswith(NNDCT_KEYS.TRANS_SCOPE):
return scp + org_name
else:
return scp + NNDCT_KEYS.TRANS_SCOPE + '/' + org_name | null |
23,213 | from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP, NNDCT_DEBUG_LVL
from .log import nndct_debug_print
def remove_trans_scp_prefix(name, scp=None):
GLOBAL_MAP = GlobalMap()
def nndct_debug_print(string, title='', level=1):
def map_output_and_node(output, node_or_name, model_type):
if node_or_name is None:
return
if isinstance(node_or_name, str):
node_name = node_or_name
else:
node_name = node_or_name.name
node_name = remove_trans_scp_prefix(node_name)
def _do_map(output_name, node_name):
if not output_name == node_name:
if not GLOBAL_MAP.get_ele(NNDCT_KEYS.OUTPUT_TO_NODE_MAP):
GLOBAL_MAP.set_map(NNDCT_KEYS.OUTPUT_TO_NODE_MAP, {})
if not GLOBAL_MAP.get_ele(NNDCT_KEYS.NODE_TO_OUTPUT_MAP):
GLOBAL_MAP.set_map(NNDCT_KEYS.NODE_TO_OUTPUT_MAP, {})
#map output to node
output_to_node_map = GLOBAL_MAP.get_ele(NNDCT_KEYS.OUTPUT_TO_NODE_MAP)
if not output_name in output_to_node_map:
nndct_debug_print(
"<map_output_and_node> map out {} and node{}".format(
output_name, node_name),
level=NNDCT_DEBUG_LVL.BUILD_GRAPH)
output_to_node_map[output_name] = node_name
else:
assert output_to_node_map[
            output_name] == node_name, "restored node name for output_name {} is {}, but got new node name {}".format(
output_name, output_to_node_map[output_name], node_name)
#add output to list keyed by node_name
node_to_output_map = GLOBAL_MAP.get_ele(NNDCT_KEYS.NODE_TO_OUTPUT_MAP)
if not node_name in node_to_output_map:
node_to_output_map[node_name] = [output_name]
else:
node_to_output_map[node_name].append(output_name)
if isinstance(output, str):
_do_map(output, node_name) | null |
23,214 | from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP, NNDCT_DEBUG_LVL
from .log import nndct_debug_print
GLOBAL_MAP = GlobalMap()
def node_from_output(output_name, model_type):
if model_type == 'Nndct':
return output_name
if model_type == 'tensorflow':
output_name = output_name.split(':')[0]
elif model_type == 'torch':
if output_name.split('_')[-1] in ['backward', 'forward']:
output_name = ''.join(output_name.split('_')[:-1])
else:
raise KeyError("node_from_output is not available for model type " +
str(model_type))
output_map = GLOBAL_MAP.get_ele(NNDCT_KEYS.OUTPUT_TO_NODE_MAP)
if output_map and output_name in output_map:
return output_map[output_name]
return output_name | null |
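Illustrative calls, assuming OUTPUT_TO_NODE_MAP has not been populated yet, so only the model-type specific cleanup applies:

node_from_output('conv1/BiasAdd:0', 'tensorflow')   # 'conv1/BiasAdd'
node_from_output('conv1_forward', 'torch')          # 'conv1'
node_from_output('conv1', 'Nndct')                  # 'conv1'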
23,215 | from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP, NNDCT_DEBUG_LVL
from .log import nndct_debug_print
GLOBAL_MAP = GlobalMap()
def get_output_from_node(node_name, idx=-1):
node_map = GLOBAL_MAP.get_ele(NNDCT_KEYS.NODE_TO_OUTPUT_MAP)
if node_map and node_name in node_map:
return node_map[node_name][idx]
return node_name | null |
23,216 | from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP, NNDCT_DEBUG_LVL
from .log import nndct_debug_print
GLOBAL_MAP = GlobalMap()
def get_all_outputs_from_node(node_name):
node_map = GLOBAL_MAP.get_ele(NNDCT_KEYS.NODE_TO_OUTPUT_MAP)
if node_map and node_name in node_map:
return node_map[node_name]
return node_name | null |
23,217 | import os
import shutil
import json
import sys
import numpy as np
from .log import log_or_print
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP
def to_des_dict(dicts, as_des=True, extra_types={}):
assert isinstance(dicts,list) and all(isinstance(d,dict) for d in dicts),\
"dicts should be list of dictionaries, please check!"
des_dict = {}
for d in dicts:
for k, v in d.items():
if isinstance(v, np.ndarray):
v = v.tolist()
elif isinstance(v, str) and not k in ['dtype'] and as_des:
v = "'{}'".format(v)
elif k == 'shape' and v == []:
continue
elif isinstance(v, dict):
v = to_des_dict([v], as_des, extra_types)
for typek, func in extra_types.items():
if isinstance(v, typek):
v = func(v)
des_dict[k] = v
return des_dict
def dict_to_str(kwargs, connect='=', as_des=False):
if as_des:
kwargs = to_des_dict([kwargs])
return ','.join(["{}{}{}".format(k, connect, v) for k, v in kwargs.items()]) | null |
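A sketch of the two serialization helpers above on a small attribute dict:

dict_to_str({'kernel': [3, 3], 'stride': 2})           # 'kernel=[3, 3],stride=2'
dict_to_str({'mode': 'SAME', 'pad': 0}, as_des=True)   # "mode='SAME',pad=0"  (strings are quoted by to_des_dict)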
23,218 | import os
import shutil
import json
import sys
import numpy as np
from .log import log_or_print
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP
def check_diff(matA, matB, nameA, nameB, error, with_msg=True):
is_pass = True
mat = matA - matB
mat = mat / np.sqrt(matA**2 + error)
title = "{:25} VS {:25} : ".format(nameA, nameB)
res = {}
string = ''
res['max'] = mat.max()
res['min'] = mat.min()
for item in res:
string += item + ':' + str(res[item]) + ' '
if abs(res[item]) > error:
msg = "{}({}) out of tolerance({})!".format(item, res[item], error)
is_pass = False
break
if with_msg:
if is_pass:
msg = string + '(tolerance {})'.format(error)
print("{}{}{:60}{}".format('**', '[PASS] ' + title, msg, '**'))
else:
print("{}{}{:60}{}".format('**', '[FAIL] ' + title, msg, '**'))
return is_pass | null |
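A sketch of the relative-error check above on made-up data; for this pair the normalized difference stays inside the tolerance, so a [PASS] line is printed and True is returned:

a = np.array([1.0, 2.0, 3.0])
b = np.array([1.0, 2.0, 3.1])
check_diff(a, b, 'golden', 'dut', error=1e-1)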
23,219 | import os
import shutil
import json
import sys
import numpy as np
from .log import log_or_print
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP
def to_jsonstr(obj, pre_space=2):
def _json_lst_str(lst):
string = ""
for idx in range(len(lst)):
if isinstance(lst[idx], str):
string += '"{}",'.format(lst[idx])
elif isinstance(lst[idx], list):
#string += _json_lst_str(lst[idx])
string += '[{}],'.format(_json_lst_str(lst[idx]))
elif lst[idx] is None:
string += 'null,'
else:
string += str(lst[idx]) + ','
return string[:-1]
assert isinstance(obj, dict)
string = ""
for k, v in obj.items():
string += '{}"{}":'.format(pre_space * ' ', k)
if isinstance(v, list):
string += '[{}],\n'.format(_json_lst_str(v))
elif isinstance(v, str):
string += '"{}",\n'.format(v)
elif isinstance(v, dict):
string += '\n{},\n'.format(to_jsonstr(v, pre_space=pre_space + 2))
elif isinstance(v, bool):
string += '{},\n'.format('true' if v else 'false')
else:
string += '{},\n'.format(v)
return '{}{{\n{}\n{}}}'.format((pre_space - 2) * ' ', string[:-2],
(pre_space - 2) * ' ') | null |
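A sketch of the hand-rolled JSON writer above; the expected output is shown as comments:

print(to_jsonstr({'name': 'conv1', 'ksize': [3, 3], 'quantized': True}))
# {
#   "name":"conv1",
#   "ksize":[3,3],
#   "quantized":true
# }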
23,220 | import os
import shutil
import json
import sys
import numpy as np
from .log import log_or_print
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP
def load_json_obj(file_or_obj):
if isinstance(file_or_obj, str):
with open(file_or_obj, 'r') as f:
obj = json.load(f)
elif isinstance(file_or_obj, dict):
obj = file_or_obj
else:
return None
return obj | null |
23,221 | import os
import shutil
import json
import sys
import numpy as np
from .log import log_or_print
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP
def dpu_format_print(mat):
flatten_mat = mat.reshape(mat.size)
cnt = 0
while cnt < len(flatten_mat):
print(("{:0>2x}" * 16).format(*tuple(flatten_mat[cnt:cnt + 16])))
cnt += 16 | null |
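A small sketch; note that, as written, the flattened size should be a multiple of 16 or the final format call runs out of values:

data = np.arange(32, dtype=np.uint8)
dpu_format_print(data)   # prints two rows of 16 zero-padded hex bytes each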
23,222 | import os
import shutil
import json
import sys
import numpy as np
from .log import log_or_print
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP
def copy_folder_files(new_dir, old_dir):
for file_name in os.listdir(old_dir):
full_file_name = os.path.join(old_dir, file_name)
if (os.path.isfile(full_file_name)):
shutil.copy(full_file_name, new_dir)
def force_create_dir(dir_name, copy_from_dir=None):
if os.path.exists(dir_name):
shutil.rmtree(dir_name)
os.makedirs(dir_name)
if copy_from_dir:
copy_folder_files(dir_name, copy_from_dir) | null |
23,223 | import os
import shutil
import json
import sys
import numpy as np
from .log import log_or_print
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP
def print_center_edge(string, to_str=False, blank_line=0, width=120):
center_str = "{0}>>{1:40}<<{0}".format("=" * 30,
string.center(40)).center(width)
center_str += '\n' * blank_line
if to_str:
return center_str
else:
print(center_str) | null |
23,224 | import os
import shutil
import json
import sys
import numpy as np
from .log import log_or_print
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP
def log_or_print(str, logger=None):
if logger:
logger.info(str)
else:
print(str)
def basic_info(mat, name=None, logger=None, to_str=False):
if isinstance(mat, np.ndarray):
info_str = "<Array>{}[{}]: max:{}, min:{}, sum:{}".format(
'' if not name else name, mat.shape, mat.max(), mat.min(), mat.sum())
else:
info_str = "<Non_Array>{}:{}".format('' if not name else name, mat)
if to_str:
return info_str
else:
log_or_print(info_str, logger=logger) | null |
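Two illustrative calls for basic_info above, one array and one scalar:

basic_info(np.ones((2, 2)), name='ones')   # <Array>ones[(2, 2)]: max:1.0, min:1.0, sum:4.0
basic_info(3.14, name='pi')                # <Non_Array>pi:3.14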
23,225 | import os
import shutil
import json
import sys
import numpy as np
from .log import log_or_print
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP
def print_csv_format(mat):
assert mat.ndim == 2
for row in range(mat.shape[0]):
for col in range(mat.shape[1]):
print(str(mat[row, col]) + ',', end='')
print('') | null |
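A small sketch of the CSV-style dump; each row ends with a trailing comma, matching the loop above:

print_csv_format(np.array([[1, 2], [3, 4]]))
# 1,2,
# 3,4,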