id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
23,989 | import math
import itertools
from typing import List, Dict, Any, NoReturn, Tuple
import numpy as np
from functools import partial
from nndct_shared.base import NNDCT_OP, NNDCT_KEYS
from nndct_shared.nndct_graph import Tensor, Node
from .xgraph import XGraph
from nndct_shared.utils import calculate_op_scale, DataXopError
NndctQuantInfo = Dict[str, Dict[str, List[int]]]
def _pack(xgraph: XGraph, node: Node, pack_name: str, packed_item: List[Any],
          quant_config: NndctQuantInfo) -> Tuple["xir.Op", List["xir.Op"]]:
  """Collect a mixed list of tensors/scalars into a single xir `stack` op.

  Tensors are resolved to their producing ops; plain scalars become fixed
  const ops.  Returns the stack op and the list of stacked ops.
  """
  stacked_ops = []
  for idx, element in enumerate(packed_item):
    if isinstance(element, Tensor):
      stacked_ops.append(xgraph.get_op_by_name(element.node.name))
    else:
      # Scalar attribute values are materialized as 1-element float32 consts.
      stacked_ops.append(
          xgraph.create_fixed_const_op(
              name=f"{node.name}_{pack_name}_attr[{idx}]",
              data=np.array([element], dtype=np.float32),
              quant_info=quant_config))
  stack_op = xgraph.create_fixed_normal_op(
      f"{node.name}_{pack_name}_i0",
      "stack",
      quant_config,
      attrs={"axis": 0},
      input_ops={"input": stacked_ops})
  return stack_op, stacked_ops
def shape(xgraph: XGraph, node: Node, quant_config: NndctQuantInfo) -> NoReturn:
r""" nndct shape is a macro operator, including shape, stridedslice
"""
# raise NotImplementedError("shape")
input_list = []
shape_input_ops: Dict[str, List["xir.Op"]] = {}
for input in node.in_nodes:
input_op = xgraph.get_op_by_name(input)
input_list.append(input_op)
shape_input_ops["input"] = input_list
sub_op_shape = xgraph.create_fixed_normal_op(
node.name + "_i0", "shape", quant_config, input_ops=shape_input_ops)
attrs: Dict[str, Any] = {}
strided_slice_input_ops: Dict[str, List["xir.Op"]] = {}
strided_slice_input_ops["input"] = [sub_op_shape]
dim = node.node_attr(node.op.AttrName.AXIS)
attrs["begin"] = [dim]
attrs["end"] = [dim + 1]
xgraph.create_fixed_normal_op(
node.name,
"strided_slice",
quant_config,
attrs=attrs,
input_ops=strided_slice_input_ops)
class XGraph(object):
  """Builder wrapper around an xir ``Graph``.

  Normalizes op names with ``_XMODEL_NAME_PATTERN`` before handing them to
  xir, and inserts ``fix`` (quantization) ops around ops according to a
  ``NndctQuantInfo`` mapping ({'param'|'output'|'input': {name: fix info}}).
  Also handles xmodel serialization and dot-image export.
  """

  def __init__(self, name: str):
    self._graph = Graph(name)
    # Registry of const ops created through this wrapper, keyed by xir name.
    self._const_ops: Dict[str, Op] = {}
    self._ops: Dict[str, Op] = {}

  def _check_inputs(self, input_ops):
    """Fail fast if any producer op was never registered (is ``None``)."""
    if any(ip is None for ip in input_ops):
      raise RuntimeError('The input op is `None`, please check graph.')

  def create_const_op(self, name: str, data: Optional[np.ndarray]) -> NoReturn:
    """Create and register a const op holding ``data``.

    Zero-dim arrays are promoted to shape ``(1,)`` because xir const ops
    need at least one dimension.

    Raises:
      RuntimeError: if a const op with ``name`` was already created here.
    """
    # Bug fix: previously the created op was never recorded in `_const_ops`,
    # so the duplicate-name check could never fire.  Check before creating,
    # then register the new op.
    if name in self._const_ops:
      raise RuntimeError('The const op {} has already in graph'.format(name))
    if data is not None and data.ndim == 0:
      data = np.array([data], data.dtype)
    const_op = self._graph.create_const_op(name, data)
    self._const_ops[name] = const_op
    return const_op

  def create_input_transpose_ops(self, input_list: List[Op], input_tensors: 'List[base_tensor::Tensor]'):
    """Insert channel-last -> channel-first transposes before 4D/5D inputs.

    Inputs of other ranks pass through unchanged; an already-existing
    transpose op with the expected name is reused rather than re-created.
    """
    t_ops = []
    for input, tensor in zip(input_list, input_tensors):
      if tensor.ndim not in (4, 5):
        t_ops.append(input)
        continue
      attrs: Dict[str, Any] = {
          'order': [0, 3, 1, 2] if tensor.ndim == 4 else [0, 4, 3, 1, 2]
      }
      op_name = input.get_name() + NNDCT_KEYS.TRANSPOSE_OP_SUFFIX
      t_op = self.get_op_by_name(op_name)
      if t_op is None:
        t_op = self.create_normal_op(
            op_name, 'transpose', attrs=attrs, input_ops={'input': [input]})
      t_ops.append(t_op)
    return t_ops

  def create_normal_op(self,
                       name: str,
                       kind: str,
                       tensor: Optional[np.ndarray] = None,
                       attrs: Optional[Dict[str, Any]] = None,
                       input_ops: Optional[List[Op]] = None) -> Op:
    """Create a plain xir op of ``kind`` after validating its inputs."""
    if input_ops is not None:
      self._check_inputs(input_ops['input'])
    return self._graph.create_op(name, kind, attrs, input_ops)

  def create_fixed_const_op(self, name: str, data: np.ndarray,
                            quant_info: NndctQuantInfo) -> Op:
    """Create a const op, plus a `fix` op after it when quant info exists."""
    formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
    const_op = self.create_const_op(formal_name, data)
    fixed_const_op = self.create_fix_op(const_op, name, quant_info)
    return fixed_const_op if fixed_const_op else const_op

  def create_fixed_normal_op(self,
                             name: str,
                             kind: str,
                             quant_info: NndctQuantInfo,
                             tensor: Optional[np.ndarray] = None,
                             attrs: Optional[Dict[str, Any]] = None,
                             input_ops: Optional[List[Op]] = None) -> Op:
    """Create a normal op, plus a `fix` op after it when quant info exists."""
    formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
    op = self.create_normal_op(
        name=formal_name,
        kind=kind,
        tensor=tensor,
        attrs=attrs,
        input_ops=input_ops)
    post_fixed_op = self.create_fix_op(op, name, quant_info)
    return post_fixed_op if post_fixed_op else op

  def create_input_fix_ops(self, input_list: List[Op], key_name: str, quant_info: NndctQuantInfo):
    """Wrap each input op with a pre-fix op when quant info provides one."""
    pre_fix_ops = []
    for i, op in enumerate(input_list):
      pre_fix_op = self.create_fix_op(op, key_name, quant_info, id=i, post_fix=False)
      pre_fix_ops.append(pre_fix_op if pre_fix_op else op)
    return pre_fix_ops

  def create_fix_op(self, input: Op, key_name: str,
                    quant_info: NndctQuantInfo, id: Optional[int] = None, post_fix: bool = True) -> Optional[Op]:
    """Create a `fix` op after (post) or before (pre) ``input``.

    Returns ``None`` when ``quant_info`` is not a dict or records no complete
    (bit_width, fix_point) pair for ``key_name``, so callers can fall back to
    the raw op.
    """

    def _get_fix_info(name: str, quant_info: NndctQuantInfo) -> Sequence[int]:
      # Post fixes consult the param/output tables; pre fixes the input table.
      if post_fix:
        combined = ChainMap(dict(quant_info['param']), dict(quant_info['output']))
      else:
        combined = quant_info['input']
      return combined.get(name)

    if not isinstance(quant_info, dict):
      return None
    bit_info = _get_fix_info(key_name, quant_info)
    if bit_info is None or bit_info[0] is None:
      return None
    bit_width, fix_point = bit_info[0]
    if bit_width is None or fix_point is None:
      return None
    attrs: Dict[str, Any] = {
        'fix_point': fix_point,
        'bit_width': bit_width,
        'round_mode': "DPU_ROUND",
        'if_signed': True,
    }
    if post_fix:
      op_name = input.get_name() + NNDCT_KEYS.FIX_OP_SUFFIX
    else:
      formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", key_name)
      op_name = formal_name + NNDCT_KEYS.PRE_FIX_OP_SUFFIX
      if id is not None:
        op_name += f"_i{id}"
    return self.create_normal_op(
        op_name, 'fix', attrs=attrs, input_ops={'input': [input]})

  def get_op_by_name(self, name: str) -> Op:
    """Look up an op by nndct name, preferring its fixed variant if present."""
    formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
    op = self._graph.get_op(formal_name + NNDCT_KEYS.FIX_OP_SUFFIX)
    if op is None:
      op = self._graph.get_op(formal_name)
    return op

  def get_op_output_shape(self, name: str) -> List[int]:
    """Return the output dims of op ``name``; warns and returns None if absent."""
    op = self.get_op_by_name(name)
    if op:
      return op.get_output_tensor().dims
    NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, f"{name} is not in xmodel. Please check it.")

  def export_to_xmodel(self, fname: str) -> NoReturn:
    """Serialize the graph to ``fname`` plus the xmodel suffix."""
    fname += NNDCT_KEYS.XMODEL_SUFFIX
    try:
      self._graph.serialize(fname)
    except Exception:
      raise ExportXmodelError(self._graph.get_name())
    else:
      NndctScreenLogger().info(f"=>Successfully convert '{self._graph.get_name()}' to xmodel.({fname})")

  def export_to_img(self, fname: str) -> NoReturn:
    """Render the graph as a dot image; best-effort (warns instead of raising)."""
    fname += NNDCT_KEYS.XMODEL_IMAGE_SUFFIX
    try:
      # Probe for graphviz's `dot`; image export is skipped when it is absent.
      proc = subprocess.Popen("which dot", stdout=subprocess.PIPE, shell=True)
      try:
        outs, errs = proc.communicate(timeout=2)
      except subprocess.TimeoutExpired:
        proc.kill()
        outs, errs = proc.communicate()
        NndctScreenLogger().error2user(QError.PROC_TIMEOUT, f"{errs}")
        raise
      if outs:
        self._graph.save_as_dot(fname)
      else:
        NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, f"Can't find dot command in the system, please install it." \
        " Otherwise, the xmodel image will not be generated.")
    except Exception as e:
      NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, f"Failed to generate xmodel image!({str(e)})")

  def graph(self):
    """Return the underlying xir graph object."""
    return self._graph
The provided code snippet includes the necessary dependencies for implementing the `reshape` function. Write a Python function `def reshape(xgraph: XGraph, node: Node, quant_config: NndctQuantInfo) -> NoReturn` that solves the following problem: nndct reshape is a macro operator, combining a pack (stack) of the target shape with an xir reshape op. Here is the function:
def reshape(xgraph: XGraph, node: Node,
            quant_config: NndctQuantInfo) -> NoReturn:
  r""" nndct reshape is a macro operator, including pack, reshape
  """
  # Pack the target shape (tensors and/or scalars) into one stack op.
  target_shape = node.node_attr(node.op.AttrName.SHAPE)
  stack_op, _ = _pack(xgraph, node, "shape", target_shape, quant_config)
  # Resolve data inputs, skipping params already bound to the op.
  feeds = []
  for tensor in node.in_tensors:
    if node.has_bound_params() and tensor.is_param_tensor():
      continue
    lookup = tensor.name if tensor.is_param_tensor() else tensor.node.name
    feeds.append(xgraph.get_op_by_name(lookup))
  input_ops: Dict[str, List["xir.Op"]] = {
      "shape": [stack_op],
      "input": xgraph.create_input_fix_ops(feeds, node.name, quant_config),
  }
  xgraph.create_fixed_normal_op(
      node.name, "reshape", quant_config, input_ops=input_ops)
23,996 | import math
import itertools
from typing import List, Dict, Any, NoReturn, Tuple
import numpy as np
from functools import partial
from nndct_shared.base import NNDCT_OP, NNDCT_KEYS
from nndct_shared.nndct_graph import Tensor, Node
from .xgraph import XGraph
from nndct_shared.utils import calculate_op_scale, DataXopError
NndctQuantInfo = Dict[str, Dict[str, List[int]]]
def _get_xir_attr_from_node(node: Node):
  """Collect the xir-visible attrs of ``node``; None if the op has no attrs."""
  if len(node.op.attrs) == 0:
    return None
  xir_attrs: Dict[str, Any] = {}
  for attr_name, attr_value in node.op.attrs.items():
    if node.op.is_xir_attr(attr_name):
      xir_attrs[attr_name.value] = _Converter.to_xir_attr_value(
          node.op.type, attr_name.value, attr_value.value)
  return xir_attrs
def scale(xgraph, node, quant_config):
  """Convert an nndct scale node to an xir `scale` op, wiring gamma/beta."""
  input_ops: Dict[str, List["xir.Op"]] = {}
  if node.has_bound_params():
    # GAMMA feeds the 'scale' slot, BETA the 'bias' slot.
    for param_name, param_tensor in node.op.params.items():
      if param_name == node.op.ParamName.GAMMA:
        input_ops['scale'] = [xgraph.get_op_by_name(param_tensor.name)]
      if param_name == node.op.ParamName.BETA:
        input_ops['bias'] = [xgraph.get_op_by_name(param_tensor.name)]
  feeds = []
  for tensor in node.in_tensors:
    if node.has_bound_params() and tensor.is_param_tensor():
      continue
    lookup = tensor.name if tensor.is_param_tensor() else tensor.node.name
    feeds.append(xgraph.get_op_by_name(lookup))
  input_ops["input"] = xgraph.create_input_fix_ops(feeds, node.name, quant_config)
  xgraph.create_fixed_normal_op(
      node.name, "scale", quant_config, attrs={}, input_ops=input_ops)
class XGraph(object):
  """Builder wrapper around an xir ``Graph``.

  Normalizes op names with ``_XMODEL_NAME_PATTERN`` before handing them to
  xir, and inserts ``fix`` (quantization) ops around ops according to a
  ``NndctQuantInfo`` mapping ({'param'|'output'|'input': {name: fix info}}).
  Also handles xmodel serialization and dot-image export.
  """

  def __init__(self, name: str):
    self._graph = Graph(name)
    # Registry of const ops created through this wrapper, keyed by xir name.
    self._const_ops: Dict[str, Op] = {}
    self._ops: Dict[str, Op] = {}

  def _check_inputs(self, input_ops):
    """Fail fast if any producer op was never registered (is ``None``)."""
    if any(ip is None for ip in input_ops):
      raise RuntimeError('The input op is `None`, please check graph.')

  def create_const_op(self, name: str, data: Optional[np.ndarray]) -> NoReturn:
    """Create and register a const op holding ``data``.

    Zero-dim arrays are promoted to shape ``(1,)`` because xir const ops
    need at least one dimension.

    Raises:
      RuntimeError: if a const op with ``name`` was already created here.
    """
    # Bug fix: previously the created op was never recorded in `_const_ops`,
    # so the duplicate-name check could never fire.  Check before creating,
    # then register the new op.
    if name in self._const_ops:
      raise RuntimeError('The const op {} has already in graph'.format(name))
    if data is not None and data.ndim == 0:
      data = np.array([data], data.dtype)
    const_op = self._graph.create_const_op(name, data)
    self._const_ops[name] = const_op
    return const_op

  def create_input_transpose_ops(self, input_list: List[Op], input_tensors: 'List[base_tensor::Tensor]'):
    """Insert channel-last -> channel-first transposes before 4D/5D inputs.

    Inputs of other ranks pass through unchanged; an already-existing
    transpose op with the expected name is reused rather than re-created.
    """
    t_ops = []
    for input, tensor in zip(input_list, input_tensors):
      if tensor.ndim not in (4, 5):
        t_ops.append(input)
        continue
      attrs: Dict[str, Any] = {
          'order': [0, 3, 1, 2] if tensor.ndim == 4 else [0, 4, 3, 1, 2]
      }
      op_name = input.get_name() + NNDCT_KEYS.TRANSPOSE_OP_SUFFIX
      t_op = self.get_op_by_name(op_name)
      if t_op is None:
        t_op = self.create_normal_op(
            op_name, 'transpose', attrs=attrs, input_ops={'input': [input]})
      t_ops.append(t_op)
    return t_ops

  def create_normal_op(self,
                       name: str,
                       kind: str,
                       tensor: Optional[np.ndarray] = None,
                       attrs: Optional[Dict[str, Any]] = None,
                       input_ops: Optional[List[Op]] = None) -> Op:
    """Create a plain xir op of ``kind`` after validating its inputs."""
    if input_ops is not None:
      self._check_inputs(input_ops['input'])
    return self._graph.create_op(name, kind, attrs, input_ops)

  def create_fixed_const_op(self, name: str, data: np.ndarray,
                            quant_info: NndctQuantInfo) -> Op:
    """Create a const op, plus a `fix` op after it when quant info exists."""
    formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
    const_op = self.create_const_op(formal_name, data)
    fixed_const_op = self.create_fix_op(const_op, name, quant_info)
    return fixed_const_op if fixed_const_op else const_op

  def create_fixed_normal_op(self,
                             name: str,
                             kind: str,
                             quant_info: NndctQuantInfo,
                             tensor: Optional[np.ndarray] = None,
                             attrs: Optional[Dict[str, Any]] = None,
                             input_ops: Optional[List[Op]] = None) -> Op:
    """Create a normal op, plus a `fix` op after it when quant info exists."""
    formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
    op = self.create_normal_op(
        name=formal_name,
        kind=kind,
        tensor=tensor,
        attrs=attrs,
        input_ops=input_ops)
    post_fixed_op = self.create_fix_op(op, name, quant_info)
    return post_fixed_op if post_fixed_op else op

  def create_input_fix_ops(self, input_list: List[Op], key_name: str, quant_info: NndctQuantInfo):
    """Wrap each input op with a pre-fix op when quant info provides one."""
    pre_fix_ops = []
    for i, op in enumerate(input_list):
      pre_fix_op = self.create_fix_op(op, key_name, quant_info, id=i, post_fix=False)
      pre_fix_ops.append(pre_fix_op if pre_fix_op else op)
    return pre_fix_ops

  def create_fix_op(self, input: Op, key_name: str,
                    quant_info: NndctQuantInfo, id: Optional[int] = None, post_fix: bool = True) -> Optional[Op]:
    """Create a `fix` op after (post) or before (pre) ``input``.

    Returns ``None`` when ``quant_info`` is not a dict or records no complete
    (bit_width, fix_point) pair for ``key_name``, so callers can fall back to
    the raw op.
    """

    def _get_fix_info(name: str, quant_info: NndctQuantInfo) -> Sequence[int]:
      # Post fixes consult the param/output tables; pre fixes the input table.
      if post_fix:
        combined = ChainMap(dict(quant_info['param']), dict(quant_info['output']))
      else:
        combined = quant_info['input']
      return combined.get(name)

    if not isinstance(quant_info, dict):
      return None
    bit_info = _get_fix_info(key_name, quant_info)
    if bit_info is None or bit_info[0] is None:
      return None
    bit_width, fix_point = bit_info[0]
    if bit_width is None or fix_point is None:
      return None
    attrs: Dict[str, Any] = {
        'fix_point': fix_point,
        'bit_width': bit_width,
        'round_mode': "DPU_ROUND",
        'if_signed': True,
    }
    if post_fix:
      op_name = input.get_name() + NNDCT_KEYS.FIX_OP_SUFFIX
    else:
      formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", key_name)
      op_name = formal_name + NNDCT_KEYS.PRE_FIX_OP_SUFFIX
      if id is not None:
        op_name += f"_i{id}"
    return self.create_normal_op(
        op_name, 'fix', attrs=attrs, input_ops={'input': [input]})

  def get_op_by_name(self, name: str) -> Op:
    """Look up an op by nndct name, preferring its fixed variant if present."""
    formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
    op = self._graph.get_op(formal_name + NNDCT_KEYS.FIX_OP_SUFFIX)
    if op is None:
      op = self._graph.get_op(formal_name)
    return op

  def get_op_output_shape(self, name: str) -> List[int]:
    """Return the output dims of op ``name``; warns and returns None if absent."""
    op = self.get_op_by_name(name)
    if op:
      return op.get_output_tensor().dims
    NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, f"{name} is not in xmodel. Please check it.")

  def export_to_xmodel(self, fname: str) -> NoReturn:
    """Serialize the graph to ``fname`` plus the xmodel suffix."""
    fname += NNDCT_KEYS.XMODEL_SUFFIX
    try:
      self._graph.serialize(fname)
    except Exception:
      raise ExportXmodelError(self._graph.get_name())
    else:
      NndctScreenLogger().info(f"=>Successfully convert '{self._graph.get_name()}' to xmodel.({fname})")

  def export_to_img(self, fname: str) -> NoReturn:
    """Render the graph as a dot image; best-effort (warns instead of raising)."""
    fname += NNDCT_KEYS.XMODEL_IMAGE_SUFFIX
    try:
      # Probe for graphviz's `dot`; image export is skipped when it is absent.
      proc = subprocess.Popen("which dot", stdout=subprocess.PIPE, shell=True)
      try:
        outs, errs = proc.communicate(timeout=2)
      except subprocess.TimeoutExpired:
        proc.kill()
        outs, errs = proc.communicate()
        NndctScreenLogger().error2user(QError.PROC_TIMEOUT, f"{errs}")
        raise
      if outs:
        self._graph.save_as_dot(fname)
      else:
        NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, f"Can't find dot command in the system, please install it." \
        " Otherwise, the xmodel image will not be generated.")
    except Exception as e:
      NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, f"Failed to generate xmodel image!({str(e)})")

  def graph(self):
    """Return the underlying xir graph object."""
    return self._graph
def hswish(xgraph: XGraph, node: Node, quant_config: NndctQuantInfo) -> NoReturn:
  """Build hswish as hard-sigmoid -> mul(const) -> mul(input)."""
  # DPU rational approximation of the hsigmoid scaling factor.
  hsig_scale = 6.0 * 2731.0 / 16384.0
  attrs = _get_xir_attr_from_node(node)
  node_input_op = xgraph.get_op_by_name(node.in_nodes[0])
  fixed_inputs = xgraph.create_input_fix_ops(
      [node_input_op], node.name, quant_config)
  hsigmoid_op = xgraph.create_fixed_normal_op(
      node.name + "_i0", "hard-sigmoid", quant_config, attrs=attrs,
      input_ops={"input": fixed_inputs})
  const_op = xgraph.create_const_op(
      name=node.name + "_i1", data=np.array([hsig_scale], dtype=np.float32))
  mul_op = xgraph.create_normal_op(
      node.name + '_mul', "mul",
      input_ops={"input": [hsigmoid_op, const_op]})
  has_fix_info = (quant_config and node.name in quant_config['output']
                  and quant_config["output"][node.name][0] is not None)
  if has_fix_info:
    # Requantize the intermediate product with fix_point = bit_width - 1.
    bit_width, _ = quant_config['output'][node.name][0]
    fix_attrs: Dict[str, Any] = {
        'fix_point': bit_width - 1,
        'bit_width': bit_width,
        'round_mode': "DPU_ROUND",
        'if_signed': True,
    }
    mul_fixed_op = xgraph.create_normal_op(
        mul_op.get_name() + NNDCT_KEYS.FIX_OP_SUFFIX, 'fix',
        attrs=fix_attrs, input_ops={'input': [mul_op]})
    final_inputs = [mul_fixed_op, node_input_op]
  else:
    final_inputs = [mul_op, node_input_op]
  xgraph.create_fixed_normal_op(
      node.name, "mul", quant_config, input_ops={"input": final_inputs})
23,999 | import math
import itertools
from typing import List, Dict, Any, NoReturn, Tuple
import numpy as np
from functools import partial
from nndct_shared.base import NNDCT_OP, NNDCT_KEYS
from nndct_shared.nndct_graph import Tensor, Node
from .xgraph import XGraph
from nndct_shared.utils import calculate_op_scale, DataXopError
# NOTE(review): the body of `binary_op` appears to have been lost in this
# extract -- only its signature remains, and `to_binary_op` is likely a
# separate top-level factory. Confirm against the original source file.
def binary_op(op_type: str, xgraph: XGraph, node: Node, quant_config: NndctQuantInfo):
def to_binary_op(xop_type):
# Bind the xir op type now; the resulting partial has the standard
# (xgraph, node, quant_config) op-builder signature used in this module.
return partial(binary_op, xop_type)
24,009 | import copy
import networkx as nx
from networkx.algorithms import is_isomorphic
from nndct_shared.base import NNDCT_OP
from nndct_shared.inspector.utils import build_xir_nndct_op_map, log_debug_info
from nndct_shared.compile.xir_helper import XIRHelper
from .graph import Graph, Node
def get_templates_from_dpu_compiler():
  """
  get pattern info from compiler
  """
  # Imported lazily so the module loads even without the compiler installed.
  import xir
  import xcompiler
  return [(template.get_name(), list(template.toposort()))
          for template in xcompiler.get_templates()]
def is_valid_pattern(pattern):
  """Decide whether a compiler template can serve as a match pattern.

  Returns (valid, message); message explains the rejection reason.
  """
  op_types = set()
  for node in pattern.nodes:
    op_types.update(pattern.get_node_types(node))
  if "fix" in op_types:
    return False, "This is a transfer pass template."
  if "fix2float" not in op_types and "float2fix" not in op_types:
    return False, "There is no fix in template."
  if op_types <= {"fix2float", "float2fix"}:
    return False, "Only fix in template"
  if "argmax" in op_types:
    return False, "argmax template is ignored."
  if len(list(pattern.nodes)) == 2 and ({"data", "const"} & op_types):
    return False, "data-fix/const-fix are ignored."
  # Cyclic patterns cannot be matched against a DAG.
  if not nx.algorithms.is_directed_acyclic_graph(pattern.graph):
    return False, f"{pattern.name} has cycles, please contact developer to fix it."
  return True, ""
def reorder_patterns(patterns):
  """Rank patterns by their non-fix node count, largest first."""
  pattern_map = {pattern.name: pattern for pattern in patterns}
  fix_type = {NNDCT_OP.FIX}
  pattern_len_map = {
      pattern.name: sum(
          1 for node in pattern.nodes
          if pattern.get_node_types(node) != fix_type)
      for pattern in patterns
  }
  sorted_patterns = sorted(
      pattern_len_map.items(), key=lambda item: item[1], reverse=True)
  log_debug_info(f"==============sorted patterns(total {len(sorted_patterns)} patterns)====================")
  for pattern_name, _ in sorted_patterns:
    log_debug_info(pattern_name)
    log_debug_info(str(pattern_map[pattern_name]))
  return [pattern_map[pattern_name] for pattern_name, _ in sorted_patterns]
def create_pattern_graph(name: str, ops: "List[xir.op_template]"):
  """Build a pattern Graph whose nodes/edges mirror the template ops."""
  pattern_graph = Graph(name)
  # First pass: one pattern node per template op.
  for op in ops:
    pattern_graph.add_node(get_op_name(op), Node(op_types=get_op_type(op)))
  # Second pass: wire producer -> consumer edges in both directions.
  for op in ops:
    op_name = get_op_name(op)
    for producer in get_input_ops(op):
      pattern_graph.add_edge(get_op_name(producer), op_name)
    for consumer in get_output_ops(op):
      pattern_graph.add_edge(op_name, get_op_name(consumer))
  return pattern_graph
def convert_xir_type_to_nndct_type(pattern_graph):
  """Translate each node's xir types to nndct types in place.

  Returns False as soon as a node has no known nndct mapping at all.
  """
  for node in pattern_graph.nodes:
    mapped = set()
    for xir_ty in pattern_graph.get_node_types(node):
      if xir_ty in _XIR2NNCT:
        mapped.update(_XIR2NNCT.get(xir_ty, {xir_ty}))
    if not mapped:
      return False
    pattern_graph.set_node_types(node, mapped)
  return True
def transform_pattern_graph(pattern_graph):
  """Normalize a raw compiler pattern; pass order is significant."""
  passes = (
      _merge_constfix,
      _merge_float2fix_fix2float_pair,
      _convert_fix_like_op_to_fix,
      _merge_mul_coeff,
      _remove_mul_for_hswish,
  )
  for transform in passes:
    transform(pattern_graph)
def log_debug_info(msg):
  """Append `msg` to the nndct debug log when inspect-debug is enabled."""
  if not NndctOption.nndct_inspect_debug.value:
    return
  NndctDebugLogger.write(f"{msg}\n")
class XIRHelper(object):
# Utility namespace for querying xir models and ops.
# NOTE(review): methods here take `cls` or no self but carry no
# @classmethod/@staticmethod decorators -- the decorators (and indentation)
# were likely lost when this snippet was extracted; confirm against the
# original source.
def find_xops_from_nndct_node(cls, nndct_node, xmodel):
# Collect compute xops whose names embed the normalized nndct node name,
# skipping data-movement and fix-conversion ops.
xop_lst = []
formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", nndct_node.name)
for xop in cls.get_xmodel_ops(xmodel):
if cls.get_xop_type(xop) in ["download", "upload", "fix2float", "float2fix", "transpose", "fix", "data-fix"]:
continue
if formal_name in cls.get_xop_name(xop):
xop_lst.append(xop)
return xop_lst
def get_xop_device_type(xop):
# Returns the assigned device attr (e.g. "DPU"/"CPU") or None if unset.
if xop.has_attr("device"):
return xop.get_attr("device")
else:
return None
def get_xop_name(xop):
return xop.get_name()
def get_xop_template_name(op_template):
return op_template.get_name()
def get_xop_template_types(op_template):
return op_template.get_types()
def get_xmodel_ops(xmodel):
return xmodel.get_ops()
def get_xop_type(xop):
return xop.get_type()
def get_input_xops(xop):
return xop.get_input_ops()["input"]
def get_op_partition_msg(xop):
# Prefer the partition message; fall back to the error message if present.
msg = ""
if xop and xop.has_attr("partition_msg"):
msg = xop.get_attr("partition_msg")
elif xop and xop.has_attr("error_msg"):
msg = xop.get_attr("error_msg")
return msg
def is_dpu_pattern(cls, xmodel):
# A pattern is DPU-mappable when every op has a device and the only CPU
# ops are data transfers or a reshape-fix fed directly by graph input.
for xop in cls.get_xmodel_ops(xmodel):
if cls.get_xop_device_type(xop) == "CPU":
if cls.get_xop_type(xop) == "reshape-fix":
input_op = cls.get_input_xops(xop)[0]
if cls.get_xop_type(input_op) not in ["data", "data-fix"]:
return False
elif cls.get_xop_type(xop) not in ["fix2float", "download"]:
return False
elif cls.get_xop_device_type(xop) is None:
return False
return True
def get_pattern_partition_msg(cls, xmodel):
# Concatenate partition/error messages from all ops in the model.
msg = ""
for xop in cls.get_xmodel_ops(xmodel):
msg += cls.get_op_partition_msg(xop)
return msg
def is_valid_compiled_pattern(cls, xmodel):
# Valid iff no op errored and every op was assigned a device.
for xop in cls.get_xmodel_ops(xmodel):
if xop is None or xop.has_attr("error_msg"):
return False
if any([cls.get_xop_device_type(xop) is None for xop in cls.get_xmodel_ops(xmodel)]):
return False
return True
def build_patterns_from_dpu_templates():
  """Fetch, filter, transform and rank DPU match patterns from xcompiler."""
  templates = get_templates_from_dpu_compiler()
  # Dump the raw template inventory for debugging.
  log_debug_info("\nAll patterns from xcompiler:")
  for idx, (name, ops) in enumerate(templates):
    log_debug_info(f"pattern id:{idx}")
    for op in ops:
      log_debug_info(f"op name:{XIRHelper.get_xop_template_name(op)} type:{XIRHelper.get_xop_template_types(op)}")
  # Keep only templates that pass the validity filter.
  kept_graphs = []
  for idx, (name, ops) in enumerate(templates):
    pattern_graph = create_pattern_graph(f"{name}_{idx}", ops)
    ok, reason = is_valid_pattern(pattern_graph)
    if ok:
      kept_graphs.append(pattern_graph)
    else:
      log_debug_info(f"{pattern_graph.name} is filtered.({reason}).")
  # pattern_graphs = pattern_graphs + _gen_pattern_from_sim_pattern()
  log_debug_info("\nPattern Transformation:")
  patterns = []
  for pattern_graph in kept_graphs:
    log_debug_info(f"{pattern_graph.name} pattern")
    log_debug_info("================Before transformation====================")
    log_debug_info(str(pattern_graph))
    transform_pattern_graph(pattern_graph)
    if convert_xir_type_to_nndct_type(pattern_graph):
      patterns.append(pattern_graph)
    else:
      log_debug_info(f"{pattern_graph.name} is ignored for there is at least one unknown op in the pattern.")
    log_debug_info("================After transformation====================")
    log_debug_info(str(pattern_graph))
  return reorder_patterns(patterns)
24,013 | from typing import Mapping
from nndct_shared.expanding.spec import BatchNormStructuredExpanding, InstanceNormStructuredExpanding, \
DataInsert, GenericStructuredExpanding, StructuredExpanding, WeightedNodeStructuredExpanding
from nndct_shared.nndct_graph.base_graph import Graph
from nndct_shared.nndct_graph.base_node import Node
from nndct_shared.nndct_graph.base_tensor import Tensor
from nndct_shared.utils import registry
from nndct_shared.base.key_names import NNDCT_OP as OpTypes
from nndct_shared.pruning.pruning_lib import is_depthwise_conv, _DISALLOW_PRUNED_INPUT_OPS, find_prunable_ancestor, CONV_OPS
import numpy as np
def _set_input_by_upstream(node: Node, expanding_desc: Mapping[str, StructuredExpanding]) -> StructuredExpanding:
  """Propagate the upstream node's output expansion onto this node's input side."""
  node_expanding = expanding_desc[node.name]
  assert isinstance(node_expanding, WeightedNodeStructuredExpanding), \
      "Variable node_expanding here has to be instance of WeightedNodeStructuredExpanding"
  upstream = expanding_desc[node.in_nodes[0]]
  node_expanding.in_dim = upstream.out_dim
  # Mirror every upstream output insert as a weight-input insert here.
  for insert in upstream.out_inserts:
    node_expanding.add_weight_in_insert(
        DataInsert(insert.position, insert.added_num_channels))
  return node_expanding
def _modify_depthwise(graph: Graph, node: Node, expanding_desc: Mapping[str, StructuredExpanding]):
  """Grow a depthwise conv's group/in/out dims to match its expanded input."""
  node_expanding = expanding_desc[node.name]
  assert isinstance(node_expanding, WeightedNodeStructuredExpanding), \
      "Variable node_expanding here has to be instance of WeightedNodeStructuredExpanding"
  upstream = expanding_desc[node.in_nodes[0]]
  # Depthwise multiplier (outputs per input channel), computed from the
  # dims *before* they are enlarged below.
  multiplier = node.op.attr['out_dim'] // node.op.attr['in_dim']
  added = upstream.added_out_channel
  node.op.attr["group"] += added
  node.op.attr['in_dim'] += added
  node.op.attr['out_dim'] += added * multiplier
  node_expanding.in_dim = node.op.attr['in_dim']
  node_expanding.out_dim = node.op.attr['out_dim']
  # Each upstream insert is scaled by the multiplier on the output side.
  for insert in upstream.out_inserts:
    node_expanding.add_weight_out_insert(
        DataInsert(insert.position * multiplier, insert.added_num_channels * multiplier))
    node_expanding.add_bias_insert(
        DataInsert(insert.position * multiplier, insert.added_num_channels * multiplier))
OpTypes.DEPTHWISE_CONV2D, OpTypes.DEPTHWISE_CONV3D,
OpTypes.DEPTHWISE_CONVTRANSPOSE2D, OpTypes.DEPTHWISE_CONVTRANSPOSE3D
class StructuredExpanding(object):
# Base record describing how a node's channel dimensions are expanded.
# NOTE(review): `in_dim`/`out_dim` each appear twice (getter then setter) --
# the @property/@x.setter decorators were very likely stripped when this
# snippet was extracted; confirm against the original source.
def __init__(self, node_name: str) -> None:
self._node_name: str = node_name
self._in_dim: int = 0
self._out_dim: int = 0
def node_name(self) -> str:
return self._node_name
def in_dim(self) -> int:
return self._in_dim
def in_dim(self, v: int) -> None:
self._in_dim = v
def out_dim(self) -> int:
return self._out_dim
def out_dim(self, v: int) -> None:
self._out_dim = v
def added_out_channel(self) -> int:
# Subclass responsibility.
raise NotImplementedError("method added_out_channel is not implemented")
def added_in_channel(self) -> int:
# Subclass responsibility.
raise NotImplementedError("method added_in_channel is not implemented")
def out_inserts(self) -> List[DataInsert]:
# Subclass responsibility.
raise NotImplementedError("method out_inserts is not implemented")
class Graph(GraphBase):
""" Graph object of NNDCT, contain list of NndctNodes.
That will be used for topology or export to XGraph"""
def __init__(self, graph_name=None):
super(Graph, self).__init__()
self._name = graph_name or 'NndctGraph'
self._nodes_by_name = {}
self._nodes_by_id = {}
self._end_tensors = []
self._copy_tensor_map = {}
self._tensors = {}
self._blocks = []
self._param_names = []
self._top_block = None
def __contains__(self, node_or_name: Union[str, Node]) -> bool:
if isinstance(node_or_name, str):
return node_or_name in self._nodes_by_name
else:
return node_or_name.name in self._nodes_by_name
def __deepcopy__(self, memo):
raise NotImplementedError("Deep copy is prohibited, use `clone_from` instead.")
def clone(self):
graph = self.__class__(self.name)
graph.clone_from(self)
return graph
def clone_from(self, src_graph):
local_map = {}
converted_nodes = []
head_node = self.create_node_from(src_graph.head_node, local_map, converted_nodes)
return_node = self.create_node_from(src_graph.return_node, local_map, converted_nodes)
top_block = Block(self, None, head_node, return_node)
self.set_top_block(top_block)
self._top_block.clone_from(src_graph.block, local_map, converted_nodes)
def create_node_from(self, src_node, local_map, converted_nodes):
node = Node(src_node.name, dtype=src_node.dtype, in_quant_part=src_node.in_quant_part)
node.owning_graph = self
node.idx = src_node.idx
node.scope_name = src_node.scope_name
node.source_range = src_node.source_range
node.target_device = src_node.target_device
node.normalized_name = src_node.normalized_name
converted_nodes.append(src_node.name)
for out in src_node.out_tensors:
if out.name in local_map:
node.add_out_tensor(local_map[out.name])
else:
tensor = Tensor(name=out.name)
tensor.clone_from(out)
local_map[out.name] = tensor
node.add_out_tensor(tensor)
for inp in src_node.in_tensors:
if inp.name in local_map:
node.add_in_tensor(local_map[inp.name])
else:
tensor = Tensor(name=inp.name)
tensor.clone_from(inp)
local_map[inp.name] = tensor
node.add_in_tensor(tensor)
node.clone_from(src_node, local_map)
for src_block in src_node.blocks:
head_node = self.create_node_from(src_block.input_node, local_map, converted_nodes)
return_node = self.create_node_from(src_block.return_node, local_map, converted_nodes)
block = Block(self, node, head_node, return_node)
block.clone_from(src_block, local_map, converted_nodes)
node.add_block(block)
return node
def node(self, name):
"""Return node with the specified name"""
return self._nodes_by_name.get(name, None)
def get_node_by_idx(self, idx):
node = self._nodes_by_id.get(idx, None)
assert node is not None
return node
def get_input_nodes(self):
input_nodes = []
for node in self.nodes:
if (len(self.parents(node)) == 0) and \
(node.op.type==NNDCT_OP.INPUT or node.op.type==NNDCT_OP.TUPLE_INPUT):
input_nodes.append(node)
return input_nodes
def get_input_tensors(self, input_args):
input_tensors = []
graph_name = self.name
input_nodes = self.get_input_nodes()
for idx in range(len(input_args)):
#input_node_name = graph_name + "::input_" + str(idx)
#input_node = self.node(input_node_name)
input_node = input_nodes[idx]
input_tensor = input_node.out_tensors[0]
if input_node.op.type == NNDCT_OP.INPUT:
input_tensors.append(input_tensor.name)
elif input_node.op.type == NNDCT_OP.TUPLE_INPUT:
for index in range(len(input_args[idx])):
input_tensor_name = input_tensor.name + '.' + str(index)
input_tensors.append(input_tensor_name)
return input_tensors
def get_return_tensors(self):
return_tensors = []
for tensor in self.return_node.in_tensors:
return_tensors.append(tensor.name)
return return_tensors
def add_node(self, node: Node) -> None:
    """Register `node` in the name and id indexes.

    No-op when a node with the same name is already registered.

    Raises:
      RuntimeError: if a *different* node already occupies `node.idx`.
    """
    if node.name in self._nodes_by_name:
        return
    if node.idx in self._nodes_by_id and node is not self._nodes_by_id[node.idx]:
        raise RuntimeError(f"The id `{node.idx}` of {node.name} has been added into graph")
    if node.idx == -1:
        # if not self._nodes_by_id:
        #   node._idx = 0
        # else:
        #   node._idx = max([node.idx for node in self.all_nodes()]) + 1
        # Nodes without an assigned id get a large negative provisional id,
        # unique per current node count.
        # NOTE(review): relies on a module-level `sys` import outside this
        # view — confirm it exists in the file header.
        node._idx = -sys.maxsize + len(list(self.all_nodes()))
    self._nodes_by_name[node.name] = node
    self._nodes_by_id[node.idx] = node
def free_node(self, node):
    """Drop `node` from the name/id indexes and clear its graph back-reference."""
    node.owning_graph = None
    self._nodes_by_name.pop(node.name)
    self._nodes_by_id.pop(node.idx)
def remove_node(self, node):
    """Unlink a single-output node, rewiring its consumers to its first input."""
    assert node.in_tensors
    assert len(node.out_tensors) == 1
    # Every user of the node's output now reads the node's input instead.
    node.out_tensors[0].replace_uses_with(node.in_tensors[0])
    node.destroy()
def remove_node_by_types(self, node_types: List[str]) -> None:
    """Remove every node whose op type is in `node_types`.

    Each removed node must satisfy `remove_node`'s constraints (at least one
    input, exactly one output); its users are redirected to its input tensor.
    """
    # Fix: the original annotated a `Dict[str, str]` return but never
    # returned anything — annotate None to match actual behavior. Also use
    # a generator with any() instead of materializing a list.
    if any(node_type in self.op_types for node_type in node_types):
        nodes_to_remove = [node for node in self.nodes if node.op.type in node_types]
        for node in nodes_to_remove:
            self.remove_node(node)
def find_nodes_by_types(self, node_types: List[NNDCT_OP]):
    """Return all nodes whose op type is one of `node_types`."""
    # Fix: removed the dead `else: continue` branch; the filter is a plain
    # comprehension (despite the old local name, this is not conv-specific).
    return [node for node in self.nodes if node.op.type in node_types]
def reconnect_nodes(self):
    """Re-assign dense node ids in top-block order and rebuild node links."""
    self._nodes_by_id.clear()
    for new_idx, current in enumerate(self.nodes):
        current.idx = new_idx
        self._nodes_by_id[new_idx] = current
        current.clean_connections()
    self.connect_nodes()
def connect_nodes(self):
    """Populate in/out node-name links from tensor producer/consumer relations.

    NOTE(review): O(n^2) over nodes per input tensor; also
    `input_tensor in nodeB.out_tensors` depends on Tensor's equality
    semantics — confirm Tensor.__eq__ is identity-like before replacing
    this scan with a producer map.
    """
    for nodeA in self.nodes:
        for input_tensor in nodeA.in_tensors:
            for nodeB in self.nodes:
                if nodeB is not nodeA and input_tensor in nodeB.out_tensors:
                    #nodeB.outputs.add(input_tensor.node.name)
                    nodeB.add_out_node(nodeA.name)
                    nodeA.add_in_node(input_tensor.node.name)
def parents(self, node: Union[Node, str]) -> List[Node]:
    """Resolve the producer nodes of `node` (accepts a node or its name)."""
    target = self.node(node) if isinstance(node, str) else node
    return [self.node(parent_name) for parent_name in target.in_nodes]
def children(self, node: Union[Node, str]) -> List[Node]:
    """Resolve the consumer nodes of `node` (accepts a node or its name)."""
    target = self.node(node) if isinstance(node, str) else node
    return [self.node(child_name) for child_name in target.out_nodes]
def add_tensor(self, tensor):
self._tensors[tensor.name] = tensor
def tensor(self, name):
return self._tensors.get(name, None)
def param_tensor(self, name):
for node in self.all_nodes():
for _, tensor in node.op.params.items():
if tensor.name == name:
return tensor
def add_end_tensor(self, tensor):
self._end_tensors.append(tensor)
def __repr__(self):
return f"Graph(name={self.name})"
def __str__(self):
return json.dumps(self.description(), indent=2, separators=(',', ': '))
def description(self):
    """Serializable summary of the graph: class name plus per-node descriptions."""
    ordered = sorted(self.nodes, key=lambda item: item.idx)
    return {
        'graph_name': f"{self.__class__.__name__}",
        'nodes': [item.description() for item in ordered],
    }
def set_node_id(self, index, node):
node.idx = index
self._nodes_by_id[index] = node
def create_subgraph_from_nodeset(cls, origin_graph, nodeset, graph_name):
    """
    create a subgraph from nodeset belong to origin graph
    """
    # NOTE(review): first parameter is `cls` — presumably a stripped
    # @classmethod decorator; confirm against the original source.
    assert len(nodeset) >= 2
    sorted_nodeset = origin_graph.top_sort_nodeset(nodeset)
    # Detach every node from its current block's linked list first.
    for node in sorted_nodeset:
        node.remove_from_list()
    subgraph = cls(graph_name)
    # First and last nodes become the new top block's entry/return nodes.
    sorted_nodeset[0].owning_graph = subgraph
    sorted_nodeset[-1].owning_graph = subgraph
    block = Block(subgraph, None, sorted_nodeset[0], sorted_nodeset[-1])
    subgraph.set_top_block(block)
    if len(sorted_nodeset) > 2:
        # Interior nodes are appended in topological order.
        for node in sorted_nodeset[1:-1]:
            node.owning_graph = subgraph
            subgraph.append_node(node)
    return subgraph
def top_sort_nodeset(nodeset: Sequence[Node]) -> List[Node]:
    """Order `nodeset` by cached topological position.

    (No `self` parameter — presumably a stripped @staticmethod.)
    """
    return sorted(nodeset, key=lambda item: item.topo_position)
def get_topological_graph_nodes_list(self):
    """Return this graph's top-block nodes sorted by topological position."""
    # Fix: `list(...)` replaces the redundant identity comprehension.
    return Graph.top_sort_nodeset(list(self.nodes))
def name(self):
return self._name
def name(self, name):
self._name = name
def nodes(self):
return self._top_block.nodes
def reverse_nodes(self):
return self._top_block.reverse_nodes
def tensors(self):
for tensor in self._tensors.values():
yield tensor
# TODO: Remove
def end_tensors(self):
    """Tensors feeding the graph's return node (i.e. the graph outputs)."""
    # Fix: identity comprehension replaced by list().
    return list(self.return_node.in_tensors)
def inputs(self):
return [node for node in self.all_nodes() if not node.in_nodes]
def outputs(self):
return [node for node in self.all_nodes() if not node.out_nodes]
def op_types(self):
return {node.op.type for node in self.all_nodes()}
def append_node(self, node):
self._top_block.append_node(node)
def add_param_name(self, param_name):
if param_name not in self._param_names:
self._param_names.append(param_name)
def param_names(self):
return list(self._param_names)
def block(self):
return self._top_block
def is_tensor_in_graph(self, tensor_name):
    """Whether a tensor with the given name is registered in this graph."""
    # Fix: `True if ... else False` was redundant — the membership test is
    # already a bool.
    return tensor_name in self._tensors
def update_node_idx(self, node, index):
self._nodes_by_id[index] = node
def clear_node_id_map(self):
self._nodes_by_id.clear()
def remove_tensor(self, tensor):
self._tensors.pop(tensor.name)
if tensor.name in self._param_names:
self._param_names.remove(tensor.name)
def insert_node_between_nodes(self, new_node, parent_node, child_node):
    """Splice `new_node` onto the parent->child edge.

    `new_node` gets a fresh output tensor named after it; the child's input
    slot that read a parent output is rewired to that tensor, and the parent
    output becomes `new_node`'s input.
    """
    assert parent_node.in_node_list() and child_node.in_node_list()
    assert (parent_node.owning_graph == child_node.owning_graph
            and parent_node.owning_block == child_node.owning_block)
    new_node.owning_block = parent_node.owning_block
    new_node.owning_graph = parent_node.owning_graph
    tensor = Tensor(name=new_node.name, node=new_node)
    new_node.add_out_tensor(tensor)
    out_tensor = None
    offset = None
    # Find which parent output the child consumes and at which input slot.
    # NOTE(review): `break` exits only the inner loop, so when the child
    # consumes several parent outputs the *last* scanned one wins; and if no
    # use is found, `offset` stays None and replace_input_at will fail —
    # confirm callers guarantee a direct parent->child edge.
    for out in parent_node.out_tensors:
        for use in out.uses:
            if use.user is child_node:
                out_tensor = out
                offset = use.offset
                break
    #out_tensor.replace_uses_with(new_node.out_tensors[0])
    child_node.replace_input_at(offset, new_node.out_tensors[0])
    new_node.add_in_tensor(out_tensor)
    new_node.insert_after(parent_node)
def set_top_block(self, block):
self._top_block = block
def add_block(self, block):
self._blocks.append(block)
def all_blocks(self):
return self._blocks
def all_nodes(self):
    """Yield every registered node, including block-internal ones."""
    # Fix: iterate the values view directly instead of discarding keys from
    # items() (ruff PERF102).
    yield from self._nodes_by_name.values()
def head_node(self):
return self._top_block.input_node
def return_node(self):
return self._top_block.return_node
def clean_tensors_data(self):
for tensor in self.tensors:
tensor.clean_data()
def assign_node_topological_name(self, prefix="", suffix=""):
    """Give every leaf node a unique sanitized name in topological order.

    Container nodes (those owning blocks) are not named themselves; their
    block contents are visited recursively instead.
    """
    counter = itertools.count(0)
    sanitize = re.compile('[^0-9a-zA-Z_]+')

    def visit(nodes):
        for current in self.top_sort_nodeset(list(nodes)):
            if current.blocks:
                for inner_block in current.blocks:
                    visit(inner_block.nodes)
            else:
                base = sanitize.sub("_", current.op.type)
                current.normalized_name = f"{prefix}{base}_{next(counter)}{suffix}"

    visit(self.nodes)
def simple_description(self):
    """
    Only describe op type topological info.
    """
    def summarize(node):
        info = {
            'op': node.op.type,
            "input_ops": [node.owning_graph.node(name).op.type for name in node.in_nodes],
            "output_ops": [node.owning_graph.node(name).op.type for name in node.out_nodes],
        }
        if node.blocks:
            for i, block in enumerate(node.blocks):
                info[f'block_{i}'] = [
                    summarize(inner)
                    for inner in self.top_sort_nodeset(list(block.nodes))
                ]
        return info

    graph_des = {
        'nodes': [summarize(n) for n in self.top_sort_nodeset(list(self.nodes))]
    }
    return json.dumps(graph_des, indent=2, separators=(',', ': '))
def get_md5(self):
    """MD5 hex digest of the simple (op-topology) description string."""
    import hashlib
    return hashlib.md5(self.simple_description().encode("utf-8")).hexdigest()
class Node(NodeBase):
"""A node contains an op and its input and output tensor.
"""
def __init__(self, name: str,
op: Optional[str] = None,
dtype: Optional[str] = None,
in_quant_part: Optional[bool] = False):
super().__init__()
self._name = name
self._op = op
self._dtype = dtype
self._idx = -1
self._scope_name = ""
self._source_range = ""
self._normalized_name = ""
self._in_tensors = []
self._out_tensors = []
self._in_nodes = []
self._out_nodes = []
self._blocks = []
self._is_quantizable = in_quant_part
self._is_merged = False
self._transpose_in_order = None
self._transpose_out_order = None
self._topo_position = 0
self._block = None
self._graph = None
self._neighbor_nodes = [None, None]
self._target_device = None
def __repr__(self):
return f"Node(name={self.name}, id={self.idx}, op_type={self.op.type}, quant_state={self.in_quant_part})"
def __str__(self):
return json.dumps(self.description(), indent=2, separators=(',', ': '))
def __deepcopy__(self, memo):
raise NotImplementedError("Deep copy is prohibited, use `clone_from` instead.")
def clone_from(self, src_node, local_map):
    """Copy `src_node`'s op into this node.

    The source op's attr/param/config containers are temporarily swapped
    for shallow copies so that `copy.copy` + `_export_attr_and_param`
    cannot mutate the source op, then restored; finally `op.clone_from`
    remaps tensor references through `local_map`.
    """
    tmp_attrs = src_node.op._attrs
    tmp_params = src_node.op._params
    tmp_configs = src_node.op._configs
    src_node.op._params = copy.copy(tmp_params)
    src_node.op._attrs = copy.copy(tmp_attrs)
    src_node.op._configs = copy.copy(tmp_configs)
    self.op = copy.copy(src_node.op)
    self.op._export_attr_and_param()
    # Restore the source op's original containers.
    src_node.op._attrs = tmp_attrs
    src_node.op._params = tmp_params
    src_node.op._configs = tmp_configs
    self.op.clone_from(src_node.op, local_map)
def scope_name(self):
return self._scope_name
def scope_name(self, name):
self._scope_name = name
def description(self):
node_des = {}
node_des['name'] = self._name
node_des['scope_name'] = self._scope_name
node_des['idx'] = self._idx
node_des['dtype'] = self._dtype
node_des['enable_quant'] = self._is_quantizable
node_des['in_nodes'] = [i for i in self.in_nodes]
node_des['out_nodes'] = [o for o in self.out_nodes]
node_des['in_tensors'] = [it.description() for it in self.in_tensors]
node_des['out_tensors'] = [ot.description() for ot in self.out_tensors]
node_des['op'] = self._op.description()
if self._blocks:
for i, block in enumerate(self._blocks):
node_des[f'block_{i}'] = []
for n in sorted(block.nodes, key=lambda n: n.idx):
node_des[f'block_{i}'].append(n.description())
return node_des
def clean_connections(self):
self._in_nodes = []
self._out_nodes = []
def add_in_node(self, node_name: str):
if node_name not in self._in_nodes:
self._in_nodes.append(node_name)
def add_out_node(self, node_name: str):
if node_name not in self._out_nodes:
self._out_nodes.append(node_name)
def in_tensors(self):
return self._in_tensors
def out_tensors(self):
return self._out_tensors
def in_nodes(self):
    """Names of the nodes producing this node's input tensors (tensors
    without a producer are skipped)."""
    return [
        tensor.node.name
        for tensor in self.in_tensors
        if tensor.node is not None
    ]
def out_nodes(self):
    """Names of the nodes consuming any of this node's output tensors
    (one entry per use, duplicates preserved)."""
    return [use.user.name for out in self.out_tensors for use in out.uses]
def node_attr(self, key):
return self._op.get_attr(key)
def set_node_attr(self, key, value):
if all([val is None for val in self._op._attr_value_mem[key]]):
self._op.set_attr(key, value)
else:
self._op.update_attr(key, value)
def node_config(self, key):
return self._op.get_config(key)
def set_node_config(self, key, value):
self._op.set_config(key, value)
def has_bound_params(self):
return self._op.has_native_params()
def op_type(self):
return self.op.type
def name(self):
return self._name
def name(self, value):
self._name = value
def idx(self):
return self._idx
def idx(self, index):
self._idx = index
self.owning_graph.update_node_idx(self, index)
def op(self):
return self._op
def op(self, op):
self._op = op
def dtype(self):
return self._dtype
# @property
# def alias(self):
# return self._alias
def in_quant_part(self) -> bool:
return self._is_quantizable
def in_quant_part(self, quant_state: bool) -> None:
self._is_quantizable = quant_state
def module(self):
return self._module()
def module(self, module):
self._module = weakref.ref(module)
def blocks(self):
return self._blocks
def add_block(self, block):
self._blocks.append(block)
def has_custom_op(self):
return isinstance(self.op, CustomOp)
def get_attr_val(self, attr_name):
attr = self.node_attr(attr_name)
return attr.data if isinstance(attr, Tensor) else attr
def merged(self):
return self._is_merged
def merged(self, flag):
self._is_merged = flag
def transpose_in_order(self):
return self._transpose_in_order
def transpose_in_order(self, order):
self._transpose_in_order = order
def transpose_out_order(self):
return self._transpose_out_order
def transpose_out_order(self, order):
self._transpose_out_order = order
def set_node_attr_tensor_value(self, old_tensor, new_tensor):
for attr_name, attr_value in self.op.attrs.items():
if attr_value.value is old_tensor:
self.set_node_attr(attr_name, new_tensor)
def destroy(self):
    """Fully remove this node: outputs, inputs, list links, graph indexes.

    Raises:
      RuntimeError: for if/loop nodes — they own blocks and must be handled
        separately.
    """
    if len(self.blocks) > 0:
        raise RuntimeError("Can't destroy if or loop node.")
    # Drop outputs back-to-front; remove_output asserts each output has no
    # remaining uses.
    while len(self.out_tensors) > 0:
        self.remove_output(len(self.out_tensors) - 1)
    self.remove_all_inputs()
    if self.in_node_list():
        self.remove_from_list()
    self.owning_graph.free_node(self)
def remove_output(self, i):
assert i < len(self.out_tensors)
assert len(self.out_tensors[i].uses) == 0
output = self.out_tensors.pop(i)
self.owning_graph.remove_tensor(output)
for output_offset in range(i, len(self.out_tensors)):
self.out_tensors[output_offset].offset -= 1
def replace_input_at(self, i, new_tensor):
    """Rebind input slot `i` to `new_tensor`, migrating use/attr-use records.

    No-op when the slot already holds `new_tensor`.
    """
    old_tensor = self.in_tensors[i]
    if old_tensor is new_tensor:
        return
    self.in_tensors[i] = new_tensor
    # Iterate over snapshots: the underlying lists are mutated while the
    # entries are transferred.
    uses = [u for u in old_tensor.uses]
    attr_uses = [attr_u for attr_u in old_tensor.attr_uses]
    for u in uses:
        if u.user is self:
            new_tensor.uses.append(u)
            old_tensor.uses.remove(u)
    for attr_u in attr_uses:
        if attr_u.user is self.op:
            old_tensor.replace_attr_with_new_tensor_v2(attr_u, new_tensor)
def remove_input(self, i):
self.drop_input(i)
for j in range(i + 1, len(self._in_tensors)):
it = self.find_use_for_input(j)
it.offset -= 1
self._in_tensors.pop(i)
def remove_all_inputs(self):
for i in range(len(self.in_tensors)):
self.drop_input(i)
self.in_tensors.clear()
def drop_input(self, i):
assert i < len(self.in_tensors)
input_value = self.in_tensors[i]
use_it = self.find_use_for_input(i)
input_value.uses.remove(use_it)
self.in_tensors[i] = None
return input_value
def find_use_for_input(self, i):
    """Locate the Use record linking input slot `i` back to this node."""
    found = None
    # The whole uses list is scanned (no early break), mirroring the
    # original: the last matching record wins if duplicates ever exist.
    for candidate in self.in_tensors[i].uses:
        if candidate.offset == i and candidate.user is self:
            found = candidate
    assert found is not None
    return found
def owning_block(self):
    # Block that contains this node; presumably a stripped @property.
    return self._block
def owning_block(self, block):
    # Presumably the corresponding stripped @owning_block.setter.
    self._block = block
def owning_graph(self):
    # Graph that owns this node; presumably a stripped @property.
    return self._graph
def owning_graph(self, graph):
    # Setter side effect: assigning a (truthy) graph immediately registers
    # this node in the graph's indexes via add_node.
    self._graph = graph
    if self._graph:
        self._graph.add_node(self)
def topo_position(self):
return self._topo_position
def topo_position(self, pos):
self._topo_position = pos
def insert_before(self, node):
assert node.in_node_list()
self.insert_after(node.prev_node)
def insert_after(self, node):
assert not self.in_node_list() and node.in_node_list()
assert node.owning_block is not None
self._block = node.owning_block
next_node = node.next_node
node.next_node = self
self.prev_node = node
self.next_node = next_node
next_node.prev_node = self
self.update_topo_position()
def update_topo_position(self):
    """Recompute this node's fractional topological position from neighbors.

    Positions are spaced out so nodes can be inserted between neighbors
    without renumbering; when the available gap is exhausted the owning
    block is fully reindexed.
    NOTE(review): MID_POSITION / POSITION_UPPER_BOUND / POSITION_LOWER_BOUND
    / APPEND_INTERVAL are constants defined outside this view — confirm.
    """
    is_first_node = self.prev_node is self.owning_block.input_node
    is_last_node = self.next_node is self.owning_block.return_node
    prev_pos = self.prev_node.topo_position
    next_pos = self.next_node.topo_position
    if is_last_node:
        if is_first_node:
            # Only node in the block: take the midpoint position.
            self.topo_position = MID_POSITION
            return
        if prev_pos >= (POSITION_UPPER_BOUND - APPEND_INTERVAL):
            # No headroom left at the tail; renumber the whole block.
            self.owning_block.reindex_topo()
            return
        self.topo_position = prev_pos + APPEND_INTERVAL
    elif is_first_node:
        if next_pos <= (POSITION_LOWER_BOUND + APPEND_INTERVAL):
            self.owning_block.reindex_topo()
            return
        self.topo_position = next_pos - APPEND_INTERVAL
    else:
        # Insert halfway between the neighbors; if float precision collapses
        # the gap, renumber.
        pos_between = prev_pos + (next_pos - prev_pos) / 2
        if pos_between == prev_pos:
            self.owning_block.reindex_topo()
            return
        self.topo_position = pos_between
def next_node(self):
return self._neighbor_nodes[1]
def next_node(self, node):
self._neighbor_nodes[1] = node
def prev_node(self):
return self._neighbor_nodes[0]
def prev_node(self, node):
self._neighbor_nodes[0] = node
def in_node_list(self):
if self.next_node is None:
assert self.prev_node is None
return self.next_node is not None
def remove_from_list(self):
    """Unlink this node from its block's doubly linked node list.

    NOTE(review): only the block's `input_node` is patched when the head is
    removed; the block's return_node is presumably never removed this way —
    confirm.
    """
    assert self.in_node_list()
    if self.owning_block.input_node is self:
        self.owning_block.input_node = self.next_node
    self.owning_block = None
    next_node = self.next_node
    prev_node = self.prev_node
    # Stitch the neighbors together, then clear both of this node's links.
    prev_node.next_node = next_node
    next_node.prev_node = prev_node
    self.next_node = None
    self.prev_node = None
def add_in_tensor(self, tensor):
tensor.uses.append(Use(self, len(self.in_tensors)))
self._in_tensors.append(tensor)
self.owning_graph.add_tensor(tensor)
def add_out_tensor(self, tensor):
tensor.offset = len(self.out_tensors)
self._out_tensors.append(tensor)
tensor.node = self
self.owning_graph.add_tensor(tensor)
def target_device(self):
return self._target_device
def target_device(self, device):
self._target_device = device
def scope_name(self):
return self._scope_name
def scope_name(self, scope_name):
self._scope_name = scope_name
def source_range(self):
return self._source_range
def source_range(self, source_range):
self._source_range = source_range
def normalized_name(self):
return self._normalized_name
def normalized_name(self, name):
self._normalized_name = name
def is_depthwise_conv(op):
    """Return True when `op` is a depthwise convolution.

    An op counts as depthwise either when its type is a dedicated
    depthwise-conv type, or when it is a grouped conv whose group count
    equals its input channel count.
    """
    if op.type in DEPTHWISECONV_OPS:
        return True
    if not is_grouped_conv(op):
        return False
    # Fix: the original also read 'out_dim' into an unused local; dropped.
    return op.attr['group'] == op.attr['in_dim']
])
def modify_conv2d(graph: Graph, node: Node, expanding_desc: Mapping[str, StructuredExpanding]):
# In pytorch, dw conv is repesented by conv2d with groups == in_channels and
# out_channels == K * in_channels, where K is a positive integer.
if is_depthwise_conv(node.op):
_modify_depthwise(graph, node, expanding_desc)
return
assert node.op.attr['group'] == 1, 'Grouped convolution is not allowed.'
node_expanding = _set_input_by_upstream(node, expanding_desc)
node.op.attr["in_dim"] += node_expanding.added_in_channel
node.op.attr["out_dim"] += node_expanding.added_out_channel
node_expanding.in_dim = node.op.attr["in_dim"]
node_expanding.out_dim = node.op.attr["out_dim"] | null |
24,014 | from typing import Mapping
from nndct_shared.expanding.spec import BatchNormStructuredExpanding, InstanceNormStructuredExpanding, \
DataInsert, GenericStructuredExpanding, StructuredExpanding, WeightedNodeStructuredExpanding
from nndct_shared.nndct_graph.base_graph import Graph
from nndct_shared.nndct_graph.base_node import Node
from nndct_shared.nndct_graph.base_tensor import Tensor
from nndct_shared.utils import registry
from nndct_shared.base.key_names import NNDCT_OP as OpTypes
from nndct_shared.pruning.pruning_lib import is_depthwise_conv, _DISALLOW_PRUNED_INPUT_OPS, find_prunable_ancestor, CONV_OPS
import numpy as np
class DataInsert(object):
    """Record of extra channels inserted at one position of a weight/feature.

    Holds the insertion position, how many channels were added, and the
    tensor data backing those channels.
    NOTE(review): `added_data` is defined twice (getter then setter form) —
    presumably @property/@added_data.setter decorators were stripped from
    this dump; as written the setter definition shadows the getter. Confirm
    against the original source.
    """
    def __init__(self, position: int = 0, added_num_channels: int = 0, added_data: Tensor = None) -> None:
        self._position: int = position                    # channel index where data is inserted
        self._added_num_channels: int = added_num_channels  # number of inserted channels
        self._added_data: Tensor = added_data             # backing data for the inserted channels
    def position(self) -> int:
        return self._position
    def added_num_channels(self) -> int:
        return self._added_num_channels
    def added_data(self) -> Tensor:
        return self._added_data
    def added_data(self, data: Tensor) -> None:
        self._added_data = data
class StructuredExpanding(object):
    """Base record of a structured channel expansion applied to one node.

    Tracks the node's (post-expansion) input/output channel counts.
    Subclasses must implement `added_out_channel`, `added_in_channel` and
    `out_inserts`.
    NOTE(review): paired getter/setter defs sharing one name are presumably
    stripped @property/@*.setter decorators — confirm against the original.
    """
    def __init__(self, node_name: str) -> None:
        self._node_name: str = node_name  # name of the expanded node
        self._in_dim: int = 0             # input channel count after expansion
        self._out_dim: int = 0            # output channel count after expansion
    def node_name(self) -> str:
        return self._node_name
    def in_dim(self) -> int:
        return self._in_dim
    def in_dim(self, v: int) -> None:
        self._in_dim = v
    def out_dim(self) -> int:
        return self._out_dim
    def out_dim(self, v: int) -> None:
        self._out_dim = v
    def added_out_channel(self) -> int:
        # Abstract: number of output channels added by this expansion.
        raise NotImplementedError("method added_out_channel is not implemented")
    def added_in_channel(self) -> int:
        # Abstract: number of input channels added by this expansion.
        raise NotImplementedError("method added_in_channel is not implemented")
    def out_inserts(self) -> List[DataInsert]:
        # Abstract: per-position insert records for the output channels.
        raise NotImplementedError("method out_inserts is not implemented")
class InstanceNormStructuredExpanding(StructuredExpanding):
    """Expansion record for an instance-norm node.

    Channel inserts are tracked separately for the beta (bias) and gamma
    (scale) parameters; gamma inserts define the node's channel change.
    """
    def __init__(self, node_name: str) -> None:
        super().__init__(node_name)
        self._beta_inserts: List[DataInsert] = []   # inserts for the bias parameter
        self._gamma_inserts: List[DataInsert] = []  # inserts for the scale parameter
    def added_out_channel(self) -> int:
        # Total channels added = sum over all gamma inserts.
        ret = 0
        for insert in self._gamma_inserts:
            ret += insert.added_num_channels
        return ret
    def added_in_channel(self) -> int:
        # Instance norm is channel-preserving, so in == out.
        # NOTE(review): `self.added_out_channel` is referenced without a
        # call — consistent only if these were @property methods whose
        # decorators got stripped from this dump; confirm.
        return self.added_out_channel
    def beta_inserts(self) -> List[DataInsert]:
        return self._beta_inserts
    def beta_inserts(self, v: List[DataInsert]) -> None:
        self._beta_inserts = v
    def gamma_inserts(self) -> List[DataInsert]:
        return self._gamma_inserts
    def gamma_inserts(self, v: List[DataInsert]) -> None:
        self._gamma_inserts = v
    def out_inserts(self) -> List[DataInsert]:
        # The gamma inserts define the node's output-channel layout.
        return self._gamma_inserts
    def add_beta_insert(self, weight_insert: DataInsert):
        self._beta_inserts.append(weight_insert)
    def add_gamma_insert(self, weight_insert: DataInsert):
        self._gamma_inserts.append(weight_insert)
class Graph(GraphBase):
""" Graph object of NNDCT, contain list of NndctNodes.
That will be used for topology or export to XGraph"""
def __init__(self, graph_name=None):
    """Initialize an empty graph; the default name is 'NndctGraph'."""
    super(Graph, self).__init__()
    self._name = graph_name or 'NndctGraph'
    self._nodes_by_name = {}    # node name -> Node
    self._nodes_by_id = {}      # node idx -> Node
    self._end_tensors = []      # tensors registered via add_end_tensor
    self._copy_tensor_map = {}
    self._tensors = {}          # tensor name -> Tensor
    self._blocks = []           # nested (if/loop) blocks
    self._param_names = []      # registered parameter tensor names
    self._top_block = None      # set later via set_top_block
def __contains__(self, node_or_name: Union[str, Node]) -> bool:
if isinstance(node_or_name, str):
return node_or_name in self._nodes_by_name
else:
return node_or_name.name in self._nodes_by_name
def __deepcopy__(self, memo):
raise NotImplementedError("Deep copy is prohibited, use `clone_from` instead.")
def clone(self):
graph = self.__class__(self.name)
graph.clone_from(self)
return graph
def clone_from(self, src_graph):
local_map = {}
converted_nodes = []
head_node = self.create_node_from(src_graph.head_node, local_map, converted_nodes)
return_node = self.create_node_from(src_graph.return_node, local_map, converted_nodes)
top_block = Block(self, None, head_node, return_node)
self.set_top_block(top_block)
self._top_block.clone_from(src_graph.block, local_map, converted_nodes)
def create_node_from(self, src_node, local_map, converted_nodes):
    """Clone `src_node` (and, recursively, its blocks) into this graph.

    `local_map` maps source tensor names to their already-cloned Tensor
    objects so shared tensors are cloned exactly once; `converted_nodes`
    accumulates the names of processed source nodes.
    """
    node = Node(src_node.name, dtype=src_node.dtype, in_quant_part=src_node.in_quant_part)
    # Assigning owning_graph also registers the node in this graph's indexes.
    node.owning_graph = self
    node.idx = src_node.idx
    node.scope_name = src_node.scope_name
    node.source_range = src_node.source_range
    node.target_device = src_node.target_device
    node.normalized_name = src_node.normalized_name
    converted_nodes.append(src_node.name)
    # Clone output tensors (reusing any tensor already cloned via local_map).
    for out in src_node.out_tensors:
        if out.name in local_map:
            node.add_out_tensor(local_map[out.name])
        else:
            tensor = Tensor(name=out.name)
            tensor.clone_from(out)
            local_map[out.name] = tensor
            node.add_out_tensor(tensor)
    # Clone input tensors the same way.
    for inp in src_node.in_tensors:
        if inp.name in local_map:
            node.add_in_tensor(local_map[inp.name])
        else:
            tensor = Tensor(name=inp.name)
            tensor.clone_from(inp)
            local_map[inp.name] = tensor
            node.add_in_tensor(tensor)
    # Copy the op (attrs/params/configs) with tensor remapping.
    node.clone_from(src_node, local_map)
    # Recursively clone nested blocks (if/loop bodies).
    for src_block in src_node.blocks:
        head_node = self.create_node_from(src_block.input_node, local_map, converted_nodes)
        return_node = self.create_node_from(src_block.return_node, local_map, converted_nodes)
        block = Block(self, node, head_node, return_node)
        block.clone_from(src_block, local_map, converted_nodes)
        node.add_block(block)
    return node
def node(self, name):
"""Return node with the specified name"""
return self._nodes_by_name.get(name, None)
def get_node_by_idx(self, idx):
node = self._nodes_by_id.get(idx, None)
assert node is not None
return node
def get_input_nodes(self):
input_nodes = []
for node in self.nodes:
if (len(self.parents(node)) == 0) and \
(node.op.type==NNDCT_OP.INPUT or node.op.type==NNDCT_OP.TUPLE_INPUT):
input_nodes.append(node)
return input_nodes
def get_input_tensors(self, input_args):
input_tensors = []
graph_name = self.name
input_nodes = self.get_input_nodes()
for idx in range(len(input_args)):
#input_node_name = graph_name + "::input_" + str(idx)
#input_node = self.node(input_node_name)
input_node = input_nodes[idx]
input_tensor = input_node.out_tensors[0]
if input_node.op.type == NNDCT_OP.INPUT:
input_tensors.append(input_tensor.name)
elif input_node.op.type == NNDCT_OP.TUPLE_INPUT:
for index in range(len(input_args[idx])):
input_tensor_name = input_tensor.name + '.' + str(index)
input_tensors.append(input_tensor_name)
return input_tensors
def get_return_tensors(self):
return_tensors = []
for tensor in self.return_node.in_tensors:
return_tensors.append(tensor.name)
return return_tensors
def add_node(self, node: Node) -> None:
if node.name in self._nodes_by_name:
return
if node.idx in self._nodes_by_id and node is not self._nodes_by_id[node.idx]:
raise RuntimeError(f"The id `{node.idx}` of {node.name} has been added into graph")
if node.idx == -1:
# if not self._nodes_by_id:
# node._idx = 0
# else:
# node._idx = max([node.idx for node in self.all_nodes()]) + 1
node._idx = -sys.maxsize + len(list(self.all_nodes()))
self._nodes_by_name[node.name] = node
self._nodes_by_id[node.idx] = node
def free_node(self, node):
node.owning_graph = None
self._nodes_by_name.pop(node.name)
self._nodes_by_id.pop(node.idx)
def remove_node(self, node):
assert node.in_tensors
assert len(node.out_tensors) == 1
out_tensor = node.out_tensors[0]
inp_tensor = node.in_tensors[0]
out_tensor.replace_uses_with(inp_tensor)
node.destroy()
def remove_node_by_types(self, node_types: List[str]) -> Dict[str, str]:
if any([node_type in self.op_types for node_type in node_types]):
nodes_to_remove = []
for node in self.nodes:
if node.op.type in node_types:
nodes_to_remove.append(node)
for node in nodes_to_remove:
self.remove_node(node)
def find_nodes_by_types(self, node_types: List[NNDCT_OP]):
conv_nodes = []
for node in self.nodes:
if node.op.type in node_types:
conv_nodes.append(node)
else:
continue
return conv_nodes
def reconnect_nodes(self):
self._nodes_by_id.clear()
for idx, node in enumerate(self.nodes):
node.idx = idx
self._nodes_by_id[idx] = node
node.clean_connections()
self.connect_nodes()
def connect_nodes(self):
for nodeA in self.nodes:
for input_tensor in nodeA.in_tensors:
for nodeB in self.nodes:
if nodeB is not nodeA and input_tensor in nodeB.out_tensors:
#nodeB.outputs.add(input_tensor.node.name)
nodeB.add_out_node(nodeA.name)
nodeA.add_in_node(input_tensor.node.name)
def parents(self, node: Union[Node, str]) -> List[Node]:
if isinstance(node, str):
node = self.node(node)
return [self.node(node_name) for node_name in node.in_nodes]
def children(self, node: Union[Node, str]) -> List[Node]:
if isinstance(node, str):
node = self.node(node)
return [self.node(node_name) for node_name in node.out_nodes]
def add_tensor(self, tensor):
self._tensors[tensor.name] = tensor
def tensor(self, name):
return self._tensors.get(name, None)
def param_tensor(self, name):
for node in self.all_nodes():
for _, tensor in node.op.params.items():
if tensor.name == name:
return tensor
def add_end_tensor(self, tensor):
self._end_tensors.append(tensor)
def __repr__(self):
return f"Graph(name={self.name})"
def __str__(self):
return json.dumps(self.description(), indent=2, separators=(',', ': '))
def description(self):
graph_des = {}
graph_des['graph_name'] = f"{self.__class__.__name__}"
graph_des['nodes'] = []
for n in sorted(self.nodes, key=lambda n: n.idx):
graph_des['nodes'].append(n.description())
return graph_des
def set_node_id(self, index, node):
node.idx = index
self._nodes_by_id[index] = node
def create_subgraph_from_nodeset(cls, origin_graph, nodeset, graph_name):
"""
create a subgraph from nodeset belong to origin graph
"""
assert len(nodeset) >= 2
sorted_nodeset = origin_graph.top_sort_nodeset(nodeset)
for node in sorted_nodeset:
node.remove_from_list()
subgraph = cls(graph_name)
sorted_nodeset[0].owning_graph = subgraph
sorted_nodeset[-1].owning_graph = subgraph
block = Block(subgraph, None, sorted_nodeset[0], sorted_nodeset[-1])
subgraph.set_top_block(block)
if len(sorted_nodeset) > 2:
for node in sorted_nodeset[1:-1]:
node.owning_graph = subgraph
subgraph.append_node(node)
return subgraph
def top_sort_nodeset(nodeset: Sequence[Node]) -> List[Node]:
sorted_nodeset = sorted(nodeset, key=lambda n: n.topo_position)
return sorted_nodeset
def get_topological_graph_nodes_list(self):
nodes_list = [node for node in self.nodes]
return Graph.top_sort_nodeset(nodes_list)
def name(self):
return self._name
def name(self, name):
self._name = name
def nodes(self):
return self._top_block.nodes
def reverse_nodes(self):
return self._top_block.reverse_nodes
def tensors(self):
for tensor in self._tensors.values():
yield tensor
# TODO: Remove
def end_tensors(self):
return [tensor for tensor in self.return_node.in_tensors]
def inputs(self):
return [node for node in self.all_nodes() if not node.in_nodes]
def outputs(self):
return [node for node in self.all_nodes() if not node.out_nodes]
def op_types(self):
return {node.op.type for node in self.all_nodes()}
def append_node(self, node):
self._top_block.append_node(node)
def add_param_name(self, param_name):
if param_name not in self._param_names:
self._param_names.append(param_name)
def param_names(self):
return list(self._param_names)
def block(self):
return self._top_block
def is_tensor_in_graph(self, tensor_name):
return True if tensor_name in self._tensors else False
def update_node_idx(self, node, index):
self._nodes_by_id[index] = node
def clear_node_id_map(self):
self._nodes_by_id.clear()
def remove_tensor(self, tensor):
self._tensors.pop(tensor.name)
if tensor.name in self._param_names:
self._param_names.remove(tensor.name)
def insert_node_between_nodes(self, new_node, parent_node, child_node):
assert parent_node.in_node_list() and child_node.in_node_list()
assert (parent_node.owning_graph == child_node.owning_graph
and parent_node.owning_block == child_node.owning_block)
new_node.owning_block = parent_node.owning_block
new_node.owning_graph = parent_node.owning_graph
tensor = Tensor(name=new_node.name, node=new_node)
new_node.add_out_tensor(tensor)
out_tensor = None
offset = None
for out in parent_node.out_tensors:
for use in out.uses:
if use.user is child_node:
out_tensor = out
offset = use.offset
break
#out_tensor.replace_uses_with(new_node.out_tensors[0])
child_node.replace_input_at(offset, new_node.out_tensors[0])
new_node.add_in_tensor(out_tensor)
new_node.insert_after(parent_node)
def set_top_block(self, block):
self._top_block = block
def add_block(self, block):
self._blocks.append(block)
def all_blocks(self):
return self._blocks
def all_nodes(self):
for _, node in self._nodes_by_name.items():
yield node
def head_node(self):
return self._top_block.input_node
def return_node(self):
return self._top_block.return_node
def clean_tensors_data(self):
for tensor in self.tensors:
tensor.clean_data()
def assign_node_topological_name(self, prefix="", suffix=""):
count = itertools.count(0)
illegal_char_regex = re.compile('[^0-9a-zA-Z_]+')
def _assgin_nodes(nodes):
sorted_nodes = self.top_sort_nodeset(list(nodes))
for n in sorted_nodes:
if n.blocks:
for block in n.blocks:
_assgin_nodes(block.nodes)
else:
candidate = illegal_char_regex.sub("_", n.op.type)
n.normalized_name = f"{prefix}{candidate}_{next(count)}{suffix}"
_assgin_nodes(self.nodes)
def simple_description(self):
"""
Only describe op type topological info.
"""
def get_node_simple_info(node):
node_des = {}
node_des['op'] = node.op.type
node_des["input_ops"] = [node.owning_graph.node(inode).op.type for inode in node.in_nodes]
node_des["output_ops"] = [node.owning_graph.node(onode).op.type for onode in node.out_nodes]
if node.blocks:
for i, block in enumerate(node.blocks):
node_des[f'block_{i}'] = []
for n in self.top_sort_nodeset(list(block.nodes)):
node_des[f'block_{i}'].append(get_node_simple_info(n))
return node_des
graph_des = {}
graph_des['nodes'] = []
for n in self.top_sort_nodeset(list(self.nodes)):
graph_des['nodes'].append(get_node_simple_info(n))
graph_str = json.dumps(graph_des, indent=2, separators=(',', ': '))
return graph_str
def get_md5(self):
import hashlib
graph_str = self.simple_description()
md = hashlib.md5()
md.update(graph_str.encode("utf-8"))
return md.hexdigest()
class Node(NodeBase):
  """A node contains an op and its input and output tensor.

  NOTE(review): several getter/setter pairs below are defined twice under
  the same name (``scope_name``, ``name``, ``idx``, ``op``, ``merged``,
  ``next_node`` ...). In plain Python the second ``def`` shadows the first,
  so these were presumably ``@property`` / ``@<name>.setter`` pairs whose
  decorators were stripped — confirm against the upstream source.
  """
  def __init__(self, name: str,
               op: Optional[str] = None,
               dtype: Optional[str] = None,
               in_quant_part: Optional[bool] = False):
    super().__init__()
    self._name = name
    self._op = op
    self._dtype = dtype
    self._idx = -1  # graph index; -1 until assigned
    self._scope_name = ""
    self._source_range = ""
    self._normalized_name = ""
    self._in_tensors = []
    self._out_tensors = []
    self._in_nodes = []
    self._out_nodes = []
    self._blocks = []  # sub-blocks for control-flow (if/loop) nodes
    self._is_quantizable = in_quant_part
    self._is_merged = False
    self._transpose_in_order = None
    self._transpose_out_order = None
    self._topo_position = 0
    self._block = None  # owning block
    self._graph = None  # owning graph
    self._neighbor_nodes = [None, None]  # [prev, next] links in the block's node list
    self._target_device = None

  def __repr__(self):
    return f"Node(name={self.name}, id={self.idx}, op_type={self.op.type}, quant_state={self.in_quant_part})"

  def __str__(self):
    return json.dumps(self.description(), indent=2, separators=(',', ': '))

  def __deepcopy__(self, memo):
    raise NotImplementedError("Deep copy is prohibited, use `clone_from` instead.")

  def clone_from(self, src_node, local_map):
    """Copy the op (attrs/params/configs) of ``src_node`` into this node.

    ``local_map`` maps source-graph tensors to their clones; it is forwarded
    to ``op.clone_from``.
    """
    # Temporarily swap shallow copies of the containers into src_node.op so
    # that `copy.copy(src_node.op)` below does not share the attr/param/config
    # dicts with the source op; the originals are restored afterwards.
    tmp_attrs = src_node.op._attrs
    tmp_params = src_node.op._params
    tmp_configs = src_node.op._configs
    src_node.op._params = copy.copy(tmp_params)
    src_node.op._attrs = copy.copy(tmp_attrs)
    src_node.op._configs = copy.copy(tmp_configs)
    self.op = copy.copy(src_node.op)
    self.op._export_attr_and_param()
    src_node.op._attrs = tmp_attrs
    src_node.op._params = tmp_params
    src_node.op._configs = tmp_configs
    self.op.clone_from(src_node.op, local_map)

  def scope_name(self):
    return self._scope_name

  def scope_name(self, name):
    # setter: shadows the getter above (see class NOTE about stripped decorators)
    self._scope_name = name

  def description(self):
    """Return a JSON-serializable dict describing this node and its blocks."""
    node_des = {}
    node_des['name'] = self._name
    node_des['scope_name'] = self._scope_name
    node_des['idx'] = self._idx
    node_des['dtype'] = self._dtype
    node_des['enable_quant'] = self._is_quantizable
    node_des['in_nodes'] = [i for i in self.in_nodes]
    node_des['out_nodes'] = [o for o in self.out_nodes]
    node_des['in_tensors'] = [it.description() for it in self.in_tensors]
    node_des['out_tensors'] = [ot.description() for ot in self.out_tensors]
    node_des['op'] = self._op.description()
    if self._blocks:
      for i, block in enumerate(self._blocks):
        node_des[f'block_{i}'] = []
        # block nodes serialized in index order
        for n in sorted(block.nodes, key=lambda n: n.idx):
          node_des[f'block_{i}'].append(n.description())
    return node_des

  def clean_connections(self):
    # Reset the cached name-based connection lists only; tensors are untouched.
    self._in_nodes = []
    self._out_nodes = []

  def add_in_node(self, node_name: str):
    if node_name not in self._in_nodes:
      self._in_nodes.append(node_name)

  def add_out_node(self, node_name: str):
    if node_name not in self._out_nodes:
      self._out_nodes.append(node_name)

  def in_tensors(self):
    return self._in_tensors

  def out_tensors(self):
    return self._out_tensors

  def in_nodes(self):
    """Names of producer nodes, derived from input tensors (not the cached list)."""
    nodes = []
    for tensor in self.in_tensors:
      if tensor.node is not None:
        nodes.append(tensor.node.name)
    return nodes

  def out_nodes(self):
    """Names of consumer nodes, derived from output-tensor uses (may repeat)."""
    nodes = []
    for out in self.out_tensors:
      for use in out.uses:
        nodes.append(use.user.name)
    return nodes

  def node_attr(self, key):
    return self._op.get_attr(key)

  def set_node_attr(self, key, value):
    # First assignment uses set_attr; subsequent assignments use update_attr.
    if all([val is None for val in self._op._attr_value_mem[key]]):
      self._op.set_attr(key, value)
    else:
      self._op.update_attr(key, value)

  def node_config(self, key):
    return self._op.get_config(key)

  def set_node_config(self, key, value):
    self._op.set_config(key, value)

  def has_bound_params(self):
    return self._op.has_native_params()

  def op_type(self):
    return self.op.type

  def name(self):
    return self._name

  def name(self, value):
    self._name = value

  def idx(self):
    return self._idx

  def idx(self, index):
    # setter: also keeps the owning graph's index map in sync
    self._idx = index
    self.owning_graph.update_node_idx(self, index)

  def op(self):
    return self._op

  def op(self, op):
    self._op = op

  def dtype(self):
    return self._dtype

  # @property
  # def alias(self):
  #   return self._alias

  def in_quant_part(self) -> bool:
    return self._is_quantizable

  def in_quant_part(self, quant_state: bool) -> None:
    self._is_quantizable = quant_state

  def module(self):
    # _module is stored as a weakref (see setter below); call to dereference.
    return self._module()

  def module(self, module):
    self._module = weakref.ref(module)

  def blocks(self):
    return self._blocks

  def add_block(self, block):
    self._blocks.append(block)

  def has_custom_op(self):
    return isinstance(self.op, CustomOp)

  def get_attr_val(self, attr_name):
    """Return the raw value of an attribute, unwrapping Tensor attrs to data."""
    attr = self.node_attr(attr_name)
    return attr.data if isinstance(attr, Tensor) else attr

  def merged(self):
    return self._is_merged

  def merged(self, flag):
    self._is_merged = flag

  def transpose_in_order(self):
    return self._transpose_in_order

  def transpose_in_order(self, order):
    self._transpose_in_order = order

  def transpose_out_order(self):
    return self._transpose_out_order

  def transpose_out_order(self, order):
    self._transpose_out_order = order

  def set_node_attr_tensor_value(self, old_tensor, new_tensor):
    """Replace every op attribute whose value is ``old_tensor`` with ``new_tensor``."""
    for attr_name, attr_value in self.op.attrs.items():
      if attr_value.value is old_tensor:
        self.set_node_attr(attr_name, new_tensor)

  def destroy(self):
    """Detach this node completely: drop outputs, inputs, list links, then free it."""
    if len(self.blocks) > 0:
      raise RuntimeError("Can't destroy if or loop node.")
    while len(self.out_tensors) > 0:
      self.remove_output(len(self.out_tensors) - 1)
    self.remove_all_inputs()
    if self.in_node_list():
      self.remove_from_list()
    self.owning_graph.free_node(self)

  def remove_output(self, i):
    """Remove output tensor *i*; it must be unused. Later offsets shift down."""
    assert i < len(self.out_tensors)
    assert len(self.out_tensors[i].uses) == 0
    output = self.out_tensors.pop(i)
    self.owning_graph.remove_tensor(output)
    for output_offset in range(i, len(self.out_tensors)):
      self.out_tensors[output_offset].offset -= 1

  def replace_input_at(self, i, new_tensor):
    """Rewire input slot *i* to ``new_tensor``, moving this node's Use records
    (and any attr uses held by this node's op) off the old tensor."""
    old_tensor = self.in_tensors[i]
    if old_tensor is new_tensor:
      return
    self.in_tensors[i] = new_tensor
    uses = [u for u in old_tensor.uses]
    attr_uses = [attr_u for attr_u in old_tensor.attr_uses]
    for u in uses:
      if u.user is self:
        new_tensor.uses.append(u)
        old_tensor.uses.remove(u)
    for attr_u in attr_uses:
      if attr_u.user is self.op:
        old_tensor.replace_attr_with_new_tensor_v2(attr_u, new_tensor)

  def remove_input(self, i):
    """Remove input slot *i* and shift the offsets of the later inputs down."""
    self.drop_input(i)
    for j in range(i + 1, len(self._in_tensors)):
      it = self.find_use_for_input(j)
      it.offset -= 1
    self._in_tensors.pop(i)

  def remove_all_inputs(self):
    for i in range(len(self.in_tensors)):
      self.drop_input(i)
    self.in_tensors.clear()

  def drop_input(self, i):
    """Detach input *i* (remove this node's Use from the tensor) and return it.

    The slot is set to None but NOT removed from the list.
    """
    assert i < len(self.in_tensors)
    input_value = self.in_tensors[i]
    use_it = self.find_use_for_input(i)
    input_value.uses.remove(use_it)
    self.in_tensors[i] = None
    return input_value

  def find_use_for_input(self, i):
    """Return the Use record linking input tensor *i* back to this node."""
    use_it = None
    for use in self.in_tensors[i].uses:
      if use.offset == i and use.user is self:
        use_it = use
    assert use_it is not None
    return use_it

  def owning_block(self):
    return self._block

  def owning_block(self, block):
    self._block = block

  def owning_graph(self):
    return self._graph

  def owning_graph(self, graph):
    # setter: also registers this node with the new graph
    self._graph = graph
    if self._graph:
      self._graph.add_node(self)

  def topo_position(self):
    return self._topo_position

  def topo_position(self, pos):
    self._topo_position = pos

  def insert_before(self, node):
    assert node.in_node_list()
    self.insert_after(node.prev_node)

  def insert_after(self, node):
    """Link this (unlisted) node into the doubly-linked list right after *node*."""
    assert not self.in_node_list() and node.in_node_list()
    assert node.owning_block is not None
    self._block = node.owning_block
    next_node = node.next_node
    node.next_node = self
    self.prev_node = node
    self.next_node = next_node
    next_node.prev_node = self
    self.update_topo_position()

  def update_topo_position(self):
    """Recompute this node's fractional topological position from its neighbors.

    Positions are spread over [POSITION_LOWER_BOUND, POSITION_UPPER_BOUND];
    when no room is left between/beyond the neighbors the whole block is
    reindexed.
    """
    is_first_node = self.prev_node is self.owning_block.input_node
    is_last_node = self.next_node is self.owning_block.return_node
    prev_pos = self.prev_node.topo_position
    next_pos = self.next_node.topo_position
    if is_last_node:
      if is_first_node:
        # only node in the block: park it in the middle
        self.topo_position = MID_POSITION
        return
      if prev_pos >= (POSITION_UPPER_BOUND - APPEND_INTERVAL):
        self.owning_block.reindex_topo()
        return
      self.topo_position = prev_pos + APPEND_INTERVAL
    elif is_first_node:
      if next_pos <= (POSITION_LOWER_BOUND + APPEND_INTERVAL):
        self.owning_block.reindex_topo()
        return
      self.topo_position = next_pos - APPEND_INTERVAL
    else:
      pos_between = prev_pos + (next_pos - prev_pos) / 2
      if pos_between == prev_pos:
        # no representable position between neighbors: rebuild the index
        self.owning_block.reindex_topo()
        return
      self.topo_position = pos_between

  def next_node(self):
    return self._neighbor_nodes[1]

  def next_node(self, node):
    self._neighbor_nodes[1] = node

  def prev_node(self):
    return self._neighbor_nodes[0]

  def prev_node(self, node):
    self._neighbor_nodes[0] = node

  def in_node_list(self):
    """True when this node is linked into a block's node list."""
    if self.next_node is None:
      assert self.prev_node is None
    return self.next_node is not None

  def remove_from_list(self):
    """Unlink this node from the doubly-linked node list of its block."""
    assert self.in_node_list()
    if self.owning_block.input_node is self:
      self.owning_block.input_node = self.next_node
    self.owning_block = None
    next_node = self.next_node
    prev_node = self.prev_node
    prev_node.next_node = next_node
    next_node.prev_node = prev_node
    self.next_node = None
    self.prev_node = None

  def add_in_tensor(self, tensor):
    # Record the use (this node, at the next input offset) on the tensor.
    tensor.uses.append(Use(self, len(self.in_tensors)))
    self._in_tensors.append(tensor)
    self.owning_graph.add_tensor(tensor)

  def add_out_tensor(self, tensor):
    tensor.offset = len(self.out_tensors)
    self._out_tensors.append(tensor)
    tensor.node = self
    self.owning_graph.add_tensor(tensor)

  def target_device(self):
    return self._target_device

  def target_device(self, device):
    self._target_device = device

  def scope_name(self):
    # NOTE(review): duplicates the scope_name pair defined earlier in the class.
    return self._scope_name

  def scope_name(self, scope_name):
    self._scope_name = scope_name

  def source_range(self):
    return self._source_range

  def source_range(self, source_range):
    self._source_range = source_range

  def normalized_name(self):
    return self._normalized_name

  def normalized_name(self, name):
    self._normalized_name = name
def modify_instancenorm(graph: Graph, node: Node, expanding_desc: Mapping[str, StructuredExpanding]):
  """Propagate structured channel expansion of the input onto an InstanceNorm node.

  Copies the input node's expanded output dimension into the norm node's
  in/out dims and its ``num_features`` attribute, and mirrors every channel
  insert of the input onto the node's gamma and beta parameters.

  NOTE(review): the ``graph`` parameter is unused in this body.
  NOTE(review): ``node.op.attr["num_features"]`` writes the attr mapping
  directly, unlike the ``set_attr``/``update_attr`` API used elsewhere —
  confirm this is intentional.
  """
  # Under test...
  node_expanding = expanding_desc[node.name]
  assert isinstance(node_expanding, InstanceNormStructuredExpanding), \
      "Variable node_expanding here has to be instance of InstanceNormStructuredExpanding"
  input_expanding = expanding_desc[node.in_nodes[0]]
  node_expanding.in_dim = input_expanding.out_dim
  node_expanding.out_dim = input_expanding.out_dim
  node.op.attr["num_features"] = input_expanding.out_dim
  for insert in input_expanding.out_inserts:
    node_expanding.add_gamma_insert(DataInsert(insert.position, insert.added_num_channels))
    node_expanding.add_beta_insert(DataInsert(insert.position, insert.added_num_channels))
24,023 | import numpy as np
import math
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP
from nndct_shared.utils import NndctOption
from nndct_shared.algorithms import breadth_first_search_handler
from .quant_ops import normal_quant_neuron
def maybe_get_quantizer(quantizer=None):
def quantize_tensors(tensors, node, tensor_names=None, tensor_type='output', method=None):
  """Calibrate or quantize the given tensors of *node* via the active quantizer.

  Args:
    tensors: indexable sequence of tensors to process.
    node: the graph node the tensors belong to.
    tensor_names: per-tensor quant names; only consulted when
      ``tensor_type == 'param'``.
    tensor_type: 'output', 'input' or 'param'; controls naming and the
      skip conditions below.
    method: forwarded to the quantizer's calibrate/quantize call.

  Returns:
    The processed tensors, or the input ``tensors`` unchanged when no
    quantizer is active or the node is outside the quantizable part.
  """
  quant_mode, quantizer = maybe_get_quantizer()
  if quantizer is None:
    return tensors
  elif tensor_type != 'output' and (not node.in_quant_part):
    return tensors
  # custom op output may need quantization for its following node
  elif not node.in_quant_part and not node.op.is_custom_op:
    return tensors
  qtensors = []
  # NOTE(review): quant_mode values appear to mean 1/3 -> calibration,
  # 2 -> quantization; any other value leaves `qfunc` unbound and the
  # call below would raise NameError — confirm callers guarantee this.
  if quant_mode in [1, 3]:
    qfunc = quantizer.calibrate
  elif quant_mode == 2:
    qfunc = quantizer.quantize
  tname = node.name
  datatype = 'int'
  for idx in range(len(tensors)):
    if tensor_type == 'param':
      # params are addressed by their own name, always at index 0
      tname = tensor_names[idx]
      index = 0
    else:
      index = idx
    if (quantizer.need_quantize_tensor(tname, tensor_type)):
      if NndctOption.nndct_only_int_quant.value is False:
        datatype = quantizer.get_quant_dtype(tname, tensor_type) if tensor_type=='param' else \
            quantizer.get_quant_dtype(node.name, tensor_type)
      qtensors.append(qfunc(
          tensors[idx],
          tname,
          node,
          tensor_type,
          index,
          method=method,
          datatype=datatype))
    else:
      # tensor not tracked by the quantizer: pass through untouched
      qtensors.append(tensors[idx])
  return qtensors
24,039 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging as _logging
import os as _os
import sys as _sys
import time as _time
import traceback as _traceback
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import WARN
from logging import NOTSET
import threading
from .option_list import NndctOption
from nndct_shared.base import SingletonMeta
from .msg_code import QError, QWarning
def _log_prefix(level, timestamp=None, file_and_line=None):
  """Generate a nndct logline prefix: '<sev><MMDD HH:MM:SS.micros> file:line]'."""
  # pylint: disable=global-variable-not-assigned
  global _level_names
  # pylint: enable=global-variable-not-assigned
  # Resolve the timestamp (now if not supplied) and its components.
  when = timestamp or _time.time()
  when_tuple = _time.localtime(when)
  micros = int(1e6 * (when % 1.0))
  filename, line = file_and_line or _get_file_and_line()
  base = _os.path.basename(filename)
  # One-letter severity code; default to 'I' for unknown levels.
  severity = _level_names[level][0] if level in _level_names else 'I'
  return '%c%02d%02d %02d:%02d:%02d.%06d %s:%d]' % (
      severity,
      when_tuple[1],  # month
      when_tuple[2],  # day
      when_tuple[3],  # hour
      when_tuple[4],  # min
      when_tuple[5],  # sec
      micros,
      base,
      line)
def get_logger(name=None, level=None, file_name=None, only2file=False):
  """Return logger instance.

  Args:
    name: logger name forwarded to ``logging.getLogger``.
    level: NOTE(review) — used only as a boolean gate: any truthy value
      forces the level to INFO (the value itself is ignored); falsy sets
      the level to 1 (emit everything).
    file_name: when given, a FileHandler writing to this path is attached.
    only2file: when True, no stream handler is attached.
  """
  # global _logger
  # Use double-checked locking to avoid taking lock unnecessarily.
  # if _logger:
  #   return _logger
  _logger_lock.acquire()
  try:
    # Scope the TensorFlow logger to not conflict with users' loggers.
    logger = _logging.getLogger(name)
    if level:
      logger.setLevel(_logging.INFO)
    else:
      logger.setLevel(1)
    # Override findCaller on the logger to skip internal helper functions
    logger.findCaller = _logger_find_caller
    # Don't further configure the TensorFlow logger if the root logger is
    # already configured. This prevents double logging in those cases.
    if not _logging.getLogger().handlers:
      # Determine whether we are in an interactive environment
      _interactive = False
      try:
        # This is only defined in interactive shells.
        if _sys.ps1:
          _interactive = True
      except AttributeError:
        # Even now, we may be in an interactive shell with `python -i`.
        _interactive = _sys.flags.interactive
      # If we are in an interactive environment (like Jupyter), set loglevel
      # to INFO and pipe the output to stdout.
      if _interactive:
        #logger.setLevel(INFO)
        _logging_target = _sys.stdout
      else:
        _logging_target = _sys.stderr
      if not only2file and all([not isinstance(hdler, _logging.StreamHandler) for hdler in logger.handlers]):
        # Add the output handler.
        _handler = _logging.StreamHandler(_logging_target)
        # _handler.setFormatter(_logging.Formatter(_logging_fmt, None))
        logger.addHandler(_handler)
      if file_name is not None:
        _file_handler = _logging.FileHandler(file_name)
        logger.addHandler(_file_handler)
    return logger
  finally:
    _logger_lock.release()
def debug(msg, *args, **kwargs):
  """Log *msg* at DEBUG level with an nndct-style prefix."""
  prefix = {'nndct_prefix': _log_prefix(DEBUG)}
  get_logger().debug(msg, extra=prefix, *args, **kwargs)
24,043 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging as _logging
import os as _os
import sys as _sys
import time as _time
import traceback as _traceback
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import WARN
from logging import NOTSET
import threading
from .option_list import NndctOption
from nndct_shared.base import SingletonMeta
from .msg_code import QError, QWarning
def _log_prefix(level, timestamp=None, file_and_line=None):
def get_logger(name=None, level=None, file_name=None, only2file=False):
def fatal(msg, *args, **kwargs):
  """Log *msg* at FATAL level with an nndct-style prefix."""
  prefix = {'nndct_prefix': _log_prefix(FATAL)}
  get_logger().fatal(msg, *args, extra=prefix, **kwargs)
24,046 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging as _logging
import os as _os
import sys as _sys
import time as _time
import traceback as _traceback
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import WARN
from logging import NOTSET
import threading
from .option_list import NndctOption
from nndct_shared.base import SingletonMeta
from .msg_code import QError, QWarning
def log(level, msg, *args, **kwargs):
  """Log *msg* at the given *level* with an nndct-style prefix."""
  prefix = {'nndct_prefix': _log_prefix(level)}
  get_logger().log(level, msg, *args, extra=prefix, **kwargs)
def min_vlog_level():
  """Lazily resolve and cache the minimum verbose-logging level.

  Falls back to 0 when the option value cannot be parsed.
  """
  global _min_vlog_level
  if _min_vlog_level is None:
    try:
      resolved = NndctOption.nndct_logging_level.value
    except ValueError:
      resolved = 0
    _min_vlog_level = resolved
  return _min_vlog_level
def vlog(level, msg, *args, **kwargs):
  """Emit *msg* only when *level* does not exceed the configured vlog level."""
  if level > min_vlog_level():
    return
  log(level, msg, *args, **kwargs)
24,056 | from typing import TypeVar, NoReturn, Optional, Iterator, List
from .option_list import NndctOption
from .option_def import Option, T
class NndctOption(object):
  """Registry of every NNDCT runtime option.

  Each class attribute is an ``Option`` descriptor (name, dtype, default,
  optional ``env`` override and help text) declared in ``option_def``.
  Values are read via ``NndctOption.<name>.value`` and written through
  ``set_option_value``.
  """
  nndct_help = Option(name="help", dtype=bool, default=False, action="store_true",
                      help="list all api usage description")
  nndct_quant_off = Option(name="quant_off", dtype=bool, default=False, action="store_true",
                           help="disable quantization flow")
  nndct_option_list = Option(name="option_list", dtype=bool, default=False, action="store_true",
                             help="list all the options in nndct")
  nndct_parse_debug = Option(name="parse_debug", dtype=int, default=0, env="NNDCT_PARSE_DEBUG",
                             help="logging graph, 1: torch raw graph, 2: nndct graph 3: nndct quant graph")
  nndct_logging_level = Option(name="logging_level", dtype=int, default=0, help="logging level")
  nndct_quant_mode = Option(name="quant_mode", dtype=int, default=0,
                            help="quant mode, 1:calibration, 2:quantization")
  nndct_dump_float_format = Option(name="dump_float_format", dtype=int, default=0,
                                   help="deploy check data format, 0: bin, 1: txt")
  nndct_record_slow_mode = Option(name="record_slow_mode", dtype=bool, default=False, action="store_true",
                                  help="record outputs every iteration")
  nndct_quant_opt = Option(name="quant_opt", dtype=int, default=3, help="quant opt level")
  nndct_relu6_replace = Option(name="relu6_replace", dtype=str, default='relu', help="relu6 replace operator")
  nndct_equalization = Option(name="equalization", dtype=bool, default=True, action="store_true",
                              help="enable weights equalization")
  # nndct_wes = Option(name="weights_equalizing_shift", dtype=bool, default=False, action="store_true",
  #                    help="enable weights equalizing shift")
  # nndct_wes_in_cle = Option(name="weights_equalizing_shift in cle", dtype=bool, default=False, action="store_true",
  #                           help="enable weights equalizing shift in cle")
  nndct_param_corr = Option(name="param_corr", dtype=bool, default=True, action="store_true",
                            help="enable parameter correction")
  nndct_param_corr_rate = Option(name="param_corr_rate", dtype=float, default=0.05, help="parameter correction rate")
  nndct_cv_app = Option(name="cv_app", dtype=bool, default=True, action="store_true", help="cv application")
  nndct_finetune_lr_factor = Option(name="finetune_lr_factor", dtype=float, default=0.01, help="finetune learning rate factor")
  nndct_partition_mode = Option(name="partition_mode", dtype=int, default=0,
                                help="0: quant stub controled. 1: custom op controled")
  nndct_stat = Option(name="stat", dtype=int, default=0, help="quantizer statistic level")
  nndct_jit_script_mode = Option(name="jit_script_mode", dtype=bool, default=False, action="store_true", help="enable torch script parser")
  nndct_diffs_mode = Option(name="diffs_mode", dtype=str, default='mse', help="diffs_mode: mse, maxmin")
  nndct_ft_mode = Option(name="ft_mode", dtype=int, default=1, help="1: mix mode 0: cache mode")
  nndct_visualize = Option(name="visualize", dtype=bool, default=False, action="store_true", help="visualize tensors")
  nndct_dump_no_quant_part = Option(name="dump_no_quant_part", dtype=bool, default=False, action="store_true", help="dump no quantized nodes")
  nndct_max_fix_position = Option(name="max_fix_position", dtype=int, default=12, help="maximum of fix position")
  nndct_use_torch_quantizer = Option(name="use_torch_quantizer", dtype=bool, default=False, action="store_true", help="enable torch quantizer")
  nndct_jit_trace = Option(name="jit_trace", dtype=bool, default=False, action="store_true", env="NNDCT_JIT_TRACE", help="parse graph from script tracing")
  nndct_jit_script = Option(name="jit_script", dtype=bool, default=False, action="store_true", help="parse graph from script")
  nndct_calib_histogram_bins = Option(name="calib_histogram_bins", dtype=int, default=2048, help="calibration histogram bins number")
  nndct_mse_start_bin = Option(name="mse_start_bin", dtype=int, default=1536, help="mse calibration method start bin")
  nndct_mse_stride = Option(name="mse_stride", dtype=int, default=16, help="mse calibration method stride")
  nndct_entropy_start_bin = Option(name="entropy_start_bin", dtype=int, default=1536, help="entropy calibration method start bin")
  nndct_entropy_stride = Option(name="entropy_stride", dtype=int, default=16, help="entropy calibration method stride")
  nndct_convert_relu6_to_relu = Option(name="convert_relu6_to_relu", dtype=bool, default=False, help="convert relu6 to relu")
  nndct_convert_sigmoid_to_hsigmoid = Option(name="convert_sigmoid_to_hsigmoid", dtype=bool, default=False, action="store_true", help="convert sigmoid to hsigmoid")
  nndct_convert_silu_to_hswish = Option(name="convert_silu_to_hswish", dtype=bool, default=False, action="store_true", help="convert silu to hswish")
  nndct_keep_first_last_layer_accuracy = Option(name="keep_first_last_layer_accuracy", dtype=bool, default=False, help="keep accuracy of first and last layer")
  nndct_keep_add_layer_accuracy = Option(name="keep_add_layer_accuracy", dtype=bool, default=False, help="keep accuracy of add layer")
  nndct_avg_pool_approximate = Option(name="avg_pool_approximate", dtype=bool, default=True, action="store_true", help="enable average pooling approximate for dpu")
  nndct_leaky_relu_approximate = Option(name="leaky_relu_approximate", dtype=bool, default=True, action="store_true", help="enable leaky relu approximate for dpu")
  nndct_conv_bn_merge = Option(name="conv_bn_merge", dtype=bool, default=True, action="store_true", help="enable conv and bn merge")
  nndct_input_quant_only = Option(name="input_quant_only", dtype=bool, default=False, action="store_false", help="only quantize the input")
  nndct_tensorrt_strategy = Option(name="tensorrt_strategy", dtype=bool, default=False, action="store_true", help="use quantization strategy as tensorrt")
  nndct_tensorrt_quant_algo = Option(name="tensorrt_quant_algo", dtype=bool, default=False, action="store_true", help="use tensorrt quantization algorithm")
  nndct_calibration_local = Option(name="calibration_local", dtype=bool, default=True, action="store_true", help="calibration in local batch data")
  nndct_change_concat_input_fix = Option(name="change_concat_input_fix", dtype=bool, default=False, action="store_true", help="change concat input nodes fix point to be the same as concat output node")
  nndct_change_pool_input_fix = Option(name="change_pool_input_fix", dtype=bool, default=False, action="store_true", help="change pooling input nodes fix point to be the same as their output node")
  nndct_change_add_input_fix = Option(name="change_add_input_fix", dtype=bool, default=False, action="store_true", help="change add input nodes fix point to be the identical")
  nndct_insert_concat_input_fix = Option(name="insert_concat_input_fix", dtype=bool, default=False, action="store_true", help="insert concat input nodes fix point to be the same as concat output node")
  nndct_export_jit = Option(name="export_jit", dtype=bool, default=False, action="store_true", env="NNDCT_EXPORT_JIT", help="export quant script by inserting fixneuron")
  nndct_deploy_check = Option(name="deploy_check", dtype=bool, default=False, action="store_true", help="dump deploy data in forward process")
  nndct_input_check = Option(name="input_check", dtype=bool, default=False, action="store_true", help="dump input float data in forward process")
  nndct_op_tanh_sigmoid_mode = Option(name="tanh_sigmoid_mode", dtype=str, default='quant_input_output', help="Tanh/sigmoid quantization mode: quant_input_output, table_look_up, simulation, aie2_lut_16bw")
  nndct_op_softmax_mode = Option(name="softmax_mode", dtype=str, default='quant_input_output', help="Softmax quantization mode: quant_input_output, hardware_pl, liyi, aie2_lut_16bw, bert_8bw, ipu_8bw")
  nndct_op_logsoftmax_mode = Option(name="logsoftmax_mode", dtype=str, default='quant_input_output', help="Logsoftmax quantization mode: quant_input_output, aie2_lut_16bw")
  nndct_op_gelu_mode = Option(name="gelu_mode", dtype=str, default='quant_input_output', help="GELU quantization mode: quant_input_output, dynamic_table")
  nndct_op_layernorm_mode = Option(name="layernorm_mode", dtype=str, default='quant_input_output', help="Layernorm quantization mode: quant_input_output, aie2_16bw, bert_8bw")
  nndct_ip_asr = Option(name="ip_asr", dtype=bool, default=False, action="store_true", help="asr quant method")
  nndct_ip_v70_bert = Option(name="ip_v70_bert", dtype=bool, default=False, action="store_true", help="bert v70 quant method")
  nndct_ip_v70_bert_qat = Option(name="ip_v70_bert_qat", dtype=bool, default=False, action="store_true", help="bert v70 quant method")
  nndct_use_old_inspector = Option(name="use_old_inspector", dtype=bool, default=False, action="store_true", env="NNDCT_USE_OLD_INSPECTOR", help="switch to old inspector")
  nndct_calib_before_finetune = Option(name="calib_before_finetune", dtype=bool, default=False, action="store_true", help="calibration before fast finetune")
  nndct_inspect_debug = Option(name="inspect_debug", dtype=bool, default=False, action="store_true", env="NNDCT_INSPECT_DEBUG", help="turn on inspector")
  nndct_op_instancenorm_mode = Option(name="instancenorm_mode", dtype=str, default='quant_input_output', help="Instancenorm quantization mode: quant_input_output, ipu_8bw")
  nndct_op_groupnorm_mode = Option(name="groupnorm_mode", dtype=str, default='quant_input_output', help="Groupnorm quantization mode: quant_input_output, ipu_8bw")
  nndct_native_onnx = Option(name="native_onnx", dtype=bool, default=False, action="store_true", help="export native quant-dequant onnx models")
  nndct_inspect_test = Option(name="inspect_test", dtype=bool, default=False, action="store_true", env="NNDCT_INSPECT_TEST", help="embed target related test in torch quantizer")
  nndct_target = Option(name="target", dtype=str, default="", env="NNDCT_TARGET", help="target name")
  nndct_traversal_graph_mode = Option(name="nndct_traversal_graph_mode", dtype=int, default=0, env="NNDCT_GRAPH_SEARCH", help="0: auto, 1:recursion, 2: iteration")
  nndct_op_sqrt_mode = Option(name="sqrt_mode", dtype=str, default='quant_input_output', help="sqrt quantization mode: quant_input_output, ipu_8bw")
  nndct_onnx_opset_version = Option(name="onnx_opset_version", dtype=int, default=-1, help="opset_version of dumped onnx graph")
  nndct_only_int_quant = Option(name="only_int_quant", dtype=bool, default=True, help="only int datatype quantization included")
  nndct_gemm88 = Option(name="gemm88", dtype=bool, default=False, action="store_true", help="only quant gemm88 and matmul88")
  nndct_pooling_split_mode = Option(name="nndct_pooling_split_mode", dtype=bool, default=False, help="default big pooling will split to small pooling")
  nndct_fx_mode = Option(name="fx_mode", dtype=bool, default=False, action="store_true", env="NNDCT_FX_MODE", help="turn on fx mode")
  nndct_dump_quant_config = Option(name="dump_quant_config", dtype=bool, default=False, action="store_true", env="NNDCT_DUMP_QUANT_CONFIG", help="dump quant config used")
  nndct_close_rich_input_output = Option(name="nndct_close_rich_input_output", dtype=bool, default=False, action="store_true", env="NNDCT_CLOSE_RICH_INPUT_OUTPUT", help="turn off rich I/O for compatibility with legacy code")
T = TypeVar('T')
def set_option_value(option_name: str, option_value: T) -> None:
  """Assign *option_value* to the NNDCT option named *option_name*.

  Args:
    option_name: attribute name of the option on ``NndctOption``
      (e.g. ``"nndct_quant_mode"``).
    option_value: new value stored on the option descriptor.

  Raises:
    KeyError: if *option_name* is not declared on ``NndctOption``.
  """
  # Fix: the original annotated the return as NoReturn, which means the
  # function never returns; this function returns None normally.
  NndctOption.__dict__[option_name].value = option_value
24,057 | from typing import TypeVar, NoReturn, Optional, Iterator, List
from .option_list import NndctOption
from .option_def import Option, T
class NndctOption(object):
nndct_help = Option(name="help", dtype=bool, default=False, action="store_true",
help="list all api usage description")
nndct_quant_off = Option(name="quant_off", dtype=bool, default=False, action="store_true",
help="disable quantization flow")
nndct_option_list = Option(name="option_list", dtype=bool, default=False, action="store_true",
help="list all the options in nndct")
nndct_parse_debug = Option(name="parse_debug", dtype=int, default=0, env="NNDCT_PARSE_DEBUG",
help="logging graph, 1: torch raw graph, 2: nndct graph 3: nndct quant graph")
nndct_logging_level = Option(name="logging_level", dtype=int, default=0, help="logging level")
nndct_quant_mode = Option(name="quant_mode", dtype=int, default=0,
help="quant mode, 1:calibration, 2:quantization")
nndct_dump_float_format = Option(name="dump_float_format", dtype=int, default=0,
help="deploy check data format, 0: bin, 1: txt")
nndct_record_slow_mode = Option(name="record_slow_mode", dtype=bool, default=False, action="store_true",
help="record outputs every iteration")
nndct_quant_opt = Option(name="quant_opt", dtype=int, default=3, help="quant opt level")
nndct_relu6_replace = Option(name="relu6_replace", dtype=str, default='relu', help="relu6 replace operator")
nndct_equalization = Option(name="equalization", dtype=bool, default=True, action="store_true",
help="enable weights equalization")
# nndct_wes = Option(name="weights_equalizing_shift", dtype=bool, default=False, action="store_true",
# help="enable weights equalizing shift")
# nndct_wes_in_cle = Option(name="weights_equalizing_shift in cle", dtype=bool, default=False, action="store_true",
# help="enable weights equalizing shift in cle")
nndct_param_corr = Option(name="param_corr", dtype=bool, default=True, action="store_true",
help="enable parameter correction")
nndct_param_corr_rate = Option(name="param_corr_rate", dtype=float, default=0.05, help="parameter correction rate")
nndct_cv_app = Option(name="cv_app", dtype=bool, default=True, action="store_true", help="cv application")
nndct_finetune_lr_factor = Option(name="finetune_lr_factor", dtype=float, default=0.01, help="finetune learning rate factor")
nndct_partition_mode = Option(name="partition_mode", dtype=int, default=0,
help="0: quant stub controled. 1: custom op controled")
nndct_stat = Option(name="stat", dtype=int, default=0, help="quantizer statistic level")
nndct_jit_script_mode = Option(name="jit_script_mode", dtype=bool, default=False, action="store_true", help="enable torch script parser")
nndct_diffs_mode = Option(name="diffs_mode", dtype=str, default='mse', help="diffs_mode: mse, maxmin")
nndct_ft_mode = Option(name="ft_mode", dtype=int, default=1, help="1: mix mode 0: cache mode")
nndct_visualize = Option(name="visualize", dtype=bool, default=False, action="store_true", help="visualize tensors")
nndct_dump_no_quant_part = Option(name="dump_no_quant_part", dtype=bool, default=False, action="store_true", help="dump no quantized nodes")
nndct_max_fix_position = Option(name="max_fix_position", dtype=int, default=12, help="maximum of fix position")
nndct_use_torch_quantizer = Option(name="use_torch_quantizer", dtype=bool, default=False, action="store_true", help="enable torch quantizer")
nndct_jit_trace = Option(name="jit_trace", dtype=bool, default=False, action="store_true", env="NNDCT_JIT_TRACE", help="parse graph from script tracing")
nndct_jit_script = Option(name="jit_script", dtype=bool, default=False, action="store_true", help="parse graph from script")
nndct_calib_histogram_bins = Option(name="calib_histogram_bins", dtype=int, default=2048, help="calibration histogram bins number")
nndct_mse_start_bin = Option(name="mse_start_bin", dtype=int, default=1536, help="mse calibration method start bin")
nndct_mse_stride = Option(name="mse_stride", dtype=int, default=16, help="mse calibration method stride")
nndct_entropy_start_bin = Option(name="entropy_start_bin", dtype=int, default=1536, help="entropy calibration method start bin")
nndct_entropy_stride = Option(name="entropy_stride", dtype=int, default=16, help="entropy calibration method stride")
nndct_convert_relu6_to_relu = Option(name="convert_relu6_to_relu", dtype=bool, default=False, help="convert relu6 to relu")
nndct_convert_sigmoid_to_hsigmoid = Option(name="convert_sigmoid_to_hsigmoid", dtype=bool, default=False, action="store_true", help="convert sigmoid to hsigmoid")
nndct_convert_silu_to_hswish = Option(name="convert_silu_to_hswish", dtype=bool, default=False, action="store_true", help="convert silu to hswish")
nndct_keep_first_last_layer_accuracy = Option(name="keep_first_last_layer_accuracy", dtype=bool, default=False, help="keep accuracy of first and last layer")
nndct_keep_add_layer_accuracy = Option(name="keep_add_layer_accuracy", dtype=bool, default=False, help="keep accuracy of add layer")
nndct_avg_pool_approximate = Option(name="avg_pool_approximate", dtype=bool, default=True, action="store_true", help="enable average pooling approximate for dpu")
nndct_leaky_relu_approximate = Option(name="leaky_relu_approximate", dtype=bool, default=True, action="store_true", help="enable leaky relu approximate for dpu")
nndct_conv_bn_merge = Option(name="conv_bn_merge", dtype=bool, default=True, action="store_true", help="enable conv and bn merge")
nndct_input_quant_only = Option(name="input_quant_only", dtype=bool, default=False, action="store_false", help="only quantize the input")
nndct_tensorrt_strategy = Option(name="tensorrt_strategy", dtype=bool, default=False, action="store_true", help="use quantization strategy as tensorrt")
nndct_tensorrt_quant_algo = Option(name="tensorrt_quant_algo", dtype=bool, default=False, action="store_true", help="use tensorrt quantization algorithm")
nndct_calibration_local = Option(name="calibration_local", dtype=bool, default=True, action="store_true", help="calibration in local batch data")
nndct_change_concat_input_fix = Option(name="change_concat_input_fix", dtype=bool, default=False, action="store_true", help="change concat input nodes fix point to be the same as concat output node")
nndct_change_pool_input_fix = Option(name="change_pool_input_fix", dtype=bool, default=False, action="store_true", help="change pooling input nodes fix point to be the same as their output node")
nndct_change_add_input_fix = Option(name="change_add_input_fix", dtype=bool, default=False, action="store_true", help="change add input nodes fix point to be the identical")
nndct_insert_concat_input_fix = Option(name="insert_concat_input_fix", dtype=bool, default=False, action="store_true", help="insert concat input nodes fix point to be the same as concat output node")
nndct_export_jit = Option(name="export_jit", dtype=bool, default=False, action="store_true", env="NNDCT_EXPORT_JIT", help="export quant script by inserting fixneuron")
nndct_deploy_check = Option(name="deploy_check", dtype=bool, default=False, action="store_true", help="dump deploy data in forward process")
nndct_input_check = Option(name="input_check", dtype=bool, default=False, action="store_true", help="dump input float data in forward process")
nndct_op_tanh_sigmoid_mode = Option(name="tanh_sigmoid_mode", dtype=str, default='quant_input_output', help="Tanh/sigmoid quantization mode: quant_input_output, table_look_up, simulation, aie2_lut_16bw")
nndct_op_softmax_mode = Option(name="softmax_mode", dtype=str, default='quant_input_output', help="Softmax quantization mode: quant_input_output, hardware_pl, liyi, aie2_lut_16bw, bert_8bw, ipu_8bw")
nndct_op_logsoftmax_mode = Option(name="logsoftmax_mode", dtype=str, default='quant_input_output', help="Logsoftmax quantization mode: quant_input_output, aie2_lut_16bw")
nndct_op_gelu_mode = Option(name="gelu_mode", dtype=str, default='quant_input_output', help="GELU quantization mode: quant_input_output, dynamic_table")
nndct_op_layernorm_mode = Option(name="layernorm_mode", dtype=str, default='quant_input_output', help="Layernorm quantization mode: quant_input_output, aie2_16bw, bert_8bw")
nndct_ip_asr = Option(name="ip_asr", dtype=bool, default=False, action="store_true", help="asr quant method")
nndct_ip_v70_bert = Option(name="ip_v70_bert", dtype=bool, default=False, action="store_true", help="bert v70 quant method")
nndct_ip_v70_bert_qat = Option(name="ip_v70_bert_qat", dtype=bool, default=False, action="store_true", help="bert v70 quant method")
nndct_use_old_inspector = Option(name="use_old_inspector", dtype=bool, default=False, action="store_true", env="NNDCT_USE_OLD_INSPECTOR", help="switch to old inspector")
nndct_calib_before_finetune = Option(name="calib_before_finetune", dtype=bool, default=False, action="store_true", help="calibration before fast finetune")
nndct_inspect_debug = Option(name="inspect_debug", dtype=bool, default=False, action="store_true", env="NNDCT_INSPECT_DEBUG", help="turn on inspector")
nndct_op_instancenorm_mode = Option(name="instancenorm_mode", dtype=str, default='quant_input_output', help="Instancenorm quantization mode: quant_input_output, ipu_8bw")
nndct_op_groupnorm_mode = Option(name="groupnorm_mode", dtype=str, default='quant_input_output', help="Groupnorm quantization mode: quant_input_output, ipu_8bw")
nndct_native_onnx = Option(name="native_onnx", dtype=bool, default=False, action="store_true", help="export native quant-dequant onnx models")
nndct_inspect_test = Option(name="inspect_test", dtype=bool, default=False, action="store_true", env="NNDCT_INSPECT_TEST", help="embed target related test in torch quantizer")
nndct_target = Option(name="target", dtype=str, default="", env="NNDCT_TARGET", help="target name")
nndct_traversal_graph_mode = Option(name="nndct_traversal_graph_mode", dtype=int, default=0, env="NNDCT_GRAPH_SEARCH",help="0: auto, 1:recursion, 2: iteration")
nndct_op_sqrt_mode = Option(name="sqrt_mode", dtype=str, default='quant_input_output', help="sqrt quantization mode: quant_input_output, ipu_8bw")
nndct_onnx_opset_version = Option(name="onnx_opset_version", dtype=int, default=-1, help="opset_version of dumped onnx graph")
nndct_only_int_quant = Option(name="only_int_quant", dtype=bool, default=True, help="only int datatype quantization included")
nndct_gemm88 = Option(name="gemm88", dtype=bool, default=False, action="store_true", help="only quant gemm88 and matmul88")
nndct_pooling_split_mode = Option(name="nndct_pooling_split_mode", dtype=bool, default=False, help="default big pooling will split to small pooling")
nndct_fx_mode = Option(name="fx_mode", dtype=bool, default=False, action="store_true", env="NNDCT_FX_MODE", help="turn on fx mode")
nndct_dump_quant_config = Option(name="dump_quant_config", dtype=bool, default=False, action="store_true", env="NNDCT_DUMP_QUANT_CONFIG", help="dump quant config used")
nndct_close_rich_input_output = Option(name="nndct_close_rich_input_output", dtype=bool, default=False, action="store_true", env="NNDCT_CLOSE_RICH_INPUT_OUTPUT", help="turn off rich I/O for compatibility with legacy code")
class Option(object):
  """NNDCT option definition.

  Attribute:
    name(str): option name (stored with the nndct prefix prepended)
    dtype(str, int, float, bool): option type
    default(T): default value of option
    action(str): 'store_true' / 'store_false' only work when dtype is 'bool' [default=None]
    help(str): description of option [default=None]
    framework(str): 'torch' / 'tensorflow' / 'all' [default='all']
    env(str): environment variable that can supply the value [default=None]
  Raises:
    DefineOptionError
  """

  def __init__(self, name: str, dtype: type, default: T, action: Optional[str] = None, framework: str = "all", help: Optional[str] = None, env=None):
    self._name = _OPTION_PREFFIX + name
    self._dtype = dtype
    self._default = default
    self._action = action
    self._framework = framework
    self._help = help
    self._env = env
    try:
      self._check_attribute_validataion_()
    except DefineOptionError as e:
      # A malformed option definition is a programming error: report and abort.
      print(e)
      _sys.exit(1)

  def __str__(self):
    return f"""--{self._name} : {self._help} (default={self._default})"""

  def _check_attribute_validataion_(self):
    """Validate the metadata declared in __init__; raise DefineOptionError on any inconsistency."""
    if self._dtype not in [str, int, float, bool]:
      raise DefineOptionError(self._name, msg=r"The dtype should be 'int/float/bool/string'.")
    if self._action not in [None, "store_true", "store_false"]:
      raise DefineOptionError(self._name, msg=r"The action value should be ''store_true' / 'store_false''.")
    if self._framework not in ["tensorflow", "torch", "all"]:
      raise DefineOptionError(self._name, msg=r"The framewok should be ''tensorflow''/''torch''/''all''.")
    if type(self._default) != self._dtype:
      raise DefineOptionError(self._name, msg=r"The default value type should be the same with dtype.")
    if self._dtype != bool and self._action is not None:
      raise DefineOptionError(self._name, msg=r"The action is only valid for bool type option.")

  def get_env_value(self):
    """Read the option value from its environment variable, coerced to dtype.

    Falls back to the default when the variable is unset.  For bool options,
    any value other than 'true'/'false'/'0' (case-insensitive) is treated as True.
    """
    if self._dtype == str:
      return os.getenv(self._env, default=self._default)
    elif self._dtype in [int, float]:
      data = os.getenv(self._env, default=self._default)
      return self._dtype(data)
    elif self._dtype == bool:
      data = os.getenv(self._env)
      if data is None:
        return self._default
      else:
        return {"true": True,
                "false": False,
                "0": False}.get(data.lower(), True)

  # BUGFIX: these accessors were plain methods (and `value` was defined twice,
  # the second def silently shadowing the first).  Call sites such as
  # add_valid_nndct_option use attribute access (`option.dtype == bool`,
  # `option.value = x`), so they must be properties; restore the lost
  # @property / @value.setter decorators.
  @property
  def dtype(self):
    return self._dtype

  @property
  def action(self):
    return self._action

  @property
  def framework(self):
    return self._framework

  @property
  def value(self):
    # Precedence: explicit assignment > environment variable > default.
    if hasattr(self, '_value'):
      return self._value
    elif self._env is not None:
      return self.get_env_value()
    else:
      return self._default

  @value.setter
  def value(self, value):
    # None means the flag appeared with no argument: apply the declared action.
    if value is None:
      self._value = True if self._action == "store_true" else False
    else:
      self._value = value
def get_all_options() -> Iterator:
  """Yield every Option instance registered on the NndctOption class."""
  for candidate in vars(NndctOption).values():
    if isinstance(candidate, Option):
      yield candidate
from typing import TypeVar, NoReturn, Optional, Iterator, List
from .option_list import NndctOption
from .option_def import Option, T
class NndctOption(object):
  """Central registry of NNDCT configuration options.

  Each class attribute is an ``Option`` instance describing one tunable switch
  (name, dtype, default, optional action and environment variable).  Values are
  supplied programmatically, through command-line flags consumed by
  ``add_valid_nndct_option``, or through the env var named in ``env``; read the
  effective value via ``NndctOption.<attr>.value``.
  """

  # --- general / debugging switches ---
  nndct_help = Option(name="help", dtype=bool, default=False, action="store_true", help="list all api usage description")
  nndct_quant_off = Option(name="quant_off", dtype=bool, default=False, action="store_true", help="disable quantization flow")
  nndct_option_list = Option(name="option_list", dtype=bool, default=False, action="store_true", help="list all the options in nndct")
  nndct_parse_debug = Option(name="parse_debug", dtype=int, default=0, env="NNDCT_PARSE_DEBUG", help="logging graph, 1: torch raw graph, 2: nndct graph 3: nndct quant graph")
  nndct_logging_level = Option(name="logging_level", dtype=int, default=0, help="logging level")
  nndct_quant_mode = Option(name="quant_mode", dtype=int, default=0, help="quant mode, 1:calibration, 2:quantization")
  nndct_dump_float_format = Option(name="dump_float_format", dtype=int, default=0, help="deploy check data format, 0: bin, 1: txt")
  nndct_record_slow_mode = Option(name="record_slow_mode", dtype=bool, default=False, action="store_true", help="record outputs every iteration")
  nndct_quant_opt = Option(name="quant_opt", dtype=int, default=3, help="quant opt level")
  nndct_relu6_replace = Option(name="relu6_replace", dtype=str, default='relu', help="relu6 replace operator")
  nndct_equalization = Option(name="equalization", dtype=bool, default=True, action="store_true", help="enable weights equalization")
  # nndct_wes = Option(name="weights_equalizing_shift", dtype=bool, default=False, action="store_true",
  # help="enable weights equalizing shift")
  # nndct_wes_in_cle = Option(name="weights_equalizing_shift in cle", dtype=bool, default=False, action="store_true",
  # help="enable weights equalizing shift in cle")
  nndct_param_corr = Option(name="param_corr", dtype=bool, default=True, action="store_true", help="enable parameter correction")
  nndct_param_corr_rate = Option(name="param_corr_rate", dtype=float, default=0.05, help="parameter correction rate")
  nndct_cv_app = Option(name="cv_app", dtype=bool, default=True, action="store_true", help="cv application")
  nndct_finetune_lr_factor = Option(name="finetune_lr_factor", dtype=float, default=0.01, help="finetune learning rate factor")
  nndct_partition_mode = Option(name="partition_mode", dtype=int, default=0, help="0: quant stub controled. 1: custom op controled")
  nndct_stat = Option(name="stat", dtype=int, default=0, help="quantizer statistic level")
  nndct_jit_script_mode = Option(name="jit_script_mode", dtype=bool, default=False, action="store_true", help="enable torch script parser")
  nndct_diffs_mode = Option(name="diffs_mode", dtype=str, default='mse', help="diffs_mode: mse, maxmin")
  nndct_ft_mode = Option(name="ft_mode", dtype=int, default=1, help="1: mix mode 0: cache mode")
  nndct_visualize = Option(name="visualize", dtype=bool, default=False, action="store_true", help="visualize tensors")
  nndct_dump_no_quant_part = Option(name="dump_no_quant_part", dtype=bool, default=False, action="store_true", help="dump no quantized nodes")
  nndct_max_fix_position = Option(name="max_fix_position", dtype=int, default=12, help="maximum of fix position")
  nndct_use_torch_quantizer = Option(name="use_torch_quantizer", dtype=bool, default=False, action="store_true", help="enable torch quantizer")
  nndct_jit_trace = Option(name="jit_trace", dtype=bool, default=False, action="store_true", env="NNDCT_JIT_TRACE", help="parse graph from script tracing")
  nndct_jit_script = Option(name="jit_script", dtype=bool, default=False, action="store_true", help="parse graph from script")
  # --- calibration tuning ---
  nndct_calib_histogram_bins = Option(name="calib_histogram_bins", dtype=int, default=2048, help="calibration histogram bins number")
  nndct_mse_start_bin = Option(name="mse_start_bin", dtype=int, default=1536, help="mse calibration method start bin")
  nndct_mse_stride = Option(name="mse_stride", dtype=int, default=16, help="mse calibration method stride")
  nndct_entropy_start_bin = Option(name="entropy_start_bin", dtype=int, default=1536, help="entropy calibration method start bin")
  nndct_entropy_stride = Option(name="entropy_stride", dtype=int, default=16, help="entropy calibration method stride")
  # --- graph rewriting / operator substitution ---
  nndct_convert_relu6_to_relu = Option(name="convert_relu6_to_relu", dtype=bool, default=False, help="convert relu6 to relu")
  nndct_convert_sigmoid_to_hsigmoid = Option(name="convert_sigmoid_to_hsigmoid", dtype=bool, default=False, action="store_true", help="convert sigmoid to hsigmoid")
  nndct_convert_silu_to_hswish = Option(name="convert_silu_to_hswish", dtype=bool, default=False, action="store_true", help="convert silu to hswish")
  nndct_keep_first_last_layer_accuracy = Option(name="keep_first_last_layer_accuracy", dtype=bool, default=False, help="keep accuracy of first and last layer")
  nndct_keep_add_layer_accuracy = Option(name="keep_add_layer_accuracy", dtype=bool, default=False, help="keep accuracy of add layer")
  nndct_avg_pool_approximate = Option(name="avg_pool_approximate", dtype=bool, default=True, action="store_true", help="enable average pooling approximate for dpu")
  nndct_leaky_relu_approximate = Option(name="leaky_relu_approximate", dtype=bool, default=True, action="store_true", help="enable leaky relu approximate for dpu")
  nndct_conv_bn_merge = Option(name="conv_bn_merge", dtype=bool, default=True, action="store_true", help="enable conv and bn merge")
  # NOTE(review): action="store_false" with default=False differs from every
  # other boolean flag in this registry — confirm this combination is intended.
  nndct_input_quant_only = Option(name="input_quant_only", dtype=bool, default=False, action="store_false", help="only quantize the input")
  nndct_tensorrt_strategy = Option(name="tensorrt_strategy", dtype=bool, default=False, action="store_true", help="use quantization strategy as tensorrt")
  nndct_tensorrt_quant_algo = Option(name="tensorrt_quant_algo", dtype=bool, default=False, action="store_true", help="use tensorrt quantization algorithm")
  nndct_calibration_local = Option(name="calibration_local", dtype=bool, default=True, action="store_true", help="calibration in local batch data")
  # --- fix-point alignment across ops ---
  nndct_change_concat_input_fix = Option(name="change_concat_input_fix", dtype=bool, default=False, action="store_true", help="change concat input nodes fix point to be the same as concat output node")
  nndct_change_pool_input_fix = Option(name="change_pool_input_fix", dtype=bool, default=False, action="store_true", help="change pooling input nodes fix point to be the same as their output node")
  nndct_change_add_input_fix = Option(name="change_add_input_fix", dtype=bool, default=False, action="store_true", help="change add input nodes fix point to be the identical")
  nndct_insert_concat_input_fix = Option(name="insert_concat_input_fix", dtype=bool, default=False, action="store_true", help="insert concat input nodes fix point to be the same as concat output node")
  nndct_export_jit = Option(name="export_jit", dtype=bool, default=False, action="store_true", env="NNDCT_EXPORT_JIT", help="export quant script by inserting fixneuron")
  nndct_deploy_check = Option(name="deploy_check", dtype=bool, default=False, action="store_true", help="dump deploy data in forward process")
  nndct_input_check = Option(name="input_check", dtype=bool, default=False, action="store_true", help="dump input float data in forward process")
  # --- per-operator quantization modes ---
  nndct_op_tanh_sigmoid_mode = Option(name="tanh_sigmoid_mode", dtype=str, default='quant_input_output', help="Tanh/sigmoid quantization mode: quant_input_output, table_look_up, simulation, aie2_lut_16bw")
  nndct_op_softmax_mode = Option(name="softmax_mode", dtype=str, default='quant_input_output', help="Softmax quantization mode: quant_input_output, hardware_pl, liyi, aie2_lut_16bw, bert_8bw, ipu_8bw")
  nndct_op_logsoftmax_mode = Option(name="logsoftmax_mode", dtype=str, default='quant_input_output', help="Logsoftmax quantization mode: quant_input_output, aie2_lut_16bw")
  nndct_op_gelu_mode = Option(name="gelu_mode", dtype=str, default='quant_input_output', help="GELU quantization mode: quant_input_output, dynamic_table")
  nndct_op_layernorm_mode = Option(name="layernorm_mode", dtype=str, default='quant_input_output', help="Layernorm quantization mode: quant_input_output, aie2_16bw, bert_8bw")
  nndct_ip_asr = Option(name="ip_asr", dtype=bool, default=False, action="store_true", help="asr quant method")
  nndct_ip_v70_bert = Option(name="ip_v70_bert", dtype=bool, default=False, action="store_true", help="bert v70 quant method")
  nndct_ip_v70_bert_qat = Option(name="ip_v70_bert_qat", dtype=bool, default=False, action="store_true", help="bert v70 quant method")
  nndct_use_old_inspector = Option(name="use_old_inspector", dtype=bool, default=False, action="store_true", env="NNDCT_USE_OLD_INSPECTOR", help="switch to old inspector")
  nndct_calib_before_finetune = Option(name="calib_before_finetune", dtype=bool, default=False, action="store_true", help="calibration before fast finetune")
  nndct_inspect_debug = Option(name="inspect_debug", dtype=bool, default=False, action="store_true", env="NNDCT_INSPECT_DEBUG", help="turn on inspector")
  nndct_op_instancenorm_mode = Option(name="instancenorm_mode", dtype=str, default='quant_input_output', help="Instancenorm quantization mode: quant_input_output, ipu_8bw")
  nndct_op_groupnorm_mode = Option(name="groupnorm_mode", dtype=str, default='quant_input_output', help="Groupnorm quantization mode: quant_input_output, ipu_8bw")
  nndct_native_onnx = Option(name="native_onnx", dtype=bool, default=False, action="store_true", help="export native quant-dequant onnx models")
  nndct_inspect_test = Option(name="inspect_test", dtype=bool, default=False, action="store_true", env="NNDCT_INSPECT_TEST", help="embed target related test in torch quantizer")
  nndct_target = Option(name="target", dtype=str, default="", env="NNDCT_TARGET", help="target name")
  nndct_traversal_graph_mode = Option(name="nndct_traversal_graph_mode", dtype=int, default=0, env="NNDCT_GRAPH_SEARCH", help="0: auto, 1:recursion, 2: iteration")
  nndct_op_sqrt_mode = Option(name="sqrt_mode", dtype=str, default='quant_input_output', help="sqrt quantization mode: quant_input_output, ipu_8bw")
  nndct_onnx_opset_version = Option(name="onnx_opset_version", dtype=int, default=-1, help="opset_version of dumped onnx graph")
  nndct_only_int_quant = Option(name="only_int_quant", dtype=bool, default=True, help="only int datatype quantization included")
  nndct_gemm88 = Option(name="gemm88", dtype=bool, default=False, action="store_true", help="only quant gemm88 and matmul88")
  nndct_pooling_split_mode = Option(name="nndct_pooling_split_mode", dtype=bool, default=False, help="default big pooling will split to small pooling")
  nndct_fx_mode = Option(name="fx_mode", dtype=bool, default=False, action="store_true", env="NNDCT_FX_MODE", help="turn on fx mode")
  nndct_dump_quant_config = Option(name="dump_quant_config", dtype=bool, default=False, action="store_true", env="NNDCT_DUMP_QUANT_CONFIG", help="dump quant config used")
  nndct_close_rich_input_output = Option(name="nndct_close_rich_input_output", dtype=bool, default=False, action="store_true", env="NNDCT_CLOSE_RICH_INPUT_OUTPUT", help="turn off rich I/O for compatibility with legacy code")
class Option(object):
  """NNDCT option definition.

  Attribute:
    name(str): option name (stored with the nndct prefix prepended)
    dtype(str, int, float, bool): option type
    default(T): default value of option
    action(str): 'store_true' / 'store_false' only work when dtype is 'bool' [default=None]
    help(str): description of option [default=None]
    framework(str): 'torch' / 'tensorflow' / 'all' [default='all']
    env(str): environment variable that can supply the value [default=None]
  Raises:
    DefineOptionError
  """

  def __init__(self, name: str, dtype: type, default: T, action: Optional[str] = None, framework: str = "all", help: Optional[str] = None, env=None):
    self._name = _OPTION_PREFFIX + name
    self._dtype = dtype
    self._default = default
    self._action = action
    self._framework = framework
    self._help = help
    self._env = env
    try:
      self._check_attribute_validataion_()
    except DefineOptionError as e:
      # A malformed option definition is a programming error: report and abort.
      print(e)
      _sys.exit(1)

  def __str__(self):
    return f"""--{self._name} : {self._help} (default={self._default})"""

  def _check_attribute_validataion_(self):
    """Validate the metadata declared in __init__; raise DefineOptionError on any inconsistency."""
    if self._dtype not in [str, int, float, bool]:
      raise DefineOptionError(self._name, msg=r"The dtype should be 'int/float/bool/string'.")
    if self._action not in [None, "store_true", "store_false"]:
      raise DefineOptionError(self._name, msg=r"The action value should be ''store_true' / 'store_false''.")
    if self._framework not in ["tensorflow", "torch", "all"]:
      raise DefineOptionError(self._name, msg=r"The framewok should be ''tensorflow''/''torch''/''all''.")
    if type(self._default) != self._dtype:
      raise DefineOptionError(self._name, msg=r"The default value type should be the same with dtype.")
    if self._dtype != bool and self._action is not None:
      raise DefineOptionError(self._name, msg=r"The action is only valid for bool type option.")

  def get_env_value(self):
    """Read the option value from its environment variable, coerced to dtype.

    Falls back to the default when the variable is unset.  For bool options,
    any value other than 'true'/'false'/'0' (case-insensitive) is treated as True.
    """
    if self._dtype == str:
      return os.getenv(self._env, default=self._default)
    elif self._dtype in [int, float]:
      data = os.getenv(self._env, default=self._default)
      return self._dtype(data)
    elif self._dtype == bool:
      data = os.getenv(self._env)
      if data is None:
        return self._default
      else:
        return {"true": True,
                "false": False,
                "0": False}.get(data.lower(), True)

  # BUGFIX: these accessors were plain methods (and `value` was defined twice,
  # the second def silently shadowing the first).  Call sites such as
  # add_valid_nndct_option use attribute access (`option.dtype == bool`,
  # `option.value = x`), so they must be properties; restore the lost
  # @property / @value.setter decorators.
  @property
  def dtype(self):
    return self._dtype

  @property
  def action(self):
    return self._action

  @property
  def framework(self):
    return self._framework

  @property
  def value(self):
    # Precedence: explicit assignment > environment variable > default.
    if hasattr(self, '_value'):
      return self._value
    elif self._env is not None:
      return self.get_env_value()
    else:
      return self._default

  @value.setter
  def value(self, value):
    # None means the flag appeared with no argument: apply the declared action.
    if value is None:
      self._value = True if self._action == "store_true" else False
    else:
      self._value = value
def add_valid_nndct_option(argv: List[str], option: str, cmd_position: int, framework: str)-> List[str]:
  """Try to consume one NNDCT command-line token.

  Args:
    argv: the full command line (needed to look up a space-separated value).
    option: the token at ``cmd_position``; expected forms are ``--name``,
      ``--name value`` or ``--name=value``.
    cmd_position: index of ``option`` inside ``argv``.
    framework: caller's framework tag ('torch'/'tensorflow'); an option is
      accepted only when it targets this framework or 'all'.

  Returns:
    The argv items that were recognized and should be stripped from the
    command line; empty list when the token is not a valid NNDCT option.
  """

  def _set_nndct_option(option_name: str, option_value: str) -> bool:
    # Look up the Option on the NndctOption registry and, if it matches the
    # running framework, coerce and assign the value.  Returns False when the
    # name is unknown, the framework mismatches, or the value is malformed.

    def _get_option_by_name() -> Optional[Option]:
      return NndctOption.__dict__.get(option_name, None)

    option = _get_option_by_name()
    if option is None: return False
    if option.framework != framework and option.framework != 'all': return False
    if option.dtype == bool:
      if option_value is None and option.action is None:
        # A bare boolean flag is only legal when an action defines its meaning.
        return False
      elif option_value:
        # Explicit value must be spelled exactly "True" or "False".
        if option_value not in ["True", "False"]:
          return False
        option_value = True if option_value == "True" else False
        option.value = option_value
        return True
      else:
        # No explicit value: the Option setter applies its
        # store_true/store_false action (option_value is None here).
        option.value = option_value
        return True
    else:
      try:
        option_value = option.dtype(option_value)
      except ValueError:
        return False
      else:
        option.value = option_value
        return True

  def _is_valid_option():
    return option.startswith("--")

  remove_item = []
  if not _is_valid_option(): return remove_item
  try:
    equal_symbol_idx = option.index("=")
  except ValueError:
    # "--name" or "--name value" form: the value (if any) is the next argv
    # token, unless that token is itself another flag.
    remove_next_cmd = False
    option_name = option[2:]
    if cmd_position == len(argv)-1:
      option_value = None
    elif argv[cmd_position + 1].startswith("--") or argv[cmd_position + 1].startswith("-"):
      option_value = None
    else:
      option_value = argv[cmd_position + 1]
      remove_next_cmd = True
    if _set_nndct_option(option_name, option_value):
      remove_item.append(option)
      if remove_next_cmd: remove_item.append(option_value)
  else:
    # "--name=value" form; a trailing "=" with no value is rejected.
    if equal_symbol_idx == len(option)-1: return remove_item
    option_name = option[2:equal_symbol_idx]
    option_value = option[equal_symbol_idx+1:]
    if _set_nndct_option(option_name, option_value):
      remove_item.append(option)
  return remove_item
import h5py
import json
from nndct_shared.nndct_graph.base_tensor import Tensor
class GraphHDF5Saver():
  """Serialize an nndct graph's topology (no weights) into a Keras-style HDF5 file.

  The node/edge structure is encoded as a JSON model_config attribute so the
  file can be opened by generic Keras/HDF5 visualization tools.
  """

  def __init__(self, nndct_graph):
    self.graph = nndct_graph

  def get_node_config(self, node):
    """Collect a JSON-serializable description of a single node.

    Includes index, name, dtype, per-tensor input/output metadata and all
    op attributes that do not contain Tensor objects.
    """
    node_info = dict()
    node_info['idx'] = node.idx
    node_info['name'] = node.name
    node_info['dtype'] = str(node.dtype)
    for idx, tensor in enumerate(node.in_tensors):
      node_info['in_tensors{}.name'.format(idx)] = tensor.name
      node_info['in_tensors{}.shape'.format(idx)] = tensor.shape
      node_info['in_tensors{}.dtype'.format(idx)] = tensor.dtype
    for idx, tensor in enumerate(node.out_tensors):
      node_info['out_tensors{}.name'.format(idx)] = tensor.name
      node_info['out_tensors{}.shape'.format(idx)] = tensor.shape
      node_info['out_tensors{}.dtype'.format(idx)] = tensor.dtype
    for attr_enum, attr in node.op.attrs.items():
      value = attr.value
      # Tensor-valued attributes (or sequences containing tensors) are not
      # JSON-serializable, so they are skipped.
      if isinstance(value, Tensor):
        continue
      if isinstance(value, (tuple, list)) and any(
          isinstance(v, Tensor) for v in value):
        continue
      node_info['Attr.{}'.format(attr_enum.name)] = value
    return node_info

  def get_model_config(self):
    """Build the Keras-like model_config dict for the whole graph."""
    model_config = {'name': self.graph.name}
    model_config['layers'] = list()
    for node in self.graph.nodes:
      node_info = dict()
      node_info['class_name'] = node.op_type
      # Keras inbound_nodes format: [[[layer_name, node_index, tensor_index, kwargs], ...]]
      node_info['name'] = node.name
      node_info['inbound_nodes'] = [[[i, 0, 0, {}] for i in node.in_nodes]]
      node_info['config'] = self.get_node_config(node)
      model_config['layers'].append(node_info)
    return model_config

  def save(self, hdf5_path):
    """Write the graph description to *hdf5_path* as HDF5 file attributes."""
    config = self.get_model_config()
    model_config = {'class_name': 'Functional', 'config': config}
    metadata = dict(model_config=model_config)
    # Context manager guarantees the file handle is closed even on failure
    # (replaces the previous manual open/try/finally).
    with h5py.File(hdf5_path, mode='w') as f:
      for k, v in metadata.items():
        if isinstance(v, (dict, list, tuple)):
          f.attrs[k] = json.dumps(v).encode('utf8')
        else:
          f.attrs[k] = v
      f.flush()
def save_graph(nndct_graph, hdf5_path='graph.hdf5'):
  """Convenience wrapper: dump *nndct_graph*'s topology to *hdf5_path*."""
  saver = GraphHDF5Saver(nndct_graph)
  saver.save(hdf5_path)
import math
import itertools
from typing import List, Dict, Any, NoReturn, Tuple
import numpy as np
from functools import partial
from nndct_shared.base import NNDCT_OP, NNDCT_KEYS
from nndct_shared.nndct_graph import Tensor, Node
from .xgraph import XGraph
from nndct_shared.utils import calculate_op_scale, DataXopError
NndctQuantInfo = Dict[str, Dict[str, List[int]]]
def _get_xir_attr_from_node(node: Node):
def scale(xgraph, node, quant_config):
class XGraph(object):
def __init__(self, name: str):
def _check_inputs(self, input_ops):
def create_const_op(self, name: str, data: Optional[np.ndarray]) -> NoReturn:
def create_input_transpose_ops(self, input_list: List[Op], input_tensors: 'List[base_tensor::Tensor]'):
def create_normal_op(self,
name: str,
kind: str,
tensor: Optional[np.ndarray] = None,
attrs: Optional[Dict[str, Any]] = None,
input_ops: Optional[List[Op]] = None) -> Op:
def create_fixed_const_op(self, name: str, data: np.ndarray,
quant_info: NndctQuantInfo) -> Op:
def create_fixed_normal_op(self,
name: str,
kind: str,
quant_info: NndctQuantInfo,
tensor: Optional[np.ndarray] = None,
attrs: Optional[Dict[str, Any]] = None,
input_ops: Optional[List[Op]] = None) -> Op:
def create_input_fix_ops(self, input_list: List[Op], key_name: str, quant_info: NndctQuantInfo):
def create_fix_op(self, input: Op, key_name: str,
quant_info: NndctQuantInfo, id: Optional[int] = None, post_fix: bool = True) -> Optional[Op]:
def _get_fix_info(name: str, quant_info: NndctQuantInfo) -> Sequence[int]:
def get_op_by_name(self, name: str) -> Op:
def get_op_output_shape(self, name: str) -> List[int]:
def export_to_xmodel(self, fname: str) -> NoReturn:
def export_to_img(self, fname: str) -> NoReturn:
def graph(self):
def avgpool(xgraph: XGraph, node: Node, quant_config: NndctQuantInfo) -> NoReturn:
  """Lower an average-pooling node into XIR ops with a DPU-style rescale.

  Emits an ``avgpool2d`` op followed by a ``mul`` with a constant correction
  factor so that the fixed-point average matches the DPU's multiply/shift
  implementation of 1/kernel_area.
  """
  kernel = node.node_attr(node.op.AttrName.KERNEL)
  # Correction factors for kernel sizes the DPU implements with a known
  # multiply/shift pair.
  if kernel == [3, 3]:
    rescale = 9.0 * 7.0 / 64.0
  elif kernel == [5, 5]:
    rescale = 25.0 * 10.0 / 256.0
  elif kernel in [[6, 6], [3, 6], [6, 3]]:
    rescale = 36.0 * 7.0 / 256.0
  elif kernel == [7, 7]:
    rescale = 49.0 * 21.0 / 1024.0
  elif kernel == [14, 14]:
    rescale = 196.0 * 21.0 / 4096.0
  else:
    # Generic kernel: search for the multiplier/shift pair that best
    # approximates 1/receptive_area.
    # NOTE(review): this appears to duplicate calculate_op_scale (imported at
    # the top of the file) — consider unifying; verify signatures first.
    rec = kernel[0] * kernel[1]
    search_bound = math.ceil(math.log(rec * 128, 2))
    best_err, best_mul, best_shift = 1.0, 0.0, 0.0
    for cand_shift in range(search_bound):
      pow2 = 2 ** cand_shift
      cand_mul = round(pow2 / rec)
      cand_err = abs(cand_mul / pow2 - 1 / rec)
      if cand_err < best_err:
        best_err, best_mul, best_shift = cand_err, cand_mul, cand_shift
    rescale = rec * best_mul / (2 ** best_shift)

  attrs = _get_xir_attr_from_node(node)
  pool_inputs: Dict[str, List["xir.Op"]] = {}
  pool_inputs["input"] = [xgraph.get_op_by_name(node.in_nodes[0])]
  pool_inputs["input"] = xgraph.create_input_fix_ops(
      pool_inputs["input"], node.name, quant_config)
  xgraph.create_fixed_normal_op(
      node.name + "_i0", "avgpool2d", quant_config, attrs=attrs,
      input_ops=pool_inputs)
  # Constant holding the correction factor, then the final mul that carries
  # the node's own name.
  xgraph.create_fixed_const_op(
      name=node.name + "_i1",
      data=np.array([rescale], dtype=np.float32),
      quant_info=quant_config)
  mul_inputs: Dict[str, List["xir.Op"]] = {
      "input": [xgraph.get_op_by_name(node.name + "_i0"),
                xgraph.get_op_by_name(node.name + "_i1")]
  }
  xgraph.create_fixed_normal_op(
      node.name, "mul", quant_config, input_ops=mul_inputs)
from typing import Mapping
from nndct_shared.expanding.spec import BatchNormStructuredExpanding, InstanceNormStructuredExpanding, \
DataInsert, GenericStructuredExpanding, StructuredExpanding, WeightedNodeStructuredExpanding
from nndct_shared.nndct_graph.base_graph import Graph
from nndct_shared.nndct_graph.base_node import Node
from nndct_shared.nndct_graph.base_tensor import Tensor
from nndct_shared.utils import registry
from nndct_shared.base.key_names import NNDCT_OP as OpTypes
from nndct_shared.pruning.pruning_lib import is_depthwise_conv, _DISALLOW_PRUNED_INPUT_OPS, find_prunable_ancestor, CONV_OPS
import numpy as np
class DataInsert(object):
def __init__(self, position: int = 0, added_num_channels: int = 0, added_data: Tensor = None) -> None:
def position(self) -> int:
def added_num_channels(self) -> int:
def added_data(self) -> Tensor:
def added_data(self, data: Tensor) -> None:
class StructuredExpanding(object):
def __init__(self, node_name: str) -> None:
def node_name(self) -> str:
def in_dim(self) -> int:
def in_dim(self, v: int) -> None:
def out_dim(self) -> int:
def out_dim(self, v: int) -> None:
def added_out_channel(self) -> int:
def added_in_channel(self) -> int:
def out_inserts(self) -> List[DataInsert]:
class GenericStructuredExpanding(StructuredExpanding):
def __init__(self, node_name: str) -> None:
def added_out_channel(self) -> int:
def added_in_channel(self) -> int:
def out_inserts(self) -> List[DataInsert]:
def add_insert(self, insert: DataInsert):
class Graph(GraphBase):
def __init__(self, graph_name=None):
def __contains__(self, node_or_name: Union[str, Node]) -> bool:
def __deepcopy__(self, memo):
def clone(self):
def clone_from(self, src_graph):
def create_node_from(self, src_node, local_map, converted_nodes):
def node(self, name):
def get_node_by_idx(self, idx):
def get_input_nodes(self):
def get_input_tensors(self, input_args):
def get_return_tensors(self):
def add_node(self, node: Node) -> None:
def free_node(self, node):
def remove_node(self, node):
def remove_node_by_types(self, node_types: List[str]) -> Dict[str, str]:
def find_nodes_by_types(self, node_types: List[NNDCT_OP]):
def reconnect_nodes(self):
def connect_nodes(self):
def parents(self, node: Union[Node, str]) -> List[Node]:
def children(self, node: Union[Node, str]) -> List[Node]:
def add_tensor(self, tensor):
def tensor(self, name):
def param_tensor(self, name):
def add_end_tensor(self, tensor):
def __repr__(self):
def __str__(self):
def description(self):
def set_node_id(self, index, node):
def create_subgraph_from_nodeset(cls, origin_graph, nodeset, graph_name):
def top_sort_nodeset(nodeset: Sequence[Node]) -> List[Node]:
def get_topological_graph_nodes_list(self):
def name(self):
def name(self, name):
def nodes(self):
def reverse_nodes(self):
def tensors(self):
def end_tensors(self):
def inputs(self):
def outputs(self):
def op_types(self):
def append_node(self, node):
def add_param_name(self, param_name):
def param_names(self):
def block(self):
def is_tensor_in_graph(self, tensor_name):
def update_node_idx(self, node, index):
def clear_node_id_map(self):
def remove_tensor(self, tensor):
def insert_node_between_nodes(self, new_node, parent_node, child_node):
def set_top_block(self, block):
def add_block(self, block):
def all_blocks(self):
def all_nodes(self):
def head_node(self):
def return_node(self):
def clean_tensors_data(self):
def assign_node_topological_name(self, prefix="", suffix=""):
def _assgin_nodes(nodes):
def simple_description(self):
def get_node_simple_info(node):
def get_md5(self):
class Node(NodeBase):
def __init__(self, name: str,
op: Optional[str] = None,
dtype: Optional[str] = None,
in_quant_part: Optional[bool] = False):
def __repr__(self):
def __str__(self):
def __deepcopy__(self, memo):
def clone_from(self, src_node, local_map):
def scope_name(self):
def scope_name(self, name):
def description(self):
def clean_connections(self):
def add_in_node(self, node_name: str):
def add_out_node(self, node_name: str):
def in_tensors(self):
def out_tensors(self):
def in_nodes(self):
def out_nodes(self):
def node_attr(self, key):
def set_node_attr(self, key, value):
def node_config(self, key):
def set_node_config(self, key, value):
def has_bound_params(self):
def op_type(self):
def name(self):
def name(self, value):
def idx(self):
def idx(self, index):
def op(self):
def op(self, op):
def dtype(self):
def in_quant_part(self) -> bool:
def in_quant_part(self, quant_state: bool) -> None:
def module(self):
def module(self, module):
def blocks(self):
def add_block(self, block):
def has_custom_op(self):
def get_attr_val(self, attr_name):
def merged(self):
def merged(self, flag):
def transpose_in_order(self):
def transpose_in_order(self, order):
def transpose_out_order(self):
def transpose_out_order(self, order):
def set_node_attr_tensor_value(self, old_tensor, new_tensor):
def destroy(self):
def remove_output(self, i):
def replace_input_at(self, i, new_tensor):
def remove_input(self, i):
def remove_all_inputs(self):
def drop_input(self, i):
def find_use_for_input(self, i):
def owning_block(self):
def owning_block(self, block):
def owning_graph(self):
def owning_graph(self, graph):
def topo_position(self):
def topo_position(self, pos):
def insert_before(self, node):
def insert_after(self, node):
def update_topo_position(self):
def next_node(self):
def next_node(self, node):
def prev_node(self):
def prev_node(self, node):
def in_node_list(self):
def remove_from_list(self):
def add_in_tensor(self, tensor):
def add_out_tensor(self, tensor):
def target_device(self):
def target_device(self, device):
def scope_name(self):
def scope_name(self, scope_name):
def source_range(self):
def source_range(self, source_range):
def normalized_name(self):
def normalized_name(self, name):
def modify_concat(graph: Graph, node: Node, expanding_desc: Mapping[str, StructuredExpanding]):
  """Recompute the structured-expanding description of a concat node.

  The concat's output dimension is the sum of its inputs' (already expanded)
  output dimensions, and each input's inserted-channel records are re-offset
  into the concatenated axis.

  Args:
    graph: Owning graph (unused; kept for a uniform modifier signature).
    node: The concat node whose expanding entry is updated in place.
    expanding_desc: Mapping of node name -> StructuredExpanding. The entry
      for ``node.name`` must be a GenericStructuredExpanding.
  """
  offset = 0
  out_dim = 0
  node_expanding = expanding_desc[node.name]
  assert isinstance(node_expanding, GenericStructuredExpanding), \
      "Variable node_expanding here has to be instance of GenericStructuredExpanding"
  # FIX: the original loop variable shadowed the `node` parameter; use a
  # distinct name so `node` keeps referring to the concat node throughout.
  for in_node_name in node.in_nodes:
    input_expanding = expanding_desc[in_node_name]
    out_dim += input_expanding.out_dim
    for weight_insert in input_expanding.out_inserts:
      node_expanding.add_insert(
          DataInsert(offset + weight_insert.position,
                     weight_insert.added_num_channels))
    # Advance by the input's original (pre-expansion) channel count so the
    # next input's inserts land after this input's original channels.
    offset += input_expanding.out_dim - input_expanding.added_out_channel
  node_expanding.out_dim = out_dim
24,178 | import numpy as np
import math
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP
from nndct_shared.utils import NndctOption
from nndct_shared.algorithms import breadth_first_search_handler
from .quant_ops import normal_quant_neuron
def maybe_get_quantizer(quantizer=None):
def quant_reluk_params(node, channel_max):
  """Quantize the channel-max parameter of a ReLU-k node.

  Returns *channel_max* unchanged when the node is outside the quantized
  part of the graph, when no quantizer is active, or when the output tensor
  does not need quantization.
  """
  quant_mode, quantizer = maybe_get_quantizer()
  # Nodes outside the quantizable subgraph are passed through untouched.
  if not node.in_quant_part or quantizer is None:
    return channel_max
  if not quantizer.need_quantize_tensor(node.name, 'output'):
    return channel_max
  if quant_mode == 2:
    # Pick the quantization datatype: plain int unless mixed dtypes allowed.
    if NndctOption.nndct_only_int_quant.value is False:
      datatype = quantizer.get_quant_dtype(node.name, tensor_type='output')
    else:
      datatype = 'int'
    channel_max = quantizer.quantize(
        channel_max, node.name, node, tensor_type='output', datatype=datatype)
  return channel_max
24,205 | import numpy as np
from nndct_shared.base import NNDCT_OP
def get_in_out_channel_idx(ndim, optype, data_formats):
def get_tensor_out_dim(tensor, optype, data_formats):
  """Return the size of the tensor's output-channel axis for the given op."""
  out_axis = get_in_out_channel_idx(tensor.ndim, optype, data_formats)[1]
  return tensor.shape[out_axis]
24,236 | import os
import shutil
import json
import sys
import numpy as np
from .log import log_or_print
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP
def log_or_print(str, logger=None):
def basic_info(mat, name=None, logger=None, to_str=False):
  """Build a one-line summary of *mat* and either return it or log it.

  For numpy arrays the summary includes shape, max, min and sum; anything
  else is printed verbatim. With to_str=True the summary string is returned
  instead of being passed to log_or_print.
  """
  label = name if name else ''
  if isinstance(mat, np.ndarray):
    summary = "<Array>{}[{}]: max:{}, min:{}, sum:{}".format(
        label, mat.shape, mat.max(), mat.min(), mat.sum())
  else:
    summary = "<Non_Array>{}:{}".format(label, mat)
  if to_str:
    return summary
  log_or_print(summary, logger=logger)
24,269 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tf_nndct.graph import ops
from tf_nndct.utils import generic_utils
from tf_nndct.utils import viz
def write_binary_proto(path, message):
  """Serialize *message* to *path* in binary (non-text) protobuf format."""
  write_proto(path, message, as_text=False)
def maybe_export_graph(path, graph):
  """Dump *graph* to *path* for debugging, but only when the
  VAI_TF_PARSER_DEBUG environment variable is set to a non-empty value.

  tf.Graph and GraphDef objects are written as binary protos; nndct
  ops.Graph objects are exported in netron format. Anything else is ignored.
  """
  if not os.environ.get('VAI_TF_PARSER_DEBUG', ''):
    return
  generic_utils.mkdir_if_not_exist(os.path.dirname(path))
  # Normalize a live tf.Graph to its GraphDef so both take the proto path.
  target = graph.as_graph_def() if isinstance(graph, tf.Graph) else graph
  if isinstance(target, graph_pb2.GraphDef):
    write_binary_proto(path, target)
  elif isinstance(target, ops.Graph):
    viz.export_to_netron(path, target)
24,272 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tf_nndct.graph import ops
from tf_nndct.utils import generic_utils
from tf_nndct.utils import viz
def topological_sort(graph):
  """Return a copy of *graph* whose node list is in topological order.

  Implements Kahn's algorithm: nodes with all inputs satisfied are appended
  to the ordering; each processed node unlocks its consumers.

  Args:
    graph: A graph exposing nodes, inputs, node(name), node_size, clone(),
      clear_nodes() and add_node().

  Returns:
    A clone of *graph* with nodes re-added in topological order.

  Raises:
    RuntimeError: If the graph contains at least one cycle (some nodes
      never become ready).
  """
  # FIX: use a deque — list.pop(0) is O(n) per pop, making the loop O(n^2).
  from collections import deque

  num_ready_inputs = {node.name: 0 for node in graph.nodes}
  ready_nodes = deque(graph.inputs)
  reordered_nodes = []
  while ready_nodes:
    node = ready_nodes.popleft()
    reordered_nodes.append(node)
    for node_name in node.out_nodes:
      out_node = graph.node(node_name)
      num_ready_inputs[node_name] += 1
      if num_ready_inputs[node_name] == out_node.num_inputs:
        ready_nodes.append(out_node)

  if len(reordered_nodes) != graph.node_size:
    # Some nodes never became ready: report each with its readiness count.
    all_nodes = {node.name for node in graph.nodes}
    sorted_names = {node.name for node in reordered_nodes}
    not_ready_nodes = all_nodes - sorted_names
    detailed_message = ['node_name: num_ready_inputs vs. num_inputs']
    for node_name in not_ready_nodes:
      node = graph.node(node_name)
      detailed_message.append('{}: {} vs. {}'.format(
          node_name, num_ready_inputs[node_name], node.num_inputs))
    raise RuntimeError(('Couldn\'t sort the graph in topological order as '
                        'there is at least one cycle in the graph. Not ready '
                        'nodes: \n{}'.format('\n'.join(detailed_message))))

  topo_graph = graph.clone()
  topo_graph.clear_nodes()
  for node in reordered_nodes:
    topo_graph.add_node(node)
  return topo_graph
24,274 | import collections
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.utils import logging
from tf_nndct.utils import tf_utils
class FoldConst(GraphRefiner):
  """Folds constant input nodes into the op that consumes them.

  The const node's tensor value becomes a parameter (or attribute) of the
  consuming op, and the const node itself is removed from the graph.
  """

  def fold_to_dense(self, const_op, dense_op):
    # The const tensor is the dense weight matrix, laid out (units, in_dim).
    tensor = list(const_op.params.values())[0]
    assert len(tensor.shape) == 2
    dense_op.param['weights'] = tensor
    # NOTE(review): activation is reset to None here — presumably the
    # activation is represented by a separate node; confirm against parser.
    dense_op.set_config('activation', None)
    dense_op.set_config('units', tensor.shape[0])
    dense_op.attr['in_dim'] = tensor.shape[1]

  def default_fold(self, const_op, op):
    # Copy every parameter of the const op onto the consumer unchanged.
    for param, value in const_op.params.items():
      op.set_param(param, value)

  def refine_graph(self, graph):
    """Fetch the input tensor's value, set it as op's param or attribute
    and remove the original input node.
    """
    # Op types whose const inputs are foldable; None means use default_fold.
    fold_map = {OpTypes.DENSE: self.fold_to_dense, OpTypes.BIAS_ADD: None}
    nodes_to_remove = []
    folded_pairs = []  # (const node name, consumer node name) for the report
    for node in graph.nodes:
      op = node.op
      if op.type == OpTypes.RESHAPE:
        # Reshape const folding is currently disabled.
        pass
        #in_tensor = node.input_names[1]
        #op.set_config('shape', in_tensor.data.tolist())
        #nodes_to_remove.append(in_tensor.node)
        #folded_pairs.append((in_tensor.node.name, node.name))
      elif op.type in fold_map:
        # Find the first CONST producer feeding this node, if any.
        const_node = None
        for in_node_name in node.in_nodes:
          in_node = graph.node(in_node_name)
          if in_node.op.type == OpTypes.CONST:
            const_node = in_node
            break
        if const_node:
          fold_func = fold_map[op.type]
          if not fold_func:
            fold_func = self.default_fold
          fold_func(const_node.op, op)
          nodes_to_remove.append(const_node)
          folded_pairs.append((const_node.name, node.name))
      else:
        pass
    # Remove nodes only after the scan to avoid mutating while iterating.
    for node in nodes_to_remove:
      graph.remove_node(node)
    msg = '\n'.join(['Fold {} to {}'.format(p[0], p[1]) for p in folded_pairs])
    return self.refiner_message(msg)
class FoldBias(GraphRefiner):
  """Folds BIAS_ADD nodes into the op producing their first input."""

  def refine_graph(self, graph):
    removable = []
    fold_log = []
    for bias_node in graph.nodes:
      if bias_node.op.type != OpTypes.BIAS_ADD:
        continue
      owner = graph.node(bias_node.in_nodes[0])
      if owner.op.type == OpTypes.DENSE:
        # Dense layers carry the bias as a dedicated param plus a flag.
        owner.op.param['bias'] = list(bias_node.op.params.values())[0]
        owner.op.set_config('use_bias', True)
      else:
        # Other ops simply absorb every parameter of the bias node.
        for param, value in bias_node.op.params.items():
          owner.op.set_param(param, value)
      removable.append(bias_node)
      fold_log.append((bias_node.name, owner.name))
    for bias_node in removable:
      graph.remove_node(bias_node)
    msg = '\n'.join(['Fold {} to {}'.format(p[0], p[1]) for p in fold_log])
    return self.refiner_message(msg)
class RemoveConstantFoldingNode(GraphRefiner):
  """Removes grappler-generated 'ConstantFolding/...' nodes.

  For layers.Normalization and activations.gelu, tracing may add nodes that
  do not belong to the original model, named with a 'ConstantFolding' prefix
  (e.g. 'ConstantFolding/net/normalization/truediv_recip' or
  'ConstantFolding/net/gelu/Gelu/truediv_recip'). These nodes and every
  tensor connected to them must be removed. Usually a ConstantFolding node
  connects to a single node, and the connection may form a cycle (two
  tensors) or be one-directional (one tensor).
  """

  def refine_graph(self, graph):
    need_rm_tensor = []
    need_rm_node = []
    msg = ""
    for node in graph.nodes:
      # ConstantFolding nodes can only be recognized by name prefix,
      # not by node.op.type.
      if node.name.startswith('ConstantFolding/'):
        msg += "for node:\t" + node.name
        # NOTE(review): node._out_tensors is mutated via remove_output while
        # being iterated — confirm remove_output doesn't shrink the list.
        for output_tensor in node._out_tensors:
          need_rm_tensor.append(output_tensor)
          node.remove_output(output_tensor)
          msg += " rm out_t:\t" + output_tensor.name
        for input_tensor in node._in_tensors:
          node.remove_input(input_tensor)
          msg += " rm in_t:\t" + input_tensor.name
          # Also detach the tensor from the node that produced it.
          source_node = input_tensor.producer.name
          graph.node(source_node).remove_output(input_tensor)
          msg += " and rm out_t:\t" + input_tensor.name + " from: " + source_node
        # The ConstantFolding node is now fully isolated.
        need_rm_node.append(node)
        msg += "\n"
    # Drop the removed output tensors from every node still consuming them.
    for rm_tensor in need_rm_tensor:
      for node in graph.nodes:
        if node.is_consuming(rm_tensor):
          node.remove_input(rm_tensor)
          msg += " rm in_tensor:\t" + rm_tensor.name + " for: " + node.name + "\n"
    for node in need_rm_node:
      graph.remove_node(node)
    return self.refiner_message(msg)
class RenoveRedundantTensorBetweenTwoNode(GraphRefiner):
  """Removes duplicate tensors between a Rescaling and a Normalization node.

  Rescaling computes x * scale + offset; Normalization computes
  (x - mean) / sqrt(var). Their composition is
  (x * scale + offset - mean) / sqrt(var), so after tracing, the
  Normalization node may receive two tensors from the Rescaling node
  (scale and offset - mean). Only one tensor is kept so the topology of
  the net stays correct.

  NOTE(review): the class name has a typo ('Renove' for 'Remove'); kept
  as-is because the class is referenced by name in run_graph_refining.
  """

  def refine_graph(self, graph):

    def is_rescaling_to_normalization(node):
      # Matches a Rescaling node whose single consumer is a Normalization
      # node while it still carries two (or more) output tensors.
      if node.op.type == OpTypes.RESCALING and \
          len(node.out_nodes) == 1 and \
          graph.node(node.out_nodes[0]).op.type == OpTypes.NORM and \
          node.num_outputs >= 2:
        return True
      return False

    need_rm_tensor_node = []
    for node in graph.nodes:
      if is_rescaling_to_normalization(node):
        need_rm_tensor_node.append(node)
    # Keep only the first output tensor of each matched node.
    msg = ""
    for node in need_rm_tensor_node:
      child_node = graph.node(node.out_nodes[0])
      msg += "\n process node between {}--and--{}".\
          format(node.name, child_node.name)
      for tensor in node._out_tensors[1:]:
        node.remove_output(tensor)
        child_node.remove_input(tensor)
        msg += "\t rm tensor:\t{}".format(tensor.name)
    return self.refiner_message(msg)
class RemoveIdentity(GraphRefiner):
  """Deletes all Identity nodes and rewires the graph's output tensors."""

  def refine_graph(self, graph):
    nodes_to_remove = []
    for node in graph.nodes:
      if node.op.type == OpTypes.IDENTITY:
        nodes_to_remove.append(node)
    for node in nodes_to_remove:
      # Graph's structured_output_tensors are output tensors from leaf
      # Identity nodes. We need to update the structured_output_tensors
      # when these Identity nodes are deleted.
      # For example, the original graph is as follows:
      # Dense(dense:0) -> Identity(dense/linear:0) -> Identity(No output tensor)
      # The output tensor is "Identity:0". After the two Identity nodes
      # are removed, the output tensor should be updated to "dense:0".
      output_tensors = []
      for tensor in nest.flatten(graph.structured_output_tensors):
        # As the output tensors do not exist in graph, we can't
        # get the output node by tensor's producer, like:
        # node = graph.tensor(tensor.name).producer
        node_name = tf_utils.node_name_from_input(tensor.name)
        if node.name == node_name:
          if node.op.type != OpTypes.IDENTITY:
            raise RuntimeError(
                'The leaf tensors must be generated from Identity node.')
          # Replace the Identity's output tensor with its input tensor.
          output_tensors.append(node.in_tensors[0])
        else:
          output_tensors.append(tensor)
      graph.structured_output_tensors = nest.pack_sequence_as(
          graph.structured_output_tensors, output_tensors)
      graph.remove_node(node)
    return self.refiner_message(self._msg_for_removing(nodes_to_remove))
class RemoveRNNRedundantInput(GraphRefiner):
  """LSTM nodes usually have some redundant inputs, remove all these nodes."""

  def refine_graph(self, graph):
    redundant = []
    for node in graph.nodes:
      if node.op.type != OpTypes.LSTM:
        continue
      # CONST and LSTM_CELL producers feeding an LSTM node are redundant.
      for in_name in node.in_nodes:
        producer = graph.node(in_name)
        if producer.op.type in (OpTypes.CONST, OpTypes.LSTM_CELL):
          redundant.append(producer)
    for producer in redundant:
      graph.remove_node(producer)
    return self.refiner_message(self._msg_for_removing(redundant))
class RemoveIsolatedNode(GraphRefiner):
  """Drops nodes that have neither inputs nor outputs."""

  def refine_graph(self, graph):
    graph, removed = self._remove_nodes_if(
        graph, lambda node: node.num_inputs == 0 and node.num_outputs == 0)
    return self.refiner_message(self._msg_for_removing(removed))
class MergeBidirectionalRNN(GraphRefiner):
  """Merges the duplicated parent branches feeding a bidirectional RNN."""

  def refine_graph(self, graph):
    parents_to_drop = []
    for node in graph.nodes:
      if node.op.type == OpTypes.BIDIRECTIONAL_RNN:
        parents_to_drop.extend(graph.parents(node))
    for parent in parents_to_drop:
      graph.remove_node(parent)
    # Both directions consume the same tensor twice; keep only the first.
    for node in graph.nodes:
      if node.op.type != OpTypes.BIDIRECTIONAL_RNN:
        continue
      in_tensors = node.in_tensors
      assert in_tensors[0].name == in_tensors[1].name
      node.remove_input(in_tensors[1])
    return self.refiner_message(self._msg_for_removing(parents_to_drop))
class RenameParamTensor(GraphRefiner):
  """Rename param tensor with a more readable name."""

  def refine_graph(self, graph):
    renames = []
    for node in graph.nodes:
      for param, tensor in node.op.params.items():
        # param is either a plain string or an Enum from the op's ParamName.
        suffix = param if isinstance(param, str) else param.name.lower()
        new_name = node.name + ':' + suffix
        renames.append('%s -> %s' % (tensor.name, new_name))
        tensor.name = new_name
    return self.refiner_message(', '.join(renames))
class SetAttrForBinaryOp(GraphRefiner):
  """Set 'input' and 'other' for binary operations. These two attrs are used
  for exporting to xir.
  """

  def refine_graph(self, graph):
    touched = []
    binary_ops = (OpTypes.MULTIPLY,)
    for node in graph.nodes:
      if node.op.type not in binary_ops:
        continue
      assert len(
          node.in_tensors
      ) == 2, 'Binary operation should have 2 inputs, but got {}'.format(
          len(node.in_tensors))
      node.op.attr['input'] = node.in_tensors[0]
      node.op.attr['other'] = node.in_tensors[1]
      touched.append(node.name)
    return self.refiner_message('{}'.format(touched))
import logging as _logging
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import WARN
from logging import NOTSET
def run_graph_refining(graph):
  """Runs every registered refining pass over *graph*, in order."""
  pass_classes = [
      FoldConst, FoldBias, RemoveIdentity, RemoveRNNRedundantInput,
      RemoveIsolatedNode, MergeBidirectionalRNN, RenameParamTensor,
      SetAttrForBinaryOp, RemoveConstantFoldingNode,
      RenoveRedundantTensorBetweenTwoNode
  ]
  for cls in pass_classes:
    result = cls().refine_graph(graph)
    logging.vlog(
        2, 'Refining pass [{}]: {}'.format(result.refiner, result.message))
  return graph
24,275 | import json
import os
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.core.protobuf import config_pb2
from tensorflow.lite.python.util import run_graph_optimizations as _run_graph_optimizations
from tf_nndct.graph import OpTypes
from tf_nndct.graph import converter
from tf_nndct.graph import ops
from tf_nndct.graph import refiner
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import logging
from tf_nndct.utils import tf_utils
def get_func_graph(model, input_signature=None):
  """Traces *model* into a frozen, constant-folded tf.Graph.

  Returns:
    A tuple (tf_graph, structured_input_signature, structured_outputs)
    taken from the traced concrete function.
  """
  # TODO(yuwang) Use trace_model_call from keras function directly.
  #from tensorflow.python.keras.saving import saving_utils
  #func = saving_utils.trace_model_call(model, input_signature)
  concrete_func = keras_utils.trace_model_call(model, [input_signature])
  # Freeze variables into constants; keep control flow unlowered.
  frozen_func = tf_utils.convert_to_constants(
      concrete_func, lower_control_flow=False)
  graph_def = frozen_func.graph.as_graph_def()
  utils.maybe_export_graph(
      os.path.join(_EXPORT_DIR, _FROZEN_FUNC_GRAPH), graph_def)
  # Resource tensors are variable handles, not real data inputs; drop them.
  input_tensors = [
      tensor for tensor in frozen_func.inputs
      if tensor.dtype != tf.dtypes.resource
  ]
  output_tensors = frozen_func.outputs
  # Run grappler's constant-folding pass on the frozen graph.
  config = config_pb2.ConfigProto()
  rewrite_options = config.graph_options.rewrite_options
  #rewrite_options.constant_folding = rewrite_options.ON
  rewrite_options.optimizers.append('constfold')
  graph_def = _run_graph_optimizations(
      graph_def,
      input_tensors,
      output_tensors,
      config=config,
      graph=frozen_func.graph)
  utils.maybe_export_graph(os.path.join(_EXPORT_DIR, _OPT_TF_GRAPH), graph_def)
  # Re-import the optimized GraphDef into a fresh tf.Graph.
  with tf.Graph().as_default() as tf_graph:
    tf.import_graph_def(graph_def, name='')
  func_graph = concrete_func.graph
  return (tf_graph, func_graph.structured_input_signature,
          func_graph.structured_outputs)
def map_scope_to_layer(layer, scope='', parent=None):
  """Recursively maps 'scope/path/name' -> (layer, parent_layer) for *layer*
  and all of its sub-layers. Non-layer objects yield an empty mapping."""
  if not isinstance(layer, keras.layers.Layer):
    return {}
  current_scope = layer.name if not scope else "/".join([scope, layer.name])
  mapping = {current_scope: (layer, parent)}
  # There is no _gather_unique_layers in earlier TF versions, so use
  # keras_utils.get_layers instead of layer._gather_unique_layers().
  for child in keras_utils.get_layers(layer):
    mapping.update(map_scope_to_layer(child, current_scope, layer))
  return mapping
def get_raw_graph(func_graph, scope_to_layer=None):
  """Builds an unrefined nndct ops.Graph from a traced tf graph.

  Args:
    func_graph: Tuple (tf_graph, input_signature,
      structured_output_tensors) as returned by get_func_graph.
    scope_to_layer: Optional mapping of scope name -> (layer, parent).

  Returns:
    An ops.Graph with all nodes created, tensors produced and
    connections built.
  """
  # op_name => Node name
  tf_graph, input_signature, structured_output_tensors = func_graph
  computation_graph = ComputationGraph.from_tf_graph(tf_graph, scope_to_layer)
  logging.vlog(2, 'ComputationGraph\n {}'.format(computation_graph))
  # Parse computation nodes to nndct nodes.
  nndct_nodes = []
  for node in computation_graph.nodes:
    nndct_nodes.extend(converter.convert(node))
  # Create all tensors
  tensors = {}
  for node in nndct_nodes:
    for name in node.output_names:
      tensors[name] = node.produce(name)
  # Build connections.
  for node in nndct_nodes:
    for name in node.input_names:
      node.consume(tensors[name])
  graph = ops.Graph()
  for node in nndct_nodes:
    graph.add_node(node)
  # The tensors in FuncGraph.structured_output_tensors are outputs from
  # Identity node added to the graph. Since all Identity nodes will be removed
  # in graph refining, so we have to find the actual output tensors before
  # that process.
  #output_tensors = []
  #for tensor in nest.flatten(structured_output_tensors):
  #  # The output tensors does not exist in graph, so we can't get the output
  #  # node by tensor's producer, like:
  #  # node = graph.tensor(tf_tensor.name).producer
  #  node_name = tf_utils.node_name_from_input(tensor.name)
  #  node = graph.node(node_name)
  #  assert node.op.type == OpTypes.IDENTITY
  #  output_tensors.append(node.in_tensors[0])
  #output_tensors = nest.pack_sequence_as(structured_output_tensors,
  #                                       output_tensors)
  # Get args part from input_signature (args, kwargs)
  graph.input_signature = input_signature[0]
  graph.structured_output_tensors = structured_output_tensors
  utils.maybe_export_graph(os.path.join(_EXPORT_DIR, _RAW_NNDCT_GRAPH), graph)
  return graph
def run_graph_refining(graph):
  """Applies all graph refiners and optionally dumps the refined graph."""
  graph = refiner.run_graph_refining(graph)
  utils.maybe_export_graph(os.path.join(_EXPORT_DIR, _FINAL_NNDCT_GRAPH), graph)
  return graph
import logging as _logging
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import WARN
from logging import NOTSET
The provided code snippet includes necessary dependencies for implementing the `from_keras_model` function. Write a Python function `def from_keras_model(model, input_signature=None)` to solve the following problem:
Trace model call to get a func graph and convert that func graph to nndct graph.
Here is the function:
def from_keras_model(model, input_signature=None):
  """Trace model call to get a func graph and convert that func graph
  to nndct graph.
  """
  logging.vlog(1, 'input_signature: {}'.format(input_signature))
  # TODO haoliang
  # Note1: Support `Functional API` format Model, subclassing the `Model` may cause errors
  # Two ways to instantiate a `Model`: https://github.com/keras-team/keras/blob/v2.10.0/keras/engine/training.py#L69
  func_graph = get_func_graph(model, input_signature)
  scope_to_layer = map_scope_to_layer(model)
  scope_lines = [f'{key}: {value}' for key, value in scope_to_layer.items()]
  logging.vlog(
      1, 'scope_name: (layer, parent_layer)\n{}'.format('\n'.join(scope_lines)))
  refined = run_graph_refining(get_raw_graph(func_graph, scope_to_layer))
  logging.vlog(2, 'NndctGraph before sorting:\n{}'.format(refined))
  sorted_graph = utils.topological_sort(refined)
  sorted_graph.name = model.name
  sorted_graph.data_format = keras_utils.data_format()
  logging.vlog(2, 'Final parsed graph:\n{}'.format(sorted_graph))
  return sorted_graph
24,278 | import imp
from tensorflow import keras
from tensorflow.python.ops import array_ops
from tensorflow.python.util import nest
from nndct_shared.pruning import pruning_lib
from tf_nndct.graph import OpTypes
from tf_nndct.graph import parser
from tf_nndct.graph import utils
from tf_nndct.graph import writer as writer_lib
from tf_nndct.layers import base_layer
from tf_nndct.utils import keras_utils
from tf_nndct.utils import logging
from tf_nndct.utils import tensor_utils
class KerasBuilder(object):
def __init__(self, graph):
def build(self, filepath, quantized=False, as_layer=False):
def rebuild_model(model, input_signature, path=None):
  """Parses *model* into an nndct graph and regenerates a Keras model from it.

  The generated source is written to *path* (defaults to
  '<model.name>_rebuilt.py'); the rebuilt model object is returned.
  """
  out_path = path if path else '{}_rebuilt.py'.format(model.name)
  graph = parser.from_keras_model(model, input_signature)
  rebuilt, _layer_names = KerasBuilder(graph).build(out_path)
  return rebuilt
24,281 | import numpy as np
from enum import Enum
from tensorflow.core.framework import types_pb2
_TF_TO_NNDCT = {
types_pb2.DT_FLOAT: DType.FLOAT,
types_pb2.DT_HALF: DType.FLOAT16,
types_pb2.DT_DOUBLE: DType.DOUBLE,
types_pb2.DT_INT32: DType.INT32,
types_pb2.DT_INT16: DType.INT16,
types_pb2.DT_INT8: DType.INT8,
types_pb2.DT_UINT8: DType.UINT8,
types_pb2.DT_UINT16: DType.UINT16,
types_pb2.DT_INT64: DType.INT64,
types_pb2.DT_STRING: DType.STRING,
types_pb2.DT_COMPLEX64: DType.COMPLEX64,
types_pb2.DT_COMPLEX128: DType.COMPLEX128,
types_pb2.DT_BOOL: DType.BOOL,
types_pb2.DT_QUINT8: DType.QUINT8
}
def from_tf(dtype):
  """Maps a TensorFlow ``types_pb2`` dtype enum to the nndct ``DType``.

  Raises KeyError for TF dtypes without an nndct counterpart.
  """
  return _TF_TO_NNDCT[dtype]
24,282 | import numpy as np
from enum import Enum
from tensorflow.core.framework import types_pb2
_NNDCT_TO_TF = {nndct: tf for tf, nndct in _TF_TO_NNDCT.items()}
def to_tf(dtype):
  """Maps an nndct ``DType`` back to the TensorFlow ``types_pb2`` enum.

  Raises KeyError for nndct dtypes without a TF counterpart.
  """
  return _NNDCT_TO_TF[dtype]
24,283 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
keras = tf.keras
_NO_LAYER_NAME = '_NO_LAYER_NAME'
_node_converter_registry = registry.Registry('node_converter')
def _convert_node_to_generic(node):
  """Convert a node with unregistered type to a generic node by
  saving the node's config as-is.
  """
  # Keep the original layer class as an attr so the generic node can be
  # rebuilt later.
  op = OpBuilder(op_def.TFGeneric, node.get_config(),
                 node.get_params()).attr('layer_class', type(node.op)).build()
  return create_node(node.name, op, node.input_names, node.output_names)
The provided code snippet includes necessary dependencies for implementing the `convert` function. Write a Python function `def convert(node)` to solve the following problem:
Convert a parser's computation node to one or more TF graph's nodes. Looks up the node's conversion function in the registry and calls it to generate a new ops.Node object according to the attributes of the node. The node's name will be used to set the name of the converted node. A tf.keras.layers.Layer instance without a registered type will be converted to a TFGeneric node. Args: node: A `ComputationNode` object. Returns: A `ops.Node` converted from `ComputationNode`.
Here is the function:
def convert(node):
  """Convert a parser's computation node to one or more TF graph's nodes.

  Looks up node's conversion function in the registry and calls it to
  generate a new ops.Node object according to the attributes of node.
  The node's name will be used to set the name of the converted node.
  A tf.keras.layers.Layer instance without a registered type will be
  converted to a TFGeneric node.

  Args:
    node: A `ComputationNode` object.

  Returns:
    A flat list of `ops.Node` objects converted from `ComputationNode`.
  """
  if node.type in _node_converter_registry:
    convert_func = _node_converter_registry.lookup(node.type)
    nodes = convert_func(node)
  elif node.op_type_name in ["TensorFlowOpLayer", "TFOpLambda"]:
    # Ops such as '+' or tf.concat are transferred to either
    # TensorFlowOpLayer or TFOpLambda depending on the tf version.
    convert_func = _node_converter_registry.lookup(node.op_type_name)
    nodes = convert_func(node)
  elif isinstance(node.op, layers.Layer):
    nodes = _convert_node_to_generic(node)
  elif isinstance(node.op, tf.Operation):
    nodes = _convert_node_to_generic(node)
  else:
    raise NotImplementedError("Unable to parse {}:\n{}".format(
        node.type, node.op))
  # A converter may return a single node or a nested structure; flatten it.
  converted_nodes = nest.flatten(nodes)
  for cn in converted_nodes:
    # Propagate the Keras layer name unless the converter opted out.
    if isinstance(node.op,
                  keras.layers.Layer) and cn.layer_name != _NO_LAYER_NAME:
      cn.layer_name = node.op.name
    cn.inbound_nodes = node.inbound_nodes
  return converted_nodes
24,284 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
_tf_type_to_nndct = {
'Add': op_def.TFAdd,
'AddV2': op_def.TFAdd,
'BiasAdd': op_def.TFBiasAdd,
'Identity': op_def.TFIdentity,
'NoOp': op_def.TFNoOp,
'Reshape': op_def.TFReshape,
'Sigmoid': op_def.TFSigmoid,
'Tanh': op_def.TFTanh,
'GatherV2': op_def.TFGather,
'RFFT': op_def.TFRFFT,
'ComplexAbs': op_def.TFComplexAbs,
'Angle': op_def.TFAngle,
'Exp': op_def.TFExp,
'IRFFT': op_def.TFIRFFT,
'Pad': op_def.TFPad,
'Transpose': op_def.TFTranspose,
'Sum': op_def.TFSum,
'reshape': op_def.TFReshape,
'concat': op_def.TFConcat,
'ConcatV2': op_def.TFConcat,
'__operators__.add': op_def.TFAdd,
}
def create_node(name, op, input_names, output_names):
  """Create an ops.Node wired up with the given input/output tensor names."""
  new_node = ops.Node(name, op)
  new_node.input_names[:] = list(input_names)
  new_node.output_names[:] = list(output_names)
  return new_node
def convert_simple_tf_op(node):
  """Converts a computation node whose TF op type has a one-to-one nndct
  counterpart (see _tf_type_to_nndct)."""
  op = _tf_type_to_nndct[node.type]()
  return create_node(node.name, op, node.input_names, node.output_names)
24,285 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
# Import the LSTM implementation modules from the location appropriate to
# the installed TF/Keras version (module layout changed in 2.6 and 2.9).
if tf_utils.is_tf_version_greater_equal('2.9.0'):
  from keras.layers.rnn import lstm as recurrent_v2
  from keras.layers.rnn import lstm_v1 as recurrent
elif tf_utils.is_tf_version_greater_equal('2.6'):
  # Keras is separate from tensorflow since tf 2.6
  from keras.layers import recurrent
  from keras.layers import recurrent_v2
else:
  from tensorflow.python.keras.layers import recurrent
  from tensorflow.python.keras.layers import recurrent_v2
# NOTE(review): the method bodies of this OpBuilder appear to have been
# elided in this dump; a complete implementation appears later in the file.
class OpBuilder(object):
  def __init__(self, op_cls, config=None, params=None, *args, **kwargs):
  def config(self, name, value):
  def attr(self, name, value):
  def param(self, name, value):
  def build(self):
# Resolve preprocessing-layer aliases across Keras versions: from TF 2.6
# Normalization/Rescaling are exposed directly under `layers` instead of
# `layers.experimental.preprocessing`.
if tf_utils.is_tf_version_greater_equal('2.6'):
  normalization_layer = layers.Normalization
else:
  normalization_layer = layers.experimental.preprocessing.Normalization
if tf_utils.is_tf_version_greater_equal('2.6'):
  rescaling_layer = layers.Rescaling
else:
  rescaling_layer = layers.experimental.preprocessing.Rescaling
# NOTE(review): the body of create_node appears to have been elided in
# this dump; the full definition exists elsewhere in the file.
def create_node(name, op, input_names, output_names):
def convert_op_placeholder(node):
  """Convert a TF placeholder node into a TFInput op.

  Reads shape and dtype from the node config, converts them to nndct
  representations and builds an input op carrying both.
  """
  config = node.get_config()
  shape = tf_utils.tf_shape_to_list(config['shape'])
  dtype = dtypes.from_tf(config['dtype'])
  op = (
      OpBuilder(op_def.TFInput).config('shape', shape).config('dtype',
                                                              dtype).build())
  return create_node(node.name, op, node.input_names, node.output_names)
24,286 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
class OpBuilder(object):
  """Fluent builder that instantiates an nndct op and fills in its
  config entries, attributes and parameter tensors.

  A class-level counter keyed by op type numbers the built ops so that
  parameter tensors get unique names like 'dense_0:weight'.
  """

  # Ops built so far per op type; shared across all builder instances.
  _OP_COUNT = {}

  # TODO(yuwang): Maybe use op object instead of op class ?
  def __init__(self, op_cls, config=None, params=None, *args, **kwargs):
    """Instantiate `op_cls` (forwarding *args/**kwargs) and apply the
    optional config/params dicts."""
    self._op = op_cls(*args, **kwargs)
    self._config = config if config else {}
    self._params = params if params else {}
    self._AttrName = op_cls.AttrName if hasattr(op_cls, 'AttrName') else None
    self._ParamName = op_cls.ParamName if hasattr(op_cls, 'ParamName') else None

    op_type = self._op.type
    if op_type not in self._OP_COUNT:
      self._OP_COUNT[op_type] = 0
    self._OP_COUNT[op_type] = self._OP_COUNT[op_type] + 1

    for name, value in self._config.items():
      self._op.set_config(name, value)
    for name, value in self._params.items():
      self.param(name, value)

  def config(self, name, value):
    """Set one config entry; returns self for chaining."""
    self._op.set_config(name, value)
    return self

  def attr(self, name, value):
    """Set the attribute whose enum value equals `name`.

    Raises:
      ValueError: If the op declares no attributes or `name` matches
        none of them.
    """
    if not self._AttrName:
      raise ValueError('Op {} does not has any attributes'.format(
          type(self._op)))
    for attr in self._AttrName:
      if name == attr.value:
        break
    else:
      # Bug fix: previously an unmatched name fell through the loop and
      # silently assigned `value` to the *last* enum member.
      raise ValueError('Op {} does not has an attribute named "{}"'.format(
          type(self._op), name))
    self._op.set_attr(attr, value)
    return self

  def param(self, name, value):
    """Attach a parameter tensor converted from a TF/numpy value."""
    if not self._ParamName:
      param = name
    else:
      param = utils.op_param_by_name(self._op, name)
    if not param:
      raise ValueError('{} does not has a param named "{}"'.format(
          type(self._op), name))
    op_type = self._op.type
    index = self._OP_COUNT[op_type] - 1
    # Naming tensor like dense_0:weight
    name = '{}_{}:{}'.format(op_type, index, name)
    tensor = tensor_utils.param_from_tf_numpy(name, value)
    self._op.set_param(param, tensor)
    return self

  def build(self):
    """Return the configured op instance."""
    return self._op
def create_node(name, op, input_names, output_names):
  """Build an ops.Node and copy the given tensor-name lists into it."""
  built = ops.Node(name, op)
  # Slice assignment keeps the node's own list objects intact while
  # replacing their contents.
  built.input_names[:] = list(input_names)
  built.output_names[:] = list(output_names)
  return built
def convert_op_cast(node):
  """Convert a TF Cast node into a TFCast op carrying the target dtype."""
  config = node.get_config()
  builder = OpBuilder(op_def.TFCast, None, None)
  builder.config('dtype', config['DstT'])
  return create_node(node.name, builder.build(), node.input_names,
                     node.output_names)
24,289 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
class OpBuilder(object):
  """Fluent builder that instantiates an nndct op and fills in its
  config entries, attributes and parameter tensors.

  A class-level counter keyed by op type numbers the built ops so that
  parameter tensors get unique names like 'dense_0:weight'.
  """

  # Ops built so far per op type; shared across all builder instances.
  _OP_COUNT = {}

  # TODO(yuwang): Maybe use op object instead of op class ?
  def __init__(self, op_cls, config=None, params=None, *args, **kwargs):
    """Instantiate `op_cls` (forwarding *args/**kwargs) and apply the
    optional config/params dicts."""
    self._op = op_cls(*args, **kwargs)
    self._config = config if config else {}
    self._params = params if params else {}
    self._AttrName = op_cls.AttrName if hasattr(op_cls, 'AttrName') else None
    self._ParamName = op_cls.ParamName if hasattr(op_cls, 'ParamName') else None

    op_type = self._op.type
    if op_type not in self._OP_COUNT:
      self._OP_COUNT[op_type] = 0
    self._OP_COUNT[op_type] = self._OP_COUNT[op_type] + 1

    for name, value in self._config.items():
      self._op.set_config(name, value)
    for name, value in self._params.items():
      self.param(name, value)

  def config(self, name, value):
    """Set one config entry; returns self for chaining."""
    self._op.set_config(name, value)
    return self

  def attr(self, name, value):
    """Set the attribute whose enum value equals `name`.

    Raises:
      ValueError: If the op declares no attributes or `name` matches
        none of them.
    """
    if not self._AttrName:
      raise ValueError('Op {} does not has any attributes'.format(
          type(self._op)))
    for attr in self._AttrName:
      if name == attr.value:
        break
    else:
      # Bug fix: previously an unmatched name fell through the loop and
      # silently assigned `value` to the *last* enum member.
      raise ValueError('Op {} does not has an attribute named "{}"'.format(
          type(self._op), name))
    self._op.set_attr(attr, value)
    return self

  def param(self, name, value):
    """Attach a parameter tensor converted from a TF/numpy value."""
    if not self._ParamName:
      param = name
    else:
      param = utils.op_param_by_name(self._op, name)
    if not param:
      raise ValueError('{} does not has a param named "{}"'.format(
          type(self._op), name))
    op_type = self._op.type
    index = self._OP_COUNT[op_type] - 1
    # Naming tensor like dense_0:weight
    name = '{}_{}:{}'.format(op_type, index, name)
    tensor = tensor_utils.param_from_tf_numpy(name, value)
    self._op.set_param(param, tensor)
    return self

  def build(self):
    """Return the configured op instance."""
    return self._op
def create_node(name, op, input_names, output_names):
  """Build an ops.Node and copy the given tensor-name lists into it."""
  built = ops.Node(name, op)
  # Slice assignment keeps the node's own list objects intact while
  # replacing their contents.
  built.input_names[:] = list(input_names)
  built.output_names[:] = list(output_names)
  return built
def convert_op_strided_slice(node):
  """Convert a TF StridedSlice node, passing its config through unchanged."""
  builder = OpBuilder(op_def.TFStridedSlice, node.get_config())
  return create_node(node.name, builder.build(), node.input_names,
                     node.output_names)
24,290 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
# NOTE(review): method/function bodies appear to have been elided in this
# dump; complete definitions appear elsewhere in the file.
class OpBuilder(object):
  def __init__(self, op_cls, config=None, params=None, *args, **kwargs):
  def config(self, name, value):
  def attr(self, name, value):
  def param(self, name, value):
  def build(self):
def create_node(name, op, input_names, output_names):
def convert_op_matmul(node):
  """Parse MatMul to Dense without bias."""
  dense_op = OpBuilder(op_def.TFDense).config('use_bias', False).build()
  return create_node(node.name, dense_op, node.input_names, node.output_names)
24,293 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
class OpBuilder(object):
  """Fluent builder that instantiates an nndct op and fills in its
  config entries, attributes and parameter tensors.

  A class-level counter keyed by op type numbers the built ops so that
  parameter tensors get unique names like 'dense_0:weight'.
  """

  # Ops built so far per op type; shared across all builder instances.
  _OP_COUNT = {}

  # TODO(yuwang): Maybe use op object instead of op class ?
  def __init__(self, op_cls, config=None, params=None, *args, **kwargs):
    """Instantiate `op_cls` (forwarding *args/**kwargs) and apply the
    optional config/params dicts."""
    self._op = op_cls(*args, **kwargs)
    self._config = config if config else {}
    self._params = params if params else {}
    self._AttrName = op_cls.AttrName if hasattr(op_cls, 'AttrName') else None
    self._ParamName = op_cls.ParamName if hasattr(op_cls, 'ParamName') else None

    op_type = self._op.type
    if op_type not in self._OP_COUNT:
      self._OP_COUNT[op_type] = 0
    self._OP_COUNT[op_type] = self._OP_COUNT[op_type] + 1

    for name, value in self._config.items():
      self._op.set_config(name, value)
    for name, value in self._params.items():
      self.param(name, value)

  def config(self, name, value):
    """Set one config entry; returns self for chaining."""
    self._op.set_config(name, value)
    return self

  def attr(self, name, value):
    """Set the attribute whose enum value equals `name`.

    Raises:
      ValueError: If the op declares no attributes or `name` matches
        none of them.
    """
    if not self._AttrName:
      raise ValueError('Op {} does not has any attributes'.format(
          type(self._op)))
    for attr in self._AttrName:
      if name == attr.value:
        break
    else:
      # Bug fix: previously an unmatched name fell through the loop and
      # silently assigned `value` to the *last* enum member.
      raise ValueError('Op {} does not has an attribute named "{}"'.format(
          type(self._op), name))
    self._op.set_attr(attr, value)
    return self

  def param(self, name, value):
    """Attach a parameter tensor converted from a TF/numpy value."""
    if not self._ParamName:
      param = name
    else:
      param = utils.op_param_by_name(self._op, name)
    if not param:
      raise ValueError('{} does not has a param named "{}"'.format(
          type(self._op), name))
    op_type = self._op.type
    index = self._OP_COUNT[op_type] - 1
    # Naming tensor like dense_0:weight
    name = '{}_{}:{}'.format(op_type, index, name)
    tensor = tensor_utils.param_from_tf_numpy(name, value)
    self._op.set_param(param, tensor)
    return self

  def build(self):
    """Return the configured op instance."""
    return self._op
def create_node(name, op, input_names, output_names):
  """Build an ops.Node and copy the given tensor-name lists into it."""
  built = ops.Node(name, op)
  # Slice assignment keeps the node's own list objects intact while
  # replacing their contents.
  built.input_names[:] = list(input_names)
  built.output_names[:] = list(output_names)
  return built
def convert_layer_batchnorm(node):
  """Convert a Keras BatchNormalization layer node to a TFBatchNorm op.

  The output dim is taken from the first non-unit dimension of whichever
  parameter exists (gamma > beta > moving_mean).
  """
  config = node.get_config()
  params = node.get_params()
  # See https://github.com/tensorflow/tensorflow/blob/r2.3/tensorflow/python/keras/layers/normalization.py#L358
  if config['scale']:
    param_shape = params['gamma'].shape
  elif config['center']:
    param_shape = params['beta'].shape
  else:
    param_shape = params['moving_mean'].shape
  # Bug fix: if every dim equals 1 the original loop never assigned
  # out_dim, raising NameError. Fall back to the last dim in that case.
  out_dim = param_shape[-1]
  for dim in param_shape:
    if dim != 1:
      out_dim = dim
      break
  config_axis = config['axis']
  # Keras stores axis as a one-element list in recent versions.
  axis = config_axis[0] if isinstance(config_axis, list) else config_axis
  op = (
      OpBuilder(op_def.TFBatchNorm, config,
                params).attr('out_dim', out_dim).attr('axis', axis).build())
  return create_node(node.name, op, node.input_names, node.output_names)
24,294 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
class OpBuilder(object):
  """Fluent builder that instantiates an nndct op and fills in its
  config entries, attributes and parameter tensors.

  A class-level counter keyed by op type numbers the built ops so that
  parameter tensors get unique names like 'dense_0:weight'.
  """

  # Ops built so far per op type; shared across all builder instances.
  _OP_COUNT = {}

  # TODO(yuwang): Maybe use op object instead of op class ?
  def __init__(self, op_cls, config=None, params=None, *args, **kwargs):
    """Instantiate `op_cls` (forwarding *args/**kwargs) and apply the
    optional config/params dicts."""
    self._op = op_cls(*args, **kwargs)
    self._config = config if config else {}
    self._params = params if params else {}
    self._AttrName = op_cls.AttrName if hasattr(op_cls, 'AttrName') else None
    self._ParamName = op_cls.ParamName if hasattr(op_cls, 'ParamName') else None

    op_type = self._op.type
    if op_type not in self._OP_COUNT:
      self._OP_COUNT[op_type] = 0
    self._OP_COUNT[op_type] = self._OP_COUNT[op_type] + 1

    for name, value in self._config.items():
      self._op.set_config(name, value)
    for name, value in self._params.items():
      self.param(name, value)

  def config(self, name, value):
    """Set one config entry; returns self for chaining."""
    self._op.set_config(name, value)
    return self

  def attr(self, name, value):
    """Set the attribute whose enum value equals `name`.

    Raises:
      ValueError: If the op declares no attributes or `name` matches
        none of them.
    """
    if not self._AttrName:
      raise ValueError('Op {} does not has any attributes'.format(
          type(self._op)))
    for attr in self._AttrName:
      if name == attr.value:
        break
    else:
      # Bug fix: previously an unmatched name fell through the loop and
      # silently assigned `value` to the *last* enum member.
      raise ValueError('Op {} does not has an attribute named "{}"'.format(
          type(self._op), name))
    self._op.set_attr(attr, value)
    return self

  def param(self, name, value):
    """Attach a parameter tensor converted from a TF/numpy value."""
    if not self._ParamName:
      param = name
    else:
      param = utils.op_param_by_name(self._op, name)
    if not param:
      raise ValueError('{} does not has a param named "{}"'.format(
          type(self._op), name))
    op_type = self._op.type
    index = self._OP_COUNT[op_type] - 1
    # Naming tensor like dense_0:weight
    name = '{}_{}:{}'.format(op_type, index, name)
    tensor = tensor_utils.param_from_tf_numpy(name, value)
    self._op.set_param(param, tensor)
    return self

  def build(self):
    """Return the configured op instance."""
    return self._op
def create_activation_node(kernel_node, activation, output_names):
  """Create a standalone activation node fed by `kernel_node`'s output."""
  actv = activations.get(activation)
  op = _activation_cvt_map[actv]()
  actv_name = activations.serialize(actv)
  actv_node = ops.Node('/'.join([kernel_node.name, actv_name]), op)
  # Route the kernel's single output tensor into the activation node.
  kernel_out_tensor = kernel_node.name + ':0'
  kernel_node.output_names = [kernel_out_tensor]
  actv_node.input_names = [kernel_out_tensor]
  actv_node.output_names = copy.deepcopy(output_names)
  # Detach the activation from its kernel node: it must not inherit the
  # kernel's layer name.
  actv_node.layer_name = _NO_LAYER_NAME
  return actv_node
def convert_layer_dense(node):
  """Convert a Keras Dense layer; a trailing activation becomes its own node."""
  config = node.get_config()
  params = node.get_params()
  builder = OpBuilder(op_def.TFDense, config, params)
  # The activation is exported as a separate node, so clear it on the
  # dense op itself while recording it as an attribute.
  builder.config('activation', None)
  builder.attr('activation', config['activation'])
  builder.attr('in_dim', params['kernel'].shape[0])
  dense_node = ops.Node(node.name, builder.build())
  dense_node.input_names = copy.deepcopy(node.input_names)
  if config['activation']:
    actv_node = create_activation_node(dense_node, config['activation'],
                                       node.output_names)
    return [dense_node, actv_node]
  dense_node.output_names = copy.deepcopy(node.output_names)
  return dense_node
24,295 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
class OpBuilder(object):
  """Fluent builder that instantiates an nndct op and fills in its
  config entries, attributes and parameter tensors.

  A class-level counter keyed by op type numbers the built ops so that
  parameter tensors get unique names like 'dense_0:weight'.
  """

  # Ops built so far per op type; shared across all builder instances.
  _OP_COUNT = {}

  # TODO(yuwang): Maybe use op object instead of op class ?
  def __init__(self, op_cls, config=None, params=None, *args, **kwargs):
    """Instantiate `op_cls` (forwarding *args/**kwargs) and apply the
    optional config/params dicts."""
    self._op = op_cls(*args, **kwargs)
    self._config = config if config else {}
    self._params = params if params else {}
    self._AttrName = op_cls.AttrName if hasattr(op_cls, 'AttrName') else None
    self._ParamName = op_cls.ParamName if hasattr(op_cls, 'ParamName') else None

    op_type = self._op.type
    if op_type not in self._OP_COUNT:
      self._OP_COUNT[op_type] = 0
    self._OP_COUNT[op_type] = self._OP_COUNT[op_type] + 1

    for name, value in self._config.items():
      self._op.set_config(name, value)
    for name, value in self._params.items():
      self.param(name, value)

  def config(self, name, value):
    """Set one config entry; returns self for chaining."""
    self._op.set_config(name, value)
    return self

  def attr(self, name, value):
    """Set the attribute whose enum value equals `name`.

    Raises:
      ValueError: If the op declares no attributes or `name` matches
        none of them.
    """
    if not self._AttrName:
      raise ValueError('Op {} does not has any attributes'.format(
          type(self._op)))
    for attr in self._AttrName:
      if name == attr.value:
        break
    else:
      # Bug fix: previously an unmatched name fell through the loop and
      # silently assigned `value` to the *last* enum member.
      raise ValueError('Op {} does not has an attribute named "{}"'.format(
          type(self._op), name))
    self._op.set_attr(attr, value)
    return self

  def param(self, name, value):
    """Attach a parameter tensor converted from a TF/numpy value."""
    if not self._ParamName:
      param = name
    else:
      param = utils.op_param_by_name(self._op, name)
    if not param:
      raise ValueError('{} does not has a param named "{}"'.format(
          type(self._op), name))
    op_type = self._op.type
    index = self._OP_COUNT[op_type] - 1
    # Naming tensor like dense_0:weight
    name = '{}_{}:{}'.format(op_type, index, name)
    tensor = tensor_utils.param_from_tf_numpy(name, value)
    self._op.set_param(param, tensor)
    return self

  def build(self):
    """Return the configured op instance."""
    return self._op
def _convert_node_to_generic(node):
  """Convert a node with unregistered type to a generic node by
  saving the node's config as-is.
  """
  builder = OpBuilder(op_def.TFGeneric, node.get_config(), node.get_params())
  builder.attr('layer_class', type(node.op))
  return create_node(node.name, builder.build(), node.input_names,
                     node.output_names)
# Mapping from TF/Keras graph op-type strings to the nndct op-definition
# class that represents them. Lowercase keys (e.g. 'reshape', 'concat')
# and '__operators__.add' come from Keras TFOpLambda function names.
_tf_type_to_nndct = {
    'Add': op_def.TFAdd,
    'AddV2': op_def.TFAdd,
    'BiasAdd': op_def.TFBiasAdd,
    'Identity': op_def.TFIdentity,
    'NoOp': op_def.TFNoOp,
    'Reshape': op_def.TFReshape,
    'Sigmoid': op_def.TFSigmoid,
    'Tanh': op_def.TFTanh,
    'GatherV2': op_def.TFGather,
    'RFFT': op_def.TFRFFT,
    'ComplexAbs': op_def.TFComplexAbs,
    'Angle': op_def.TFAngle,
    'Exp': op_def.TFExp,
    'IRFFT': op_def.TFIRFFT,
    'Pad': op_def.TFPad,
    'Transpose': op_def.TFTranspose,
    'Sum': op_def.TFSum,
    'reshape': op_def.TFReshape,
    'concat': op_def.TFConcat,
    'ConcatV2': op_def.TFConcat,
    '__operators__.add': op_def.TFAdd,
}
def create_node(name, op, input_names, output_names):
  """Build an ops.Node and copy the given tensor-name lists into it."""
  built = ops.Node(name, op)
  # Slice assignment keeps the node's own list objects intact while
  # replacing their contents.
  built.input_names[:] = list(input_names)
  built.output_names[:] = list(output_names)
  return built
def convert_tf_op_lambda(node):
  """Convert a TFOpLambda node; unknown functions become generic nodes."""
  op_name = node.get_config()["function"]
  op_cls = _tf_type_to_nndct.get(op_name)
  if op_cls is None:
    return _convert_node_to_generic(node)
  op = OpBuilder(op_cls, node.get_config(), None).build()
  return create_node(node.name, op, node.input_names, node.output_names)
24,302 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
def _parse_base_rnn(layer):
  """Parse a Keras RNN layer into a TFRNN op wrapping its cell op."""
  if keras_utils.is_stacked_rnn_cells(layer.cell):
    cell_op = _parse_stacked_rnn_cells(layer.cell)
  else:
    cell_op = _parse_rnn_layer(layer.cell)
  rnn_op = (
      OpBuilder(op_def.TFRNN, layer.get_config(),
                None).config('cell', cell_op).build())
  # Inherit params from cell op so the RNN op owns its cell's weights.
  for param_name, param_tensor in cell_op.params.items():
    rnn_op.set_param(param_name, param_tensor)
  return rnn_op
def create_node(name, op, input_names, output_names):
  """Build an ops.Node and copy the given tensor-name lists into it."""
  built = ops.Node(name, op)
  # Slice assignment keeps the node's own list objects intact while
  # replacing their contents.
  built.input_names[:] = list(input_names)
  built.output_names[:] = list(output_names)
  return built
def convert_layer_rnn(node):
  """Convert a Keras RNN layer node using the base-RNN parser."""
  return create_node(node.name, _parse_base_rnn(node.op), node.input_names,
                     node.output_names)
24,305 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
# NOTE(review): method/function bodies appear to have been elided in this
# dump; complete definitions appear elsewhere in the file.
class OpBuilder(object):
  def __init__(self, op_cls, config=None, params=None, *args, **kwargs):
  def config(self, name, value):
  def attr(self, name, value):
  def param(self, name, value):
  def build(self):
def create_node(name, op, input_names, output_names):
def convert_layer_gru(node):
  """Convert a Keras GRU layer node into a TFGRU op."""
  gru_op = OpBuilder(op_def.TFGRU, node.get_config(),
                     node.get_params()).build()
  return create_node(node.name, gru_op, node.input_names, node.output_names)
24,313 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
# NOTE(review): method/function bodies appear to have been elided in this
# dump; complete definitions appear elsewhere in the file.
class OpBuilder(object):
  def __init__(self, op_cls, config=None, params=None, *args, **kwargs):
  def config(self, name, value):
  def attr(self, name, value):
  def param(self, name, value):
  def build(self):
def create_node(name, op, input_names, output_names):
def convert_layer_reshape(node):
  """Convert a Keras Reshape layer node into a TFReshape op."""
  reshape_op = OpBuilder(op_def.TFReshape, node.get_config()).build()
  return create_node(node.name, reshape_op, node.input_names,
                     node.output_names)
24,315 | import os
from typing import Optional
import tensorflow as tf
from nndct_shared.base import NNDCT_KEYS, NNDCT_OP, GLOBAL_MAP
from nndct_shared.utils import option_util, NndctOption, NndctScreenLogger
from tf_nndct.graph import OpTypes
from tf_nndct.graph import builder
from tf_nndct.graph import ops
from tf_nndct.graph import parser
from tf_nndct.graph import utils as graph_utils
from tf_nndct.layers import recurrent
from tf_nndct.quantization import TFQuantizer
from tf_nndct.utils import keras_utils
from tf_nndct.utils import tf_utils
from tf_nndct.quantization import RNNTFQConfig
from tensorflow.keras import activations
def _init_quant_mode(quant_mode):
  """Normalize the user-facing quant_mode into an integer mode.

  Returns 1 for calibration, 2 for test; invalid values fall back to 1
  after logging an error.
  """
  if isinstance(quant_mode, int):
    NndctScreenLogger().warning(
        f"quant_mode will not support integer value in future version. It supports string values 'calib' and 'test'."
    )
    qmode = quant_mode
  elif isinstance(quant_mode, str):
    str_to_mode = {'calib': 1, 'test': 2}
    if quant_mode in str_to_mode:
      qmode = str_to_mode[quant_mode]
    else:
      NndctScreenLogger().error(
          f"quant_mode supported values are 'calib' and 'test'. Change it to 'calib' as calibration mode"
      )
      qmode = 1
  else:
    NndctScreenLogger().error(
        f"quant_mode supported values are string 'calib' and 'test'. Change it to 'calib' as calibration mode"
    )
    qmode = 1
  # A nonzero option value overrides whatever the caller passed in.
  if NndctOption.nndct_quant_mode.value > 0:
    qmode = NndctOption.nndct_quant_mode.value
  if qmode == 1:
    NndctScreenLogger().info(f"Quantization calibration process start up...")
  elif qmode == 2:
    NndctScreenLogger().info(f"Quantization test process start up...")
  return qmode
def _merge_cell_graphs(cell_graphs):
  """Merge per-cell graphs into one graph, prefixing names with each
  cell graph's name as a scope."""

  def prepend_scope(obj, scope):
    obj.name = '{}/{}'.format(scope, obj.name)

  def rename_input(node):
    # XXX(yuwang): Modify input nodes names to input_0, input_1... to make it
    # easier for the compiler to inditify the inputs. We need to design
    # this more rationally in the future.
    # The node name follow the correspondence rules:
    # input_0 -> inputs
    # input_1 -> H(t-1)
    # input_2 -> C(t-1)
    if node.op.type != OpTypes.INPUT:
      return
    arg_to_input = {
        'args_0': 'input_0',
        'args_1': 'input_1',
        'args_1_1': 'input_2'
    }
    parts = node.name.split('/')
    parts[-1] = arg_to_input[parts[-1]]
    node.name = '/'.join(parts)

  merged = ops.Graph()
  for cell_graph in cell_graphs:
    scope = cell_graph.name
    for node in cell_graph.nodes:
      rename_input(node)
      prepend_scope(node, scope)
      for out_tensor in node.out_tensors:
        prepend_scope(out_tensor, scope)
      merged.add_node(node)
  return merged
def _maybe_rebuild_rnn(model):
  """Rebuild each LSTM layer's cell from a quantized generated module.

  Returns a list of (cell_graph, layer_nodes) tuples, one per rebuilt
  LSTM layer; empty when the model contains no LSTM layers.
  """
  rebuilding_results = []
  layers = keras_utils.gather_layers(model)
  for layer in layers:
    # TODO(yuwang): Support StackedRNNCells, RNN
    if not isinstance(layer, recurrent.LSTM):
      continue
    cell = layer.cell
    # Only sigmoid recurrent activation is supported downstream.
    assert cell.recurrent_activation == activations.get(
        'sigmoid'), 'recurrent_activation must be "sigmoid"'
    graph_name = 'rnn_cell_%d' % len(rebuilding_results)
    # NOTE(review): _parse_rnn_cell and _copy_attr are not defined in this
    # chunk; presumably they live elsewhere in the module — confirm.
    cell_graph = _parse_rnn_cell(cell)
    cell_graph.name = graph_name
    # Generate a quantized python module for the cell and import it back.
    rebuilt_cell, layer_nodes = builder.KerasBuilder(cell_graph).build(
        os.path.join('quantize_result', graph_name + '.py'), quantized=True)
    rebuilding_results.append((cell_graph, layer_nodes))
    # Carry the interface attributes of the original cell over to the
    # rebuilt one so the enclosing RNN layer keeps working.
    _copy_attr('units', cell, rebuilt_cell)
    _copy_attr('state_size', cell, rebuilt_cell)
    _copy_attr('output_size', cell, rebuilt_cell)
    layer.cell = rebuilt_cell
  return rebuilding_results
# NOTE(review): `GlobalMap` is not imported in this chunk, and GLOBAL_MAP is
# already imported from nndct_shared.base above — this rebinding looks like
# a dump artifact; confirm against the original module.
GLOBAL_MAP = GlobalMap()
def tf_quantizer(model,
                 input_signature,
                 quant_mode: str = "calib",
                 output_dir: str = "quantize_result",
                 bitwidth: int = 8,
                 quant_config_file: Optional[str] = None):
  """Create and set up a TFQuantizer for a Keras model.

  Parses the model into an nndct graph, generates a quantized python
  module, optionally rebuilds LSTM cells, and registers the quantizer
  plus its config in the process-wide GLOBAL_MAP.

  Args:
    model: Keras model to quantize.
    input_signature: Input signature used to trace/parse the model.
    quant_mode: 'calib' or 'test' (integer values are deprecated).
    output_dir: Directory for generated code and dump files.
    bitwidth: Default bit width for weights and activations.
    quant_config_file: Optional JSON quant-config file path.

  Returns:
    The configured TFQuantizer instance.
  """
  #initialize quant mode
  qmode = _init_quant_mode(quant_mode)
  # turn off weights equalization and bias correction
  option_util.set_option_value("nndct_param_corr", False)
  option_util.set_option_value("nndct_equalization", False)
  # parse the quant config file
  QConfiger = RNNTFQConfig()
  #if quant_config_file:
  QConfiger.parse_config_file(
      quant_config_file, bit_width_w=bitwidth, bit_width_a=bitwidth)
  qconfig = QConfiger.qconfig
  if NndctOption.nndct_dump_quant_config.value is True:
    config_dump_file = '/'.join([output_dir, 'effective_config.json'])
    QConfiger.dump_quant_config(config_dump_file)
  # lstm IP only support 16 bit activation
  quantizer = TFQuantizer(qmode, output_dir, qconfig)
  # Publish quantizer state globally so downstream passes can find it.
  GLOBAL_MAP.set_map(NNDCT_KEYS.QUANTIZER, quantizer)
  GLOBAL_MAP.set_map(NNDCT_KEYS.QUANT_MODE, qmode)
  GLOBAL_MAP.set_map(NNDCT_KEYS.QUANT_CONFIG, qconfig)
  graph = parser.from_keras_model(model, input_signature)
  quant_model, layer_nodes = builder.KerasBuilder(graph).build(
      os.path.join(output_dir, model.name + '_quant.py'), quantized=True)
  # When the model contains LSTM layers, swap in rebuilt quantized cells
  # and work on the merged cell graphs instead of the full-model graph.
  rebuilding_results = _maybe_rebuild_rnn(quant_model)
  if rebuilding_results:
    cell_graphs = []
    cell_layer_nodes = []
    for graph, layer_nodes in rebuilding_results:
      cell_graphs.append(graph)
      cell_layer_nodes.extend(layer_nodes)
      quantizer.add_rnn_cell_graph('forward', graph)
    graph = _merge_cell_graphs(cell_graphs)
    layer_nodes = cell_layer_nodes
    # TODO(yuwang): Support backward direction.
    export_file = os.path.join(output_dir, 'merged_graph.pb')
    graph_utils.maybe_export_graph(export_file, graph)
  lstm = True if len(rebuilding_results) > 0 else False
  quantizer.setup(graph, lstm=lstm)
  quantizer.load_node_to_layer(layer_nodes, quant_model)
  return quantizer
24,328 | import os
import tensorflow as tf
from distutils.version import LooseVersion
from google.protobuf import text_format
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.framework import dtypes as tf_dtypes
from tensorflow.python.framework import tensor_util
from tf_nndct.graph import dtypes as nndct_dtypes
from tf_nndct.utils.convert_to_constants import convert_variables_to_constants_v2 as convert_to_constants
from tf_nndct.utils import generic_utils
The code above provides the dependencies needed to implement the `values_from_tf_const` function. Write a Python function `def values_from_tf_const(node_def)` that solves the following problem:
Extract the values from a Const NodeDef as a NumPy ndarray. Args: node_def: Const NodeDef that holds the values we want to access. Returns: NumPy ndarray containing the values. Raises: ValueError: if the node is not a Const.
Here is the function:
def values_from_tf_const(node_def):
  """Extracts the values from a const NodeDef as a numpy ndarray.

  Args:
    node_def: Const NodeDef that has the values we want to access.

  Returns:
    Numpy ndarray containing the values.

  Raises:
    ValueError: If the node isn't a Const.
  """
  if node_def.op != "Const":
    raise ValueError("Node '%s' should be a Const op." % node_def.name)
  return tensor_util.MakeNdarray(node_def.attr["value"].tensor)
24,330 | import os
import tensorflow as tf
from distutils.version import LooseVersion
from google.protobuf import text_format
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.framework import dtypes as tf_dtypes
from tensorflow.python.framework import tensor_util
from tf_nndct.graph import dtypes as nndct_dtypes
from tf_nndct.utils.convert_to_constants import convert_variables_to_constants_v2 as convert_to_constants
from tf_nndct.utils import generic_utils
The code above provides the dependencies needed to implement the `tf_shape_to_list` function. Write a Python function `def tf_shape_to_list(shape)` that solves the following problem:
Get the dimension list from a TensorFlow 'shape' attribute, returning None when the rank is unknown.
Here is the function:
def tf_shape_to_list(shape):
  """Get shape from tensorflow attr 'shape'.

  Returns a list of ints, or None when the rank is unknown or `shape`
  is not a TensorShapeProto-like object.
  """
  dims = None
  try:
    if not shape.unknown_rank:
      dims = [int(d.size) for d in shape.dim]
  except Exception:  # pylint: disable=broad-except
    # Best-effort: malformed shapes yield None. Narrowed from a bare
    # `except` so KeyboardInterrupt/SystemExit are no longer swallowed.
    pass
  return dims
24,333 | import os
import tensorflow as tf
from distutils.version import LooseVersion
from google.protobuf import text_format
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.framework import dtypes as tf_dtypes
from tensorflow.python.framework import tensor_util
from tf_nndct.graph import dtypes as nndct_dtypes
from tf_nndct.utils.convert_to_constants import convert_variables_to_constants_v2 as convert_to_constants
from tf_nndct.utils import generic_utils
# NOTE(review): write_proto's body appears to have been elided in this dump.
def write_proto(path, message, as_text=False):
def write_binary_proto(path, message):
  """Serialize `message` to `path` in binary protobuf format."""
  write_proto(path, message, as_text=False)
24,335 | import os
import tensorflow as tf
from distutils.version import LooseVersion
from google.protobuf import text_format
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.framework import dtypes as tf_dtypes
from tensorflow.python.framework import tensor_util
from tf_nndct.graph import dtypes as nndct_dtypes
from tf_nndct.utils.convert_to_constants import convert_variables_to_constants_v2 as convert_to_constants
from tf_nndct.utils import generic_utils
def tf_version():
  """Return the installed TensorFlow version string."""
  version = tf.__version__
  return version
def is_tf_version_greater_than(version: str):
  """Return True iff the installed TF version is strictly newer than
  `version` (LooseVersion comparison)."""
  return LooseVersion(version) < tf_version()
24,346 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging as _logging
import os as _os
import sys as _sys
import time as _time
import traceback as _traceback
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import WARN
from logging import NOTSET
import threading
# NOTE(review): _log_prefix and get_logger bodies are elided here; full
# definitions appear later in the file.
def _log_prefix(level, timestamp=None, file_and_line=None):
def get_logger():
def info(msg, *args, **kwargs):
  """Log `msg` at INFO level with the nndct prefix attached."""
  extra = {'nndct_prefix': _log_prefix(INFO)}
  get_logger().info(msg, *args, extra=extra, **kwargs)
24,347 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging as _logging
import os as _os
import sys as _sys
import time as _time
import traceback as _traceback
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import WARN
from logging import NOTSET
import threading
def _log_prefix(level, timestamp=None, file_and_line=None):
  """Generate a nndct logline prefix."""
  # pylint: disable=global-variable-not-assigned
  global _level_names
  # pylint: enable=global-variable-not-assigned

  # Record current time (falls back to now for falsy timestamps, matching
  # the original `or` semantics).
  now = timestamp or _time.time()
  local = _time.localtime(now)
  now_microsecond = int(1e6 * (now % 1.0))  # kept for parity; currently unused

  filename, line = file_and_line or _get_file_and_line()
  basename = _os.path.basename(filename)

  # Single-letter severity, defaulting to 'I' for unknown levels.
  severity = _level_names[level][0] if level in _level_names else 'I'

  # TODO(yuwang): Format by environment variable.
  return '%c%02d%02d %02d:%02d:%02d %s:%d]' % (
      severity,
      local[1],  # month
      local[2],  # day
      local[3],  # hour
      local[4],  # min
      local[5],  # sec
      basename,
      line)
def get_logger():
  """Return the singleton 'nndct' logger, creating and configuring it lazily.

  Uses double-checked locking on the module-global `_logger` (guarded by the
  module-global `_logger_lock`) so the logger is configured exactly once.
  NOTE(review): `_logger`, `_logger_lock`, `_logger_find_caller` and
  `_logging_fmt` are defined elsewhere in this module — verify they exist
  before first call.
  """
  global _logger

  # Use double-checked locking to avoid taking lock unnecessarily.
  if _logger:
    return _logger

  _logger_lock.acquire()

  try:
    if _logger:
      return _logger

    # Scope the logger under the 'nndct' name to not conflict with users'
    # loggers.
    logger = _logging.getLogger('nndct')
    # Level 1 is just above NOTSET, so every record is passed to handlers.
    logger.setLevel(1)

    # Override findCaller on the logger to skip internal helper functions
    logger.findCaller = _logger_find_caller

    # Don't further configure the nndct logger if the root logger is
    # already configured. This prevents double logging in those cases.
    if not _logging.getLogger().handlers:
      # Determine whether we are in an interactive environment
      _interactive = False
      try:
        # This is only defined in interactive shells.
        if _sys.ps1:
          _interactive = True
      except AttributeError:
        # Even now, we may be in an interactive shell with `python -i`.
        _interactive = _sys.flags.interactive

      # If we are in an interactive environment (like Jupyter), pipe the
      # output to stdout instead of stderr.
      if _interactive:
        #logger.setLevel(INFO)
        _logging_target = _sys.stdout
      else:
        _logging_target = _sys.stderr

      # Add the output handler.
      _handler = _logging.StreamHandler(_logging_target)
      _handler.setFormatter(_logging.Formatter(_logging_fmt, None))
      logger.addHandler(_handler)

    _logger = logger
    return _logger

  finally:
    _logger_lock.release()
def warn(msg, *args, **kwargs):
  """Log `msg` at WARN severity, prepending the nndct log-line prefix."""
  get_logger().warning(
      msg, *args, extra={'nndct_prefix': _log_prefix(WARN)}, **kwargs)
24,349 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging as _logging
import os as _os
import sys as _sys
import time as _time
import traceback as _traceback
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import WARN
from logging import NOTSET
import threading
def _log_prefix(level, timestamp=None, file_and_line=None):
  """Generate an nndct logline prefix like 'I0131 12:34:56 file.py:42]'.

  Args:
    level: Logging severity level (e.g. INFO, WARN); looked up in the
      module-global `_level_names` to pick the severity letter.
    timestamp: Optional epoch-seconds value; defaults to the current time.
      NOTE(review): a falsy timestamp (0) also falls back to the current time
      because of the `or` — presumably intentional, confirm if 0 is ever valid.
    file_and_line: Optional (filename, line) pair; defaults to the caller's
      location via `_get_file_and_line()` (defined elsewhere in this module).

  Returns:
    The formatted prefix string.
  """
  # pylint: disable=global-variable-not-assigned
  global _level_names
  # pylint: enable=global-variable-not-assigned

  # Record current time. (The original also computed the microsecond part,
  # but it was never used in the format below, so it has been removed.)
  now = timestamp or _time.time()
  now_tuple = _time.localtime(now)

  (filename, line) = file_and_line or _get_file_and_line()
  basename = _os.path.basename(filename)

  # Severity string: first letter of the level name, defaulting to 'I'.
  severity = 'I'
  if level in _level_names:
    severity = _level_names[level][0]

  # TODO(yuwang): Format by environment variable.
  s = '%c%02d%02d %02d:%02d:%02d %s:%d]' % (
      severity,
      now_tuple[1],  # month
      now_tuple[2],  # day
      now_tuple[3],  # hour
      now_tuple[4],  # min
      now_tuple[5],  # sec
      basename,
      line)
  return s
def get_logger():
  """Return the singleton 'nndct' logger, creating and configuring it lazily.

  Uses double-checked locking on the module-global `_logger` (guarded by the
  module-global `_logger_lock`) so the logger is configured exactly once.
  NOTE(review): `_logger`, `_logger_lock`, `_logger_find_caller` and
  `_logging_fmt` are defined elsewhere in this module — verify they exist
  before first call.
  """
  global _logger

  # Use double-checked locking to avoid taking lock unnecessarily.
  if _logger:
    return _logger

  _logger_lock.acquire()

  try:
    if _logger:
      return _logger

    # Scope the logger under the 'nndct' name to not conflict with users'
    # loggers.
    logger = _logging.getLogger('nndct')
    # Level 1 is just above NOTSET, so every record is passed to handlers.
    logger.setLevel(1)

    # Override findCaller on the logger to skip internal helper functions
    logger.findCaller = _logger_find_caller

    # Don't further configure the nndct logger if the root logger is
    # already configured. This prevents double logging in those cases.
    if not _logging.getLogger().handlers:
      # Determine whether we are in an interactive environment
      _interactive = False
      try:
        # This is only defined in interactive shells.
        if _sys.ps1:
          _interactive = True
      except AttributeError:
        # Even now, we may be in an interactive shell with `python -i`.
        _interactive = _sys.flags.interactive

      # If we are in an interactive environment (like Jupyter), pipe the
      # output to stdout instead of stderr.
      if _interactive:
        #logger.setLevel(INFO)
        _logging_target = _sys.stdout
      else:
        _logging_target = _sys.stderr

      # Add the output handler.
      _handler = _logging.StreamHandler(_logging_target)
      _handler.setFormatter(_logging.Formatter(_logging_fmt, None))
      logger.addHandler(_handler)

    _logger = logger
    return _logger

  finally:
    _logger_lock.release()
def fatal(msg, *args, **kwargs):
  """Log `msg` at FATAL severity, prepending the nndct log-line prefix."""
  get_logger().fatal(
      msg, *args, extra={'nndct_prefix': _log_prefix(FATAL)}, **kwargs)
24,357 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
from google.protobuf import text_format
def write_proto(path, message, as_text=False):
  """Serialize a protobuf `message` to `path`, creating parent directories.

  Args:
    path: Destination file path.
    message: A protobuf message (anything exposing `SerializeToString`).
    as_text: If True, write the human-readable text format; otherwise write
      the binary wire format.
  """
  dir_name = os.path.dirname(path)
  # The original called mkdir_if_not_exist(dir_name) *and* os.makedirs below,
  # creating the directory twice (and passing a possibly-empty dir_name to the
  # helper before the guard). os.makedirs(..., exist_ok=True) alone suffices.
  if dir_name:
    os.makedirs(dir_name, exist_ok=True)
  if as_text:
    with open(path, "w") as f:
      f.write(text_format.MessageToString(message))
  else:
    with open(path, "wb") as f:
      f.write(message.SerializeToString())
def write_binary_proto(path, message):
  """Write `message` to `path` in binary wire format.

  Thin convenience wrapper over write_proto with as_text fixed to False.
  """
  write_proto(path, message, as_text=False)
24,359 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
from google.protobuf import text_format
The provided code snippet includes necessary dependencies for implementing the `path_to_string` function. Write a Python function `def path_to_string(path)` to solve the following problem:
Convert `PathLike` objects to their string representation. If given a non-string typed path object, converts it to its string representation. If the object passed to `path` is not among the above, then it is returned unchanged. This allows e.g. passthrough of file objects through this function. Args: path: `PathLike` object that represents a path Returns: A string representation of the path argument, if Python support exists.
Here is the function:
def path_to_string(path):
  """Convert a `PathLike` object to its string representation.

  Anything that is not an `os.PathLike` (plain strings, file objects, etc.)
  is passed through unchanged, which allows callers to hand in already-open
  file objects.

  Args:
    path: `PathLike` object that represents a path.

  Returns:
    The `os.fspath` string of `path` if it is path-like, else `path` itself.
  """
  return os.fspath(path) if isinstance(path, os.PathLike) else path
24,360 | import collections
import tensorflow as tf
from typing import Any, Callable, Dict, List, Optional, Union
from tensorflow.keras import layers
from tensorflow.python.eager import def_function
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.util import nest
from tf_nndct.utils import logging
from tf_nndct.utils import tf_utils
from nndct_shared.utils import common
keras = tf.keras
def data_format():
  """Return Keras' global image data format (e.g. 'channels_last')."""
  fmt = keras.backend.image_data_format()
  return fmt
24,361 | import collections
import tensorflow as tf
from typing import Any, Callable, Dict, List, Optional, Union
from tensorflow.keras import layers
from tensorflow.python.eager import def_function
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.util import nest
from tf_nndct.utils import logging
from tf_nndct.utils import tf_utils
from nndct_shared.utils import common
def _keras_weight_name(name):
# Given 'dense/kernel:0', return 'kernel'.
return name.split('/')[-1].rsplit(':', 1)[0]
def get_named_weights(layer):
  """Return an OrderedDict mapping weight names to the layer's weight values.

  Duplicate names (common in RNN layers with repeated weights) are
  disambiguated by appending the weight's index.
  """
  params = collections.OrderedDict()
  values = layer.get_weights()
  for idx, var in enumerate(layer.weights):
    key = _keras_weight_name(var.name)
    # Disambiguate repeated weight names by suffixing the index.
    if key in params:
      key = '{}_{}'.format(key, idx)
    params[key] = values[idx]
  return params
24,362 | import collections
import tensorflow as tf
from typing import Any, Callable, Dict, List, Optional, Union
from tensorflow.keras import layers
from tensorflow.python.eager import def_function
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.util import nest
from tf_nndct.utils import logging
from tf_nndct.utils import tf_utils
from nndct_shared.utils import common
def is_stacked_rnn_cells(layer):
  """Whether `layer` is a keras StackedRNNCells container."""
  stacked_cls = layers.StackedRNNCells
  return isinstance(layer, stacked_cls)
24,364 | import collections
import tensorflow as tf
from typing import Any, Callable, Dict, List, Optional, Union
from tensorflow.keras import layers
from tensorflow.python.eager import def_function
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.util import nest
from tf_nndct.utils import logging
from tf_nndct.utils import tf_utils
from nndct_shared.utils import common
def get_layers(model):
  """Return the sub-layer list of `model`, or an empty list if it has none.

  Prefers the public `layers` attribute; falls back to the private `_layers`
  attribute, unwrapping a leading tracking ListWrapper when present.
  """
  if hasattr(model, 'layers'):
    return model.layers
  if hasattr(model, '_layers'):
    sub_layers = model._layers
    if len(sub_layers) > 0:
      first = sub_layers[0]
      # A ListWrapper at position 0 holds the real layer list.
      if isinstance(first, data_structures.ListWrapper):
        return first.layers
      return sub_layers
  return []
24,366 | import collections
import tensorflow as tf
from typing import Any, Callable, Dict, List, Optional, Union
from tensorflow.keras import layers
from tensorflow.python.eager import def_function
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.util import nest
from tf_nndct.utils import logging
from tf_nndct.utils import tf_utils
from nndct_shared.utils import common
keras = tf.keras
The provided code snippet includes necessary dependencies for implementing the `gather_layers` function. Write a Python function `def gather_layers(layer, include_container=False)` to solve the following problem:
Gather all sub layers from given model. Args: layer: An instance of keras.Layer include_container: Whether to include layer container
Here is the function:
def gather_layers(layer, include_container=False):
  """Gather all sub layers of a keras model in depth-first pre-order.

  Args:
    layer: An instance of keras.Layer; anything that is not a keras.Model
      yields an empty list.
    include_container: Whether container models themselves are included in
      the result alongside their leaf layers.

  Returns:
    A list of layers.
  """
  if not isinstance(layer, keras.Model):
    return []
  result = []
  pending = collections.deque([layer])
  while pending:
    current = pending.popleft()
    if isinstance(current, keras.Model):
      if include_container:
        result.append(current)
      # Push children to the front in reverse so they are visited in order.
      for child in reversed(current.layers):
        pending.appendleft(child)
    else:
      result.append(current)
  return result
24,367 | import collections
import tensorflow as tf
from typing import Any, Callable, Dict, List, Optional, Union
from tensorflow.keras import layers
from tensorflow.python.eager import def_function
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.util import nest
from tf_nndct.utils import logging
from tf_nndct.utils import tf_utils
from nndct_shared.utils import common
keras = tf.keras
The provided code snippet includes necessary dependencies for implementing the `trace_model_call` function. Write a Python function `def trace_model_call(model, input_signature=None)` to solve the following problem:
Trace the model call to create a tf.function for exporting a Keras model. Args: model: A Keras model. input_signature: optional, a list of tf.TensorSpec objects specifying the inputs to the model. Returns: A tf.function wrapping the model's call function with input signatures set. Raises: ValueError: if input signature cannot be inferred from the model.
Here is the function:
def trace_model_call(model, input_signature=None):
  """Trace the model call to create a tf.function for exporting a Keras model.
  Args:
    model: A Keras model.
    input_signature: optional, a list of tf.TensorSpec objects specifying the
      inputs to the model.
  Returns:
    A tf.function wrapping the model's call function with input signatures set.
  Raises:
    ValueError: if input signature cannot be inferred from the model.
  """
  # Prefer a signature already attached to model.call (a tf.function).
  if input_signature is None:
    if isinstance(model.call, tf.__internal__.function.Function):
      input_signature = model.call.input_signature
  if input_signature:
    model_args = input_signature
    model_kwargs = {}
  else:
    # Fall back to inferring call args from the model; model_call_inputs and
    # raise_model_input_error are defined elsewhere in this module.
    model_args, model_kwargs = model_call_inputs(model)
    input_signature = model_args # store the inferred signature
  if model_args is None:
    raise_model_input_error(model)
  @tf.function
  def _wrapped_model(*args, **kwargs):
    """A concrete tf.function that wraps the model's call function."""
    # Force inference mode for the traced call.
    kwargs['training'] = False
    with base_layer_utils.call_context().enter(
        model, inputs=None, build_graph=False, training=False, saving=True):
      outputs = model(*args, **kwargs)
    # Outputs always has to be a flat dict.
    output_names = model.output_names # Functional Model.
    if output_names is None: # Subclassed Model.
      # Import location differs across TF/Keras versions, so try both.
      try:
        from keras.engine import compile_utils # pylint: disable=g-import-not-at-top
      except:
        from tensorflow.python.keras.engine import compile_utils # pylint: disable=g-import-not-at-top
      output_names = compile_utils.create_pseudo_output_names(outputs)
    outputs = tf.nest.flatten(outputs)
    return {name: output for name, output in zip(output_names, outputs)}
  return _wrapped_model.get_concrete_function(*model_args, **model_kwargs)
24,371 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import graph_pb2
from tensorflow.python.platform import gfile
def _to_graph_def(graph):
  """Convert an nndct graph to TensorFlow's GraphDef (names, ops and edges)."""
  graph_def = graph_pb2.GraphDef()
  # TODO(yuwang): Add attrs to node.
  for node in graph.nodes:
    node_def = graph_def.node.add()
    node_def.name = node.name
    node_def.op = node.op.type
    for upstream in node.in_nodes:
      node_def.input.append(upstream)
  return graph_def
The provided code snippet includes necessary dependencies for implementing the `export_to_netron` function. Write a Python function `def export_to_netron(filepath, graph)` to solve the following problem:
Export the nndct `graph` to a serialized file specified by `filepath`. Here we use GraphDef as netron's input. See https://github.com/lutzroeder/netron
Here is the function:
def export_to_netron(filepath, graph):
  """Export the nndct `graph` to a serialized GraphDef file at `filepath`.

  GraphDef is used because netron can render it directly.
  See https://github.com/lutzroeder/netron
  """
  serialized = _to_graph_def(graph).SerializeToString()
  with gfile.GFile(filepath, "wb") as f:
    f.write(serialized)
24,373 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from nndct_shared.utils.tensor_util import convert_parameter_tensor_format
from nndct_shared.utils.tensor_util import DataFormatMap
from nndct_shared.pruning import pruning_lib
from nndct_shared.base import FrameworkType
from tf_nndct.graph.ops import Tensor
from tf_nndct.utils import keras_utils
def tf_param_to_nndct(tensor):
class Tensor(base_tensor.Tensor):
def __init__(self,
name=None,
shape=None,
dtype=None,
data=None,
producer=None):
def from_numpy(cls, name, data):
def is_produced_by(self, node):
def transpose(self, axes):
def clone(self):
def producer(self):
def param_from_tf_numpy(name, ndarray):
  """Wrap a numpy array as a named Tensor and convert it to nndct layout."""
  return tf_param_to_nndct(Tensor.from_numpy(name, ndarray))
24,377 | import argparse
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tf_nndct import IterativePruningRunner
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data('mnist.npz')
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
def evaluate(model):
def train(model, save_path, epochs=10):
  """Compile `model`, fit it on the module-level MNIST arrays, evaluate on
  the test split, and save the weights in TF checkpoint format."""
  model.compile(
      loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
  model.fit(
      x_train,
      y_train,
      batch_size=128,
      epochs=epochs,
      validation_split=0.1)
  model.evaluate(x_test, y_test, verbose=1)
  model.save_weights(save_path, save_format='tf')
24,383 | from tf1_nndct.optimization.pruning import IterativePruningRunner
import tensorflow as tf
from tensorflow.keras import layers
import numpy as np
def mnist_convnet():
def eval_fn(frozen_graph_def: tf.compat.v1.GraphDef) -> float:
class IterativePruningRunner(object):
def __init__(
self, model_name: str, sess: SessionInterface,
input_specs: Mapping[str, tf.TensorSpec],
output_node_names: List[str], excludes: List[str]=[]) -> None:
def _fill_in_weights(self, weights: Mapping[str, np.ndarray]) -> None:
def ana(
self, eval_fn: Callable[[tf.compat.v1.GraphDef], float],
gpu_ids: List[str]=['/GPU:0'], checkpoint_interval: int = 10) -> None:
def _get_spec_by_sparsity(self, sparsity: float, max_attemp: int) -> PruningSpec:
def prune(self, sparsity: float=None, threshold: float=None, max_attemp: int=10) -> Tuple[Mapping[str, TensorProto], Mapping[str, np.ndarray]]:
def _prune(self, spec: PruningSpec) -> Tuple[Mapping[str, TensorProto], Mapping[str, np.ndarray], Mapping[str, np.ndarray]]:
def _shape_inference(self, graph_def: tf.compat.v1.GraphDef, node_def_map: Mapping[str, tf.compat.v1.GraphDef], node_pruning_descs: Mapping[str, PruningDesc]) -> None:
def _update_shape_tensor(
self, graph_def: tf.compat.v1.GraphDef, node_def_map: Mapping[str, tf.compat.v1.GraphDef],
node_pruning_descs: Mapping[str, PruningDesc]) -> Mapping[str, TensorProto]:
def _update_weights(
self, graph_def: tf.compat.v1.GraphDef, node_def_map: Mapping[str, tf.compat.v1.GraphDef],
node_pruning_descs: Mapping[str, PruningDesc], weights: Mapping[str, np.ndarray]) -> Mapping[str, np.ndarray]:
def _get_channel_indices_to_remove(self, weight: np.ndarray, axis: int, remain_depth: int) -> List[int]:
def _get_slim_ndarray(self, array: np.ndarray, mask: np.ndarray) -> np.ndarray:
def get_slim_graph_def(self, shape_tensors: Mapping[str, TensorProto]=None, masks: Mapping[str, np.ndarray]=None) -> tf.compat.v1.GraphDef:
def prune():
  """End-to-end TF1 pruning demo: analyze the model, prune to 50% sparsity,
  take one optimizer step, then export the slim graph def."""
  with tf.Session() as sess:
    model, input_shape = mnist_convnet()
    sess.run(tf.global_variables_initializer())
    input_specs = {
        'input_1:0': tf.TensorSpec(shape=(1, 28, 28, 1), dtype=tf.dtypes.float32)
    }
    pruner = IterativePruningRunner("mnist", sess, input_specs, ["dense/BiasAdd"])
    # Sensitivity analysis across two GPUs, then prune to the target sparsity.
    pruner.ana(eval_fn, gpu_ids=['/GPU:0', '/GPU:1'])
    shape_tensors, masks = pruner.prune(sparsity=0.5)

    def loss_fn():
      images = np.ones((1, 28, 28, 1), dtype=np.float32)
      out = model(images, training=True)
      return tf.reduce_sum(out)

    # One gradient-descent step on the pruned model before export.
    optimizer = tf.compat.v1.train.GradientDescentOptimizer(3.0)
    sess.run(optimizer.minimize(loss_fn, var_list=tf.trainable_variables()))
    slim_graph_def = pruner.get_slim_graph_def(shape_tensors, masks)
24,391 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow as tf
slim = tf.contrib.slim
The provided code snippet includes necessary dependencies for implementing the `conv2d_same` function. Write a Python function `def conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None)` to solve the following problem:
Strided 2-D convolution with 'SAME' padding. When stride > 1, then we do explicit zero-padding, followed by conv2d with 'VALID' padding. Note that net = conv2d_same(inputs, num_outputs, 3, stride=stride) is equivalent to net = slim.conv2d(inputs, num_outputs, 3, stride=1, padding='SAME') net = subsample(net, factor=stride) whereas net = slim.conv2d(inputs, num_outputs, 3, stride=stride, padding='SAME') is different when the input's height or width is even, which is why we add the current function. For more details, see ResnetUtilsTest.testConv2DSameEven(). Args: inputs: A 4-D tensor of size [batch, height_in, width_in, channels]. num_outputs: An integer, the number of output filters. kernel_size: An int with the kernel_size of the filters. stride: An integer, the output stride. rate: An integer, rate for atrous convolution. scope: Scope. Returns: output: A 4-D tensor of size [batch, height_out, width_out, channels] with the convolution output.
Here is the function:
def conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None):
  """Strided 2-D convolution with 'SAME'-style padding that is independent of
  input size.

  For stride == 1 this is a plain slim.conv2d with 'SAME' padding. For
  stride > 1 it zero-pads explicitly and then convolves with 'VALID' padding,
  which (unlike 'SAME' + stride) gives the same result regardless of whether
  the input's height/width is even or odd.

  Args:
    inputs: A 4-D tensor of size [batch, height_in, width_in, channels].
    num_outputs: An integer, the number of output filters.
    kernel_size: An int with the kernel_size of the filters.
    stride: An integer, the output stride.
    rate: An integer, rate for atrous convolution.
    scope: Scope.

  Returns:
    A 4-D tensor of size [batch, height_out, width_out, channels].
  """
  if stride == 1:
    return slim.conv2d(inputs, num_outputs, kernel_size, stride=1, rate=rate,
                       padding='SAME', scope=scope)
  # Effective kernel extent after dilation, and the symmetric padding split.
  effective_kernel = kernel_size + (kernel_size - 1) * (rate - 1)
  total_pad = effective_kernel - 1
  pad_beg = total_pad // 2
  pad_end = total_pad - pad_beg
  padded = tf.pad(inputs,
                  [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
  return slim.conv2d(padded, num_outputs, kernel_size, stride=stride,
                     rate=rate, padding='VALID', scope=scope)
24,392 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow as tf
slim = tf.contrib.slim
def subsample(inputs, factor, scope=None):
  """Subsample `inputs` along the spatial dimensions by `factor`.

  Args:
    inputs: A `Tensor` of size [batch, height_in, width_in, channels].
    factor: The subsampling factor; 1 returns the input unchanged.
    scope: Optional variable_scope.

  Returns:
    A `Tensor` of size [batch, height_out, width_out, channels], either the
    input intact (factor == 1) or max-pooled with the given stride.
  """
  if factor == 1:
    return inputs
  return slim.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)
The provided code snippet includes necessary dependencies for implementing the `stack_blocks_dense` function. Write a Python function `def stack_blocks_dense(net, blocks, output_stride=None, store_non_strided_activations=False, outputs_collections=None)` to solve the following problem:
Stacks ResNet `Blocks` and controls output feature density. First, this function creates scopes for the ResNet in the form of 'block_name/unit_1', 'block_name/unit_2', etc. Second, this function allows the user to explicitly control the ResNet output_stride, which is the ratio of the input to output spatial resolution. This is useful for dense prediction tasks such as semantic segmentation or object detection. Most ResNets consist of 4 ResNet blocks and subsample the activations by a factor of 2 when transitioning between consecutive ResNet blocks. This results to a nominal ResNet output_stride equal to 8. If we set the output_stride to half the nominal network stride (e.g., output_stride=4), then we compute responses twice. Control of the output feature density is implemented by atrous convolution. Args: net: A `Tensor` of size [batch, height, width, channels]. blocks: A list of length equal to the number of ResNet `Blocks`. Each element is a ResNet `Block` object describing the units in the `Block`. output_stride: If `None`, then the output will be computed at the nominal network stride. If output_stride is not `None`, it specifies the requested ratio of input to output spatial resolution, which needs to be equal to the product of unit strides from the start up to some level of the ResNet. For example, if the ResNet employs units with strides 1, 2, 1, 3, 4, 1, then valid values for the output_stride are 1, 2, 6, 24 or None (which is equivalent to output_stride=24). store_non_strided_activations: If True, we compute non-strided (undecimated) activations at the last unit of each block and store them in the `outputs_collections` before subsampling them. This gives us access to higher resolution intermediate activations which are useful in some dense prediction problems but increases 4x the computation and memory cost at the last unit of each block. outputs_collections: Collection to add the ResNet block outputs. 
Returns: net: Output tensor with stride equal to the specified output_stride. Raises: ValueError: If the target output_stride is not valid.
Here is the function:
def stack_blocks_dense(net, blocks, output_stride=None,
                       store_non_strided_activations=False,
                       outputs_collections=None):
  """Stacks ResNet `Blocks` and controls output feature density.
  First, this function creates scopes for the ResNet in the form of
  'block_name/unit_1', 'block_name/unit_2', etc.
  Second, this function allows the user to explicitly control the ResNet
  output_stride, which is the ratio of the input to output spatial resolution.
  This is useful for dense prediction tasks such as semantic segmentation or
  object detection.
  Most ResNets consist of 4 ResNet blocks and subsample the activations by a
  factor of 2 when transitioning between consecutive ResNet blocks. This results
  to a nominal ResNet output_stride equal to 8. If we set the output_stride to
  half the nominal network stride (e.g., output_stride=4), then we compute
  responses twice.
  Control of the output feature density is implemented by atrous convolution.
  Args:
    net: A `Tensor` of size [batch, height, width, channels].
    blocks: A list of length equal to the number of ResNet `Blocks`. Each
      element is a ResNet `Block` object describing the units in the `Block`.
    output_stride: If `None`, then the output will be computed at the nominal
      network stride. If output_stride is not `None`, it specifies the requested
      ratio of input to output spatial resolution, which needs to be equal to
      the product of unit strides from the start up to some level of the ResNet.
      For example, if the ResNet employs units with strides 1, 2, 1, 3, 4, 1,
      then valid values for the output_stride are 1, 2, 6, 24 or None (which
      is equivalent to output_stride=24).
    store_non_strided_activations: If True, we compute non-strided (undecimated)
      activations at the last unit of each block and store them in the
      `outputs_collections` before subsampling them. This gives us access to
      higher resolution intermediate activations which are useful in some
      dense prediction problems but increases 4x the computation and memory cost
      at the last unit of each block.
    outputs_collections: Collection to add the ResNet block outputs.
  Returns:
    net: Output tensor with stride equal to the specified output_stride.
  Raises:
    ValueError: If the target output_stride is not valid.
  """
  # The current_stride variable keeps track of the effective stride of the
  # activations. This allows us to invoke atrous convolution whenever applying
  # the next residual unit would result in the activations having stride larger
  # than the target output_stride.
  current_stride = 1

  # The atrous convolution rate parameter.
  rate = 1

  for block in blocks:
    with tf.variable_scope(block.scope, 'block', [net]) as sc:
      block_stride = 1
      for i, unit in enumerate(block.args):
        if store_non_strided_activations and i == len(block.args) - 1:
          # Move stride from the block's last unit to the end of the block.
          block_stride = unit.get('stride', 1)
          unit = dict(unit, stride=1)

        with tf.variable_scope('unit_%d' % (i + 1), values=[net]):
          # If we have reached the target output_stride, then we need to employ
          # atrous convolution with stride=1 and multiply the atrous rate by the
          # current unit's stride for use in subsequent layers.
          if output_stride is not None and current_stride == output_stride:
            net = block.unit_fn(net, rate=rate, **dict(unit, stride=1))
            rate *= unit.get('stride', 1)

          else:
            net = block.unit_fn(net, rate=1, **unit)
            current_stride *= unit.get('stride', 1)
            if output_stride is not None and current_stride > output_stride:
              raise ValueError('The target output_stride cannot be reached.')

      # Collect activations at the block's end before performing subsampling.
      net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net)

      # Subsampling of the block's output activations.
      if output_stride is not None and current_stride == output_stride:
        # Already at the target density: fold the deferred block stride into
        # the atrous rate instead of subsampling.
        rate *= block_stride
      else:
        net = subsample(net, block_stride)
        current_stride *= block_stride
        if output_stride is not None and current_stride > output_stride:
          raise ValueError('The target output_stride cannot be reached.')

  # The product of unit strides must land exactly on output_stride.
  if output_stride is not None and current_stride != output_stride:
    raise ValueError('The target output_stride cannot be reached.')

  return net
Returns: net: Output tensor with stride equal to the specified output_stride. Raises: ValueError: If the target output_stride is not valid. |
24,411 | import argparse
import os
import time
import torch
import torchvision.datasets as datasets
from torchvision.models.resnet import resnet18
import torchvision.transforms as transforms
from pytorch_nndct import get_pruning_runner
class AverageMeter(object):
def __init__(self, name, fmt=':f'):
def reset(self):
def update(self, val, n=1):
def __str__(self):
def accuracy(output, target, topk=(1,)):
def eval_fn(model, dataloader_test):
  """Compute the average top-1 accuracy of `model` over `dataloader_test`.

  Runs on CUDA. NOTE(review): relies on module-level `AverageMeter` and
  `accuracy` helpers defined elsewhere in this script.
  """
  top1 = AverageMeter('Acc@1', ':6.2f')
  model.eval()
  with torch.no_grad():
    for i, (images, targets) in enumerate(dataloader_test):
      images = images.cuda()
      targets = targets.cuda()
      outputs = model(images)
      # Only the top-1 term is tracked; the top-5 result is discarded.
      acc1, _ = accuracy(outputs, targets, topk=(1, 5))
      top1.update(acc1[0], images.size(0))
  return top1.avg | null
24,417 | import argparse
import os
import time
import torch
import torch.nn as nn
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from pytorch_nndct import get_pruning_runner
class AverageMeter(object):
  """Tracks the most recent value and the running mean of a metric."""

  def __init__(self, name, fmt=':f'):
    self.name = name
    self.fmt = fmt
    self.reset()

  def reset(self):
    """Clear all accumulated statistics."""
    self.val = 0
    self.avg = 0
    self.sum = 0
    self.count = 0

  def update(self, val, n=1):
    """Record `val` observed `n` times and refresh the running mean."""
    self.val = val
    self.sum += val * n
    self.count += n
    self.avg = self.sum / self.count

  def __str__(self):
    template = '{name} {val%s} ({avg%s})' % (self.fmt, self.fmt)
    return template.format(**self.__dict__)
class ProgressMeter(object):
  """Prints a '[batch/total]' counter followed by a list of meters."""

  def __init__(self, num_batches, meters, prefix=""):
    self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
    self.meters = meters
    self.prefix = prefix

  def display(self, batch):
    parts = [self.prefix + self.batch_fmtstr.format(batch)]
    for meter in self.meters:
      parts.append(str(meter))
    print('\t'.join(parts))

  def _get_batch_fmtstr(self, num_batches):
    # Right-align the running batch index to the width of the total count.
    width = len(str(num_batches // 1))
    fmt = '{:%dd}' % width
    return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def accuracy(output, target, topk=(1,)):
  """Return the top-k accuracy (in percent) of `output` against `target`.

  One tensor per requested k, each holding a single percentage value.
  """
  with torch.no_grad():
    k_max = max(topk)
    n = target.size(0)
    # Indices of the k_max highest-scoring classes per sample, one row per k.
    _, pred = output.topk(k_max, 1, True, True)
    pred = pred.t()
    hits = pred.eq(target.view(1, -1).expand_as(pred))
    return [
        hits[:k].flatten().float().sum(0, keepdim=True).mul_(100.0 / n)
        for k in topk
    ]
def evaluate(dataloader, model, criterion):
  """Run one CUDA evaluation pass of `model` over `dataloader`.

  Args:
    dataloader: iterable yielding (images, target) batches.
    model: network to evaluate; moved to CUDA before the loop.
    criterion: loss function applied to (output, target).

  Returns:
    (top1.avg, top5.avg): average top-1 / top-5 accuracy over the data.
  """
  batch_time = AverageMeter('Time', ':6.3f')
  losses = AverageMeter('Loss', ':.4e')
  top1 = AverageMeter('Acc@1', ':6.2f')
  top5 = AverageMeter('Acc@5', ':6.2f')
  # BUG FIX: the progress meter previously referenced the undefined global
  # `val_loader`; it must be sized from the `dataloader` argument.
  progress = ProgressMeter(
      len(dataloader), [batch_time, losses, top1, top5], prefix='Test: ')

  # switch to evaluate mode
  model.eval()
  # Hoisted out of the loop: moving the model to CUDA once is sufficient.
  model = model.cuda()

  with torch.no_grad():
    end = time.time()
    for i, (images, target) in enumerate(dataloader):
      images = images.cuda(non_blocking=True)
      target = target.cuda(non_blocking=True)

      # compute output
      output = model(images)
      loss = criterion(output, target)

      # measure accuracy and record loss
      acc1, acc5 = accuracy(output, target, topk=(1, 5))
      losses.update(loss.item(), images.size(0))
      top1.update(acc1[0], images.size(0))
      top5.update(acc5[0], images.size(0))

      # measure elapsed time
      batch_time.update(time.time() - end)
      end = time.time()

      if i % 50 == 0:
        progress.display(i)

    # TODO: this should also be done with the ProgressMeter
    print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'.format(
        top1=top1, top5=top5))

  return top1.avg, top5.avg | null
24,420 | import argparse
import os
import shutil
import time
import torch
import torchvision
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import torch.nn as nn
from pytorch_nndct.nn.modules import functional
from pytorch_nndct.quantization import bfp
The provided code snippet includes necessary dependencies for implementing the `conv3x3` function. Write a Python function `def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1)` to solve the following problem:
3x3 convolution with padding
Here is the function:
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
  """3x3 convolution with padding"""
  # padding == dilation keeps the spatial size unchanged when stride == 1;
  # bias is omitted because a norm layer follows in the ResNet blocks.
  return nn.Conv2d(
      in_planes,
      out_planes,
      kernel_size=3,
      stride=stride,
      padding=dilation,
      groups=groups,
      bias=False,
      dilation=dilation) | 3x3 convolution with padding
24,421 | import argparse
import os
import shutil
import time
import torch
import torchvision
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import torch.nn as nn
from pytorch_nndct.nn.modules import functional
from pytorch_nndct.quantization import bfp
The provided code snippet includes necessary dependencies for implementing the `conv1x1` function. Write a Python function `def conv1x1(in_planes, out_planes, stride=1)` to solve the following problem:
1x1 convolution
Here is the function:
def conv1x1(in_planes, out_planes, stride=1):
  """1x1 convolution"""
  # Pointwise convolution used for channel projection / identity downsampling.
  return nn.Conv2d(
      in_planes, out_planes, kernel_size=1, stride=stride, bias=False) | 1x1 convolution
24,422 | import argparse
import os
import shutil
import time
import torch
import torchvision
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import torch.nn as nn
from pytorch_nndct.nn.modules import functional
from pytorch_nndct.quantization import bfp
class Bottleneck(nn.Module):
def __init__(self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
norm_layer=None):
def forward(self, x):
class ResNet(nn.Module):
def __init__(self,
block,
layers,
num_classes=1000,
zero_init_residual=False,
groups=1,
width_per_group=64,
replace_stride_with_dilation=None,
norm_layer=None):
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
def forward(self, x):
def resnet50(**kwargs):
  """ResNet-50: Bottleneck blocks with [3, 4, 6, 3] units per stage."""
  return ResNet(Bottleneck, [3, 4, 6, 3], **kwargs) | null
24,423 | import argparse
import os
import shutil
import time
import torch
import torchvision
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import torch.nn as nn
from pytorch_nndct.nn.modules import functional
from pytorch_nndct.quantization import bfp
class Bottleneck(nn.Module):
  """ResNet bottleneck residual block (1x1 -> 3x3 -> 1x1 convolutions)."""

  # Output channel multiplier: the block emits `planes * expansion` channels.
  expansion = 4

  def __init__(self,
               inplanes,
               planes,
               stride=1,
               downsample=None,
               groups=1,
               base_width=64,
               dilation=1,
               norm_layer=None):
    super(Bottleneck, self).__init__()
    if norm_layer is None:
      norm_layer = nn.BatchNorm2d
    width = int(planes * (base_width / 64.)) * groups
    # Both self.conv2 and self.downsample layers downsample the input when stride != 1
    self.conv1 = conv1x1(inplanes, width)
    self.bn1 = norm_layer(width)
    self.conv2 = conv3x3(width, width, stride, groups, dilation)
    self.bn2 = norm_layer(width)
    self.conv3 = conv1x1(width, planes * self.expansion)
    self.bn3 = norm_layer(planes * self.expansion)
    self.relu = nn.ReLU(inplace=True)
    self.downsample = downsample
    self.stride = stride
    # pytorch_nndct functional.Add for the residual sum — presumably so the
    # add can be handled by the quantizer; verify against pytorch_nndct docs.
    self.add = functional.Add()

  def forward(self, x):
    """Apply the bottleneck transform and add the (possibly projected) identity."""
    identity = x
    out = self.conv1(x)
    out = self.bn1(out)
    out = self.relu(out)
    out = self.conv2(out)
    out = self.bn2(out)
    out = self.relu(out)
    out = self.conv3(out)
    out = self.bn3(out)
    if self.downsample is not None:
      identity = self.downsample(x)
    #out = out.bfloat16().float() + identity.bfloat16().float()
    out = self.add(out, identity)
    out = self.relu(out)
    return out
class ResNet(nn.Module):
  """ResNet backbone assembled from residual blocks.

  `layers` gives the number of blocks in each of the four stages
  (conv2_x..conv5_x). NOTE(review): `zero_init_residual` is accepted but
  never used in this constructor.
  """

  def __init__(self,
               block,
               layers,
               num_classes=1000,
               zero_init_residual=False,
               groups=1,
               width_per_group=64,
               replace_stride_with_dilation=None,
               norm_layer=None):
    super().__init__()
    if norm_layer is None:
      norm_layer = nn.BatchNorm2d
    self._norm_layer = norm_layer
    self.inplanes = 64
    self.dilation = 1
    if replace_stride_with_dilation is None:
      # each element in the tuple indicates if we should replace
      # the 2x2 stride with a dilated convolution instead
      replace_stride_with_dilation = [False, False, False]
    if len(replace_stride_with_dilation) != 3:
      raise ValueError(
          "replace_stride_with_dilation should be None "
          "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
    self.groups = groups
    self.base_width = width_per_group
    # conv1
    self.conv1 = nn.Conv2d(
        3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
    self.bn1 = norm_layer(self.inplanes)
    self.relu = nn.ReLU(inplace=True)
    # conv2_x
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    self.layer1 = self._make_layer(block, 64, layers[0])
    # conv3_x
    self.layer2 = self._make_layer(
        block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
    # conv4_x
    self.layer3 = self._make_layer(
        block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
    # conv5_x
    self.layer4 = self._make_layer(
        block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
    # avgpool, fc
    self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
    self.fc = nn.Linear(512 * block.expansion, num_classes)

  def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
    """Build one stage of `blocks` residual blocks.

    Only the first block may stride/downsample; the rest preserve shape.
    """
    norm_layer = self._norm_layer
    downsample = None
    previous_dilation = self.dilation
    if dilate:
      # Trade stride for dilation to preserve spatial resolution.
      self.dilation *= stride
      stride = 1
    if stride != 1 or self.inplanes != planes * block.expansion:
      # Project the identity path so it matches the block output shape.
      downsample = nn.Sequential(
          conv1x1(self.inplanes, planes * block.expansion, stride),
          norm_layer(planes * block.expansion))
    layers = []
    layers.append(
        block(self.inplanes, planes, stride, downsample, self.groups,
              self.base_width, previous_dilation, norm_layer))
    self.inplanes = planes * block.expansion
    for _ in range(1, blocks):
      layers.append(
          block(
              self.inplanes,
              planes,
              groups=self.groups,
              base_width=self.base_width,
              dilation=self.dilation,
              norm_layer=norm_layer))
    return nn.Sequential(*layers)

  def forward(self, x):
    """Stem -> 4 residual stages -> global average pool -> classifier logits."""
    x = self.conv1(x)
    x = self.bn1(x)
    x = self.relu(x)
    x = self.maxpool(x)
    x = self.layer1(x)
    x = self.layer2(x)
    x = self.layer3(x)
    x = self.layer4(x)
    #x = self.avgpool(x.bfloat16().float())
    x = self.avgpool(x)
    x = torch.flatten(x, 1)
    x = self.fc(x)
    return x
def resnet101(**kwargs):
  """ResNet-101: Bottleneck blocks with [3, 4, 23, 3] units per stage."""
  return ResNet(Bottleneck, [3, 4, 23, 3], **kwargs) | null
24,424 | import argparse
import os
import shutil
import time
import torch
import torchvision
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import torch.nn as nn
from pytorch_nndct.nn.modules import functional
from pytorch_nndct.quantization import bfp
class Bottleneck(nn.Module):
  """ResNet bottleneck residual block (1x1 -> 3x3 -> 1x1 convolutions)."""

  # Output channel multiplier: the block emits `planes * expansion` channels.
  expansion = 4

  def __init__(self,
               inplanes,
               planes,
               stride=1,
               downsample=None,
               groups=1,
               base_width=64,
               dilation=1,
               norm_layer=None):
    super(Bottleneck, self).__init__()
    if norm_layer is None:
      norm_layer = nn.BatchNorm2d
    width = int(planes * (base_width / 64.)) * groups
    # Both self.conv2 and self.downsample layers downsample the input when stride != 1
    self.conv1 = conv1x1(inplanes, width)
    self.bn1 = norm_layer(width)
    self.conv2 = conv3x3(width, width, stride, groups, dilation)
    self.bn2 = norm_layer(width)
    self.conv3 = conv1x1(width, planes * self.expansion)
    self.bn3 = norm_layer(planes * self.expansion)
    self.relu = nn.ReLU(inplace=True)
    self.downsample = downsample
    self.stride = stride
    # pytorch_nndct functional.Add for the residual sum — presumably so the
    # add can be handled by the quantizer; verify against pytorch_nndct docs.
    self.add = functional.Add()

  def forward(self, x):
    """Apply the bottleneck transform and add the (possibly projected) identity."""
    identity = x
    out = self.conv1(x)
    out = self.bn1(out)
    out = self.relu(out)
    out = self.conv2(out)
    out = self.bn2(out)
    out = self.relu(out)
    out = self.conv3(out)
    out = self.bn3(out)
    if self.downsample is not None:
      identity = self.downsample(x)
    #out = out.bfloat16().float() + identity.bfloat16().float()
    out = self.add(out, identity)
    out = self.relu(out)
    return out
class ResNet(nn.Module):
  """ResNet backbone assembled from residual blocks.

  `layers` gives the number of blocks in each of the four stages
  (conv2_x..conv5_x). NOTE(review): `zero_init_residual` is accepted but
  never used in this constructor.
  """

  def __init__(self,
               block,
               layers,
               num_classes=1000,
               zero_init_residual=False,
               groups=1,
               width_per_group=64,
               replace_stride_with_dilation=None,
               norm_layer=None):
    super().__init__()
    if norm_layer is None:
      norm_layer = nn.BatchNorm2d
    self._norm_layer = norm_layer
    self.inplanes = 64
    self.dilation = 1
    if replace_stride_with_dilation is None:
      # each element in the tuple indicates if we should replace
      # the 2x2 stride with a dilated convolution instead
      replace_stride_with_dilation = [False, False, False]
    if len(replace_stride_with_dilation) != 3:
      raise ValueError(
          "replace_stride_with_dilation should be None "
          "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
    self.groups = groups
    self.base_width = width_per_group
    # conv1
    self.conv1 = nn.Conv2d(
        3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
    self.bn1 = norm_layer(self.inplanes)
    self.relu = nn.ReLU(inplace=True)
    # conv2_x
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    self.layer1 = self._make_layer(block, 64, layers[0])
    # conv3_x
    self.layer2 = self._make_layer(
        block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
    # conv4_x
    self.layer3 = self._make_layer(
        block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
    # conv5_x
    self.layer4 = self._make_layer(
        block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
    # avgpool, fc
    self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
    self.fc = nn.Linear(512 * block.expansion, num_classes)

  def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
    """Build one stage of `blocks` residual blocks.

    Only the first block may stride/downsample; the rest preserve shape.
    """
    norm_layer = self._norm_layer
    downsample = None
    previous_dilation = self.dilation
    if dilate:
      # Trade stride for dilation to preserve spatial resolution.
      self.dilation *= stride
      stride = 1
    if stride != 1 or self.inplanes != planes * block.expansion:
      # Project the identity path so it matches the block output shape.
      downsample = nn.Sequential(
          conv1x1(self.inplanes, planes * block.expansion, stride),
          norm_layer(planes * block.expansion))
    layers = []
    layers.append(
        block(self.inplanes, planes, stride, downsample, self.groups,
              self.base_width, previous_dilation, norm_layer))
    self.inplanes = planes * block.expansion
    for _ in range(1, blocks):
      layers.append(
          block(
              self.inplanes,
              planes,
              groups=self.groups,
              base_width=self.base_width,
              dilation=self.dilation,
              norm_layer=norm_layer))
    return nn.Sequential(*layers)

  def forward(self, x):
    """Stem -> 4 residual stages -> global average pool -> classifier logits."""
    x = self.conv1(x)
    x = self.bn1(x)
    x = self.relu(x)
    x = self.maxpool(x)
    x = self.layer1(x)
    x = self.layer2(x)
    x = self.layer3(x)
    x = self.layer4(x)
    #x = self.avgpool(x.bfloat16().float())
    x = self.avgpool(x)
    x = torch.flatten(x, 1)
    x = self.fc(x)
    return x
def resnet152(**kwargs):
  """ResNet-152: Bottleneck blocks with [3, 8, 36, 3] units per stage."""
  return ResNet(Bottleneck, [3, 8, 36, 3], **kwargs) | null
24,425 | import argparse
import os
import shutil
import time
import torch
import torchvision
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import torch.nn as nn
from pytorch_nndct.nn.modules import functional
from pytorch_nndct.quantization import bfp
torch.backends.cudnn.deterministic = True
args = parser.parse_args()
def load_imagenet(batch_size):
  """Build ImageNet train/val DataLoaders rooted at args.data_dir.

  NOTE(review): reads the module-level `args` namespace for the dataset root.
  Returns (train_loader, val_loader).
  """
  # Data loading code
  normalize = transforms.Normalize(
      mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
  train_dir = os.path.join(args.data_dir, 'train')
  val_dir = os.path.join(args.data_dir, 'val')

  # Training pipeline: random crop + horizontal flip augmentation.
  train_loader = torch.utils.data.DataLoader(
      datasets.ImageFolder(
          train_dir,
          transforms.Compose([
              transforms.RandomResizedCrop(224),
              transforms.RandomHorizontalFlip(),
              transforms.ToTensor(),
              normalize,
          ])),
      batch_size=batch_size,
      shuffle=True,
      num_workers=4,
      pin_memory=True)
  # Validation pipeline: deterministic resize + center crop.
  val_loader = torch.utils.data.DataLoader(
      datasets.ImageFolder(
          val_dir,
          transforms.Compose([
              transforms.Resize(256),
              transforms.CenterCrop(224),
              transforms.ToTensor(),
              normalize,
          ])),
      batch_size=batch_size,
      shuffle=False,
      num_workers=4,
      pin_memory=True)
  return train_loader, val_loader
24,426 | import argparse
import os
import shutil
import time
import torch
import torchvision
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import torch.nn as nn
from pytorch_nndct.nn.modules import functional
from pytorch_nndct.quantization import bfp
def validate(val_loader, model, criterion, device, args):
  """Evaluate `model` over `val_loader`, printing progress and summaries.

  Returns the average top-1 accuracy over the whole loader.
  """
  batch_time = AverageMeter('Time', ':6.3f')
  losses = AverageMeter('Loss', ':.4e')
  top1 = AverageMeter('Acc@1', ':6.2f')
  top5 = AverageMeter('Acc@5', ':6.2f')
  progress = ProgressMeter(
      len(val_loader), [batch_time, losses, top1, top5], prefix='Test: ')

  # Inference mode: freeze norm statistics and skip autograd bookkeeping.
  model.eval()
  with torch.no_grad():
    tick = time.time()
    for batch_idx, (images, target) in enumerate(val_loader):
      images = images.to(device, non_blocking=True)
      target = target.to(device, non_blocking=True)

      output = model(images)
      loss = criterion(output, target)

      acc1, acc5 = accuracy(output, target, topk=(1, 5))
      n = images.size(0)
      losses.update(loss.item(), n)
      top1.update(acc1.item(), n)
      top5.update(acc5.item(), n)

      batch_time.update(time.time() - tick)
      tick = time.time()

      if batch_idx % args.val_display_freq == 0:
        progress.display(batch_idx)

  print(
      ' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f} '.format(
          top1=top1, top5=top5),
      flush=True)
  print(
      ' * Normalized_Acc@1 {res:.3f} \n'.format(res=top1.avg / args.acc),
      flush=True)
  return top1.avg
def save_checkpoint(state, is_best, args):
  """Write `state` to <args.model_dir>/ckpt.pth; copy to best_ckpt.pth if best."""
  ckpt_path = os.path.join(args.model_dir, 'ckpt.pth')
  torch.save(state, ckpt_path)
  if is_best:
    best_path = os.path.join(args.model_dir, 'best_ckpt.pth')
    shutil.copyfile(ckpt_path, best_path)
class AverageMeter(object):
  """Tracks the most recent value and the running mean of a metric."""

  def __init__(self, name, fmt=':f'):
    self.name = name
    self.fmt = fmt
    self.reset()

  def reset(self):
    """Clear all accumulated statistics."""
    self.val = 0
    self.avg = 0
    self.sum = 0
    self.count = 0

  def update(self, val, n=1):
    """Record `val` observed `n` times and refresh the running mean."""
    self.val = val
    self.sum += val * n
    self.count += n
    self.avg = self.sum / self.count

  def __str__(self):
    template = '{name} {val%s} ({avg%s})' % (self.fmt, self.fmt)
    return template.format(**self.__dict__)
class ProgressMeter(object):
  """Prints a '[batch/total]' counter followed by a list of meters."""

  def __init__(self, num_batches, meters, prefix=""):
    self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
    self.meters = meters
    self.prefix = prefix

  def display(self, batch):
    parts = [self.prefix + self.batch_fmtstr.format(batch)]
    for meter in self.meters:
      parts.append(str(meter))
    print('\t'.join(parts), flush=True)

  def _get_batch_fmtstr(self, num_batches):
    # Right-align the running batch index to the width of the total count.
    width = len(str(num_batches // 1))
    fmt = '{:%dd}' % width
    return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def accuracy(output, target, topk=(1,)):
  """Computes the accuracy over the k top predictions for the specified values of k"""
  with torch.no_grad():
    k_max = max(topk)
    n = target.size(0)
    _, pred = output.topk(k_max, 1, True, True)
    pred = pred.t()
    hits = pred.eq(target.view(1, -1).expand_as(pred))
    return [
        hits[:k].reshape(-1).float().sum(0, keepdim=True).mul_(100.0 / n)
        for k in topk
    ]
def train(train_loader, val_loader, model, criterion, optimizer, epoch, device,
          best_acc1, args):
  """Train `model` for one epoch, periodically validating and checkpointing.

  Returns the best top-1 accuracy seen so far (updated when a mid-epoch
  validation improves on `best_acc1`).
  """
  batch_time = AverageMeter('Time', ':6.3f')
  data_time = AverageMeter('Data', ':6.3f')
  losses = AverageMeter('Loss', ':.4e')
  top1 = AverageMeter('Acc@1', ':6.2f')
  top5 = AverageMeter('Acc@5', ':6.2f')
  progress = ProgressMeter(
      len(train_loader), [batch_time, data_time, losses, top1, top5],
      prefix="Epoch: [{}]".format(epoch))

  # switch to train mode
  model.train()

  end = time.time()
  for i, (images, target) in enumerate(train_loader):
    # measure data loading time
    data_time.update(time.time() - end)

    images = images.to(device, non_blocking=True)
    target = target.to(device, non_blocking=True)

    # compute output
    output = model(images)
    loss = criterion(output, target)

    # measure accuracy and record loss
    acc1, acc5 = accuracy(output, target, topk=(1, 5))
    losses.update(loss.item(), images.size(0))
    top1.update(acc1.item(), images.size(0))
    top5.update(acc5.item(), images.size(0))

    # compute gradient and do SGD step
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # measure elapsed time
    batch_time.update(time.time() - end)
    end = time.time()

    if i % args.train_display_freq == 0:
      progress.display(i)

    if i != 0 and i % args.val_freq_iters == 0:
      # evaluate on validation set (renamed from `acc1` to avoid shadowing
      # the per-batch accuracy tensor above)
      val_acc1 = validate(val_loader, model, criterion, device, args)
      # BUG FIX: validate() switches the model to eval mode; restore train
      # mode so BatchNorm/Dropout behave correctly for the rest of the epoch.
      model.train()

      # remember best acc@1 and save checkpoint
      is_best = val_acc1 > best_acc1
      best_acc1 = max(val_acc1, best_acc1)
      print('best_acc1: {a:.3f} \n'.format(a=best_acc1))
      print('best_acc1 normalized: {b:.3f} \n'.format(b=best_acc1 / args.acc))
      save_checkpoint(
          {
              'epoch': epoch + 1,
              'state_dict': model.state_dict(),
              'best_acc1': best_acc1,
              'optimizer': optimizer.state_dict(),
          }, is_best, args)
  return best_acc1 | null
24,427 | import argparse
import os
import shutil
import time
import torch
import torchvision
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import torch.nn as nn
from pytorch_nndct.nn.modules import functional
from pytorch_nndct.quantization import bfp
The provided code snippet includes necessary dependencies for implementing the `adjust_learning_rate` function. Write a Python function `def adjust_learning_rate(optimizer, epoch, args)` to solve the following problem:
Sets the learning rate to the initial LR decayed by 10 every 10 epochs
Here is the function:
def adjust_learning_rate(optimizer, epoch, args):
  """Sets the learning rate to the initial LR decayed by 10 every 10 epochs"""
  # Step decay: lr = base_lr * 0.1^(epoch // 10).
  lr = args.lr * (0.1**(epoch // 10))
  # Apply the decayed rate to every parameter group in place.
  for param_group in optimizer.param_groups:
    param_group['lr'] = lr | Sets the learning rate to the initial LR decayed by 10 every 10 epochs
24,428 | import os
import re
import sys
import argparse
import time
import pdb
import random
from pytorch_nndct.apis import torch_quantizer
import torch
import torchvision
import torchvision.transforms as transforms
from torchvision.models.resnet import resnet18
from tqdm import tqdm
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
args, _ = parser.parse_known_args()
def load_data(train=True,
data_dir='dataset/imagenet',
batch_size=128,
subset_len=None,
sample_method='random',
distributed=False,
model_name='resnet18',
**kwargs):
def evaluate(model, val_loader, loss_fn):
def forward_loop(model, val_loader):
class torch_quantizer():
def __init__(self,
quant_mode: str, # ['calib', 'test']
module: Union[torch.nn.Module, List[torch.nn.Module]],
input_args: Union[torch.Tensor, Sequence[Any]] = None,
input_kwargs: Dict = None,
state_dict_file: Optional[str] = None,
output_dir: str = "quantize_result",
bitwidth: int = None,
mix_bit: bool = False,
device: torch.device = torch.device("cuda"),
lstm: bool = False,
app_deploy: str = "CV",
qat_proc: bool = False,
custom_quant_ops: List[str] = None,
quant_config_file: Optional[str] = None,
target: Optional[str] = None,
dynamo: bool = False):
def fast_finetune(self, run_fn, run_args):
def load_ft_param(self):
def quantize(self, run_fn, run_args, ft_run_args=None):
def test(self, run_fn, run_args):
def deploy(self, run_fn, run_args, fmt='xmodel'):
def export_quant_config(self):
def export_xmodel(self, output_dir="quantize_result", deploy_check=False, dynamic_batch=False):
def export_onnx_model(self, output_dir="quantize_result", verbose=False, dynamic_batch=False, opset_version=None, native_onnx=True, dump_layers=False, check_model=False, opt_graph=False):
def export_traced_torch_script(self, output_dir="quantize_result", verbose=False):
def export_torch_script(self, output_dir="quantize_result", verbose=False):
def quant_model(self):
def deploy_model(self):
class Inspector(object):
def __init__(self, name_or_fingerprint: str):
def inspect(self, module: torch.nn.Module,
input_args: Union[torch.Tensor, Tuple[Any]],
device: torch.device = torch.device("cuda"),
output_dir: str = "quantize_result",
verbose_level: int = 1,
image_format: Optional[str] = None):
def quantization(title='optimize',
                 model_name='',
                 file_path=''):
  """Quantize / inspect / deploy a ResNet-18 checkpoint with pytorch_nndct.

  Driven by the module-level `args` namespace: quant_mode
  ('float'/'calib'/'test'), fast_finetune, deploy, batch_size, subset_len,
  inspect, config_file, target.
  """
  data_dir = args.data_dir
  quant_mode = args.quant_mode
  finetune = args.fast_finetune
  deploy = args.deploy
  batch_size = args.batch_size
  subset_len = args.subset_len
  inspect = args.inspect
  config_file = args.config_file
  target = args.target
  # xmodel export is only valid in 'test' mode with batch size 1 and a
  # single inference iteration; coerce the settings accordingly.
  if quant_mode != 'test' and deploy:
    deploy = False
    print(r'Warning: Exporting xmodel needs to be done in quantization test mode, turn off it in this running!')
  if deploy and (batch_size != 1 or subset_len != 1):
    print(r'Warning: Exporting xmodel needs batch size to be 1 and only 1 iteration of inference, change them automatically!')
    batch_size = 1
    subset_len = 1

  model = resnet18().cpu()
  model.load_state_dict(torch.load(file_path))

  input = torch.randn([batch_size, 3, 224, 224])
  if quant_mode == 'float':
    quant_model = model
    if inspect:
      if not target:
        raise RuntimeError("A target should be specified for inspector.")
      import sys
      from pytorch_nndct.apis import Inspector
      # create inspector
      inspector = Inspector(target) # by name
      # start to inspect
      inspector.inspect(quant_model, (input,), device=device)
      sys.exit()
  else:
    ####################################################################################
    # This function call will create a quantizer object and setup it.
    # Eager mode model code will be converted to graph model.
    # Quantization is not done here if it needs calibration.
    quantizer = torch_quantizer(
        quant_mode, model, (input), device=device, quant_config_file=config_file, target=target)

    # Get the converted model to be quantized.
    quant_model = quantizer.quant_model
    #####################################################################################

  # to get loss value after evaluation
  loss_fn = torch.nn.CrossEntropyLoss().to(device)

  val_loader, _ = load_data(
      subset_len=subset_len,
      train=False,
      batch_size=batch_size,
      sample_method='random',
      data_dir=data_dir,
      model_name=model_name)

  # fast finetune model or load finetuned parameter before test
  if finetune == True:
    ft_loader, _ = load_data(
        subset_len=5120,
        train=False,
        batch_size=batch_size,
        sample_method='random',
        data_dir=data_dir,
        model_name=model_name)
    if quant_mode == 'calib':
      quantizer.fast_finetune(forward_loop, (quant_model, ft_loader))
    elif quant_mode == 'test':
      quantizer.load_ft_param()

  if quant_mode == 'calib':
    # This function call is to do forward loop for model to be quantized.
    # Quantization calibration will be done after it.
    forward_loop(quant_model, val_loader)
    # Exporting intermediate files will be used when quant_mode is 'test'. This is must.
    quantizer.export_quant_config()
  else:
    acc1_gen, acc5_gen, loss_gen = evaluate(quant_model, val_loader, loss_fn)
    # logging accuracy
    print('loss: %g' % (loss_gen))
    print('top-1 / top-5 accuracy: %g / %g' % (acc1_gen, acc5_gen))

  # handle quantization result
  if quant_mode == 'test' and deploy:
    quantizer.export_torch_script()
    quantizer.export_onnx_model()
    quantizer.export_xmodel() | null
24,429 | import os
import re
import sys
import argparse
import time
import random
from pytorch_nndct.apis import torch_quantizer, dump_xmodel
import torch
import torchvision
import torchvision.transforms as transforms
from torchvision.models.mobilenet import mobilenet_v2
from tqdm import tqdm
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
args, _ = parser.parse_known_args()
def load_data(train=True,
              data_dir='dataset/imagenet',
              batch_size=128,
              subset_len=None,
              sample_method='random',
              distributed=False,
              model_name='mobilenet_v2',
              **kwargs):
  """Create an ImageNet DataLoader for the train or val split.

  `subset_len` optionally restricts the dataset to a random or leading
  subset; extra `**kwargs` are forwarded to DataLoader.
  Returns (data_loader, train_sampler); train_sampler is None unless
  `train` and `distributed` are both set.
  """
  #prepare data
  # random.seed(12345)
  traindir = data_dir + '/train'
  valdir = data_dir + '/val'
  train_sampler = None
  normalize = transforms.Normalize(
      mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
  # Inception v3 expects 299x299 inputs; everything else uses 224x224.
  if model_name == 'inception_v3':
    size = 299
    resize = 299
  else:
    size = 224
    resize = 256
  if train:
    dataset = torchvision.datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(size),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))
    if subset_len:
      assert subset_len <= len(dataset)
      if sample_method == 'random':
        dataset = torch.utils.data.Subset(
            dataset, random.sample(range(0, len(dataset)), subset_len))
      else:
        dataset = torch.utils.data.Subset(dataset, list(range(subset_len)))
    if distributed:
      train_sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=(train_sampler is None),
        sampler=train_sampler,
        **kwargs)
  else:
    dataset = torchvision.datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Resize(resize),
            transforms.CenterCrop(size),
            transforms.ToTensor(),
            normalize,
        ]))
    if subset_len:
      assert subset_len <= len(dataset)
      if sample_method == 'random':
        dataset = torch.utils.data.Subset(
            dataset, random.sample(range(0, len(dataset)), subset_len))
      else:
        dataset = torch.utils.data.Subset(dataset, list(range(subset_len)))
    data_loader = torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, shuffle=False, **kwargs)
  return data_loader, train_sampler
def evaluate(model, val_loader, loss_fn):
  """Run `model` over `val_loader`; return (top-1 avg, top-5 avg, loss/sample).

  NOTE(review): relies on module-level `device`, `AverageMeter` and
  `accuracy` being defined elsewhere in this script.
  """
  model.eval()
  model = model.to(device)
  top1 = AverageMeter('Acc@1', ':6.2f')
  top5 = AverageMeter('Acc@5', ':6.2f')
  total = 0
  Loss = 0
  for iteraction, (images, labels) in tqdm(
      enumerate(val_loader), total=len(val_loader)):
    images = images.to(device)
    labels = labels.to(device)
    outputs = model(images)
    loss = loss_fn(outputs, labels)
    # Accumulate summed loss and sample count for the per-sample average.
    Loss += loss.item()
    total += images.size(0)
    acc1, acc5 = accuracy(outputs, labels, topk=(1, 5))
    top1.update(acc1[0], images.size(0))
    top5.update(acc5[0], images.size(0))
  return top1.avg, top5.avg, Loss / total
class torch_quantizer():
def __init__(self,
quant_mode: str, # ['calib', 'test']
module: Union[torch.nn.Module, List[torch.nn.Module]],
input_args: Union[torch.Tensor, Sequence[Any]] = None,
input_kwargs: Dict = None,
state_dict_file: Optional[str] = None,
output_dir: str = "quantize_result",
bitwidth: int = None,
mix_bit: bool = False,
device: torch.device = torch.device("cuda"),
lstm: bool = False,
app_deploy: str = "CV",
qat_proc: bool = False,
custom_quant_ops: List[str] = None,
quant_config_file: Optional[str] = None,
target: Optional[str] = None,
dynamo: bool = False):
self.device = device
input_data = StandardInputData(input_args, input_kwargs, device)
vaiq_system_info(device)
if NndctOption.nndct_target.value:
target = NndctOption.nndct_target.value
if NndctOption.nndct_inspect_test.value and target:
from pytorch_nndct.apis import Inspector
inspector = Inspector(target)
inspector.inspect(module, input_args, device, output_dir)
if bitwidth is None and quant_config_file is None:
bitwidth = 8
if app_deploy == "CV": lstm_app = False
elif app_deploy == "NLP": lstm_app = True
self._qat_proc = False
if qat_proc:
if bitwidth is None:
bitwidth = 8
self.processor = QatProcessor(model = module,
inputs = input_args,
bitwidth = bitwidth,
mix_bit = mix_bit,
device = device)
self._qat_proc = True
elif lstm:
self.processor = RNNQuantProcessor(quant_mode = quant_mode,
module = module,
input_args = input_args,
state_dict_file = state_dict_file,
output_dir = output_dir,
bitwidth_w = bitwidth,
# lstm IP only support 16 bit activation
bitwidth_a = 16,
device = device,
lstm_app = lstm_app,
quant_config_file = quant_config_file)
elif dynamo:
from pytorch_nndct.qproc.base import DynamoQuantProcessor
self.processor = DynamoQuantProcessor(quant_mode=quant_mode,
module=module,
input_args=input_args,
output_dir=output_dir,
bitwidth_w=bitwidth,
bitwidth_a=bitwidth,
device=device,
lstm_app=lstm_app,
quant_config_file=quant_config_file,
target=target)
else:
self.processor = TorchQuantProcessor(quant_mode = quant_mode,
module = module,
input_data = input_data,
state_dict_file = state_dict_file,
output_dir = output_dir,
bitwidth_w = bitwidth,
bitwidth_a = bitwidth,
device = device,
lstm_app = lstm_app,
custom_quant_ops = custom_quant_ops,
quant_config_file = quant_config_file,
target = target)
# Finetune parameters,
# After finetuning, run original forwarding code for calibration
# After calibration, run original forwarding code to test quantized model accuracy
def fast_finetune(self, run_fn, run_args):
self.processor.finetune(run_fn, run_args)
# load finetuned parameters
def load_ft_param(self):
#self.processor.advanced_quant_setup()
self.processor.quantizer.load_param()
# calibration can be called in the same process with test and deploy
def quantize(self, run_fn, run_args, ft_run_args=None):
self.processor.quantize(run_fn, run_args, ft_run_args)
# test can be called in the same process with calibration and deploy
def test(self, run_fn, run_args):
self.processor.test(run_fn, run_args)
# deploy can be called in the same process with calibration and test
def deploy(self, run_fn, run_args, fmt='xmodel'):
self.processor.deploy(run_fn, run_args, fmt)
# export quantization steps information for tensors to be quantized
def export_quant_config(self):
self.processor.export_quant_config()
# export xmodel for compilation
def export_xmodel(self, output_dir="quantize_result", deploy_check=False, dynamic_batch=False):
  """Export an xmodel for compilation into ``output_dir``."""
  self.processor.export_xmodel(output_dir, deploy_check, dynamic_batch)
def export_onnx_model(self, output_dir="quantize_result", verbose=False, dynamic_batch=False, opset_version=None, native_onnx=True, dump_layers=False, check_model=False, opt_graph=False):
  """Export the quantized model in ONNX format; flags forwarded verbatim."""
  self.processor.export_onnx_model(output_dir, verbose, dynamic_batch, opset_version, native_onnx, dump_layers, check_model, opt_graph)
def export_traced_torch_script(self, output_dir="quantize_result", verbose=False):
  """Deprecated: use :meth:`export_torch_script` instead."""
  NndctScreenLogger().warning(
      '"export_traced_torch_script" is deprecated and will be removed in the future. '
      'Use "export_torch_script" instead.')
  self.processor.export_traced_torch_script(output_dir, verbose)
def export_torch_script(self, output_dir="quantize_result", verbose=False):
  """Export the quantized model as TorchScript; returns the processor result."""
  return self.processor.export_torch_script(output_dir, verbose)
def quant_model(self):
  """Return the module instrumented with quantization."""
  NndctScreenLogger().info(f"=>Get module with quantization.")
  return self.processor.quant_model()
def deploy_model(self):
  """Return the deployable module; only available for the QAT flow."""
  if not self._qat_proc:
    # Non-QAT processors have no deployable model; warn and return None.
    NndctScreenLogger().warning2user(QWarning.DEPLOY_MODEL, f"Only quant aware training process has deployable model.")
    return
  NndctScreenLogger().info(f"=>Get deployable module.")
  return self.processor.deploy_model()
def quantization(title='optimize',
model_name='',
file_path=''):
data_dir = args.data_dir
quant_mode = args.quant_mode
finetune = True
deploy = args.deploy
batch_size = args.batch_size
subset_len = args.subset_len
if quant_mode != 'test' and deploy:
deploy = False
print(r'Warning: Exporting xmodel needs to be done in quantization test mode, turn off it in this running!')
if deploy and (batch_size != 1 or subset_len != 1):
print(r'Warning: Exporting xmodel needs batch size to be 1 and only 1 iteration of inference, change them automatically!')
batch_size = 1
subset_len = 1
model = mobilenet_v2().cpu()
model.load_state_dict(torch.load(file_path))
input = torch.randn([batch_size, 3, 224, 224])
if quant_mode == 'float':
quant_model = model
else:
## new api
####################################################################################
quantizer = torch_quantizer(
quant_mode, model, (input), device=device)
quant_model = quantizer.quant_model
#####################################################################################
# to get loss value after evaluation
loss_fn = torch.nn.CrossEntropyLoss().to(device)
val_loader, _ = load_data(
subset_len=subset_len,
train=False,
batch_size=batch_size,
sample_method='random',
data_dir=data_dir,
model_name=model_name)
# fast finetune model or load finetuned parameter before test
if finetune == True:
ft_loader, _ = load_data(
subset_len=5120,
train=False,
batch_size=batch_size,
sample_method='random',
data_dir=data_dir,
model_name=model_name)
if quant_mode == 'calib':
quantizer.fast_finetune(evaluate, (quant_model, ft_loader, loss_fn))
elif quant_mode == 'test':
quantizer.load_ft_param()
# record modules float model accuracy
# add modules float model accuracy here
acc_org1 = 0.0
acc_org5 = 0.0
loss_org = 0.0
#register_modification_hooks(model_gen, train=False)
acc1_gen, acc5_gen, loss_gen = evaluate(quant_model, val_loader, loss_fn)
# logging accuracy
print('loss: %g' % (loss_gen))
print('top-1 / top-5 accuracy: %g / %g' % (acc1_gen, acc5_gen))
# handle quantization result
if quant_mode == 'calib':
quantizer.export_quant_config()
if deploy:
quantizer.export_xmodel(deploy_check=False) | null |
24,430 | import os
import re
import sys
import argparse
import time
import pdb
import random
from pytorch_nndct.apis import torch_quantizer
from pytorch_nndct.utils import register_custom_op
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from torchvision.models.resnet import ResNet, BasicBlock
from tqdm import tqdm
def custom_op(ctx, x: torch.Tensor, y: torch.Tensor, scale_1: float, scale_2: float) -> torch.Tensor:
    """Return the weighted sum ``scale_1 * x + scale_2 * y`` (``ctx`` unused here)."""
    scaled_x = x * scale_1
    scaled_y = y * scale_2
    return scaled_x + scaled_y
24,431 | import os
import re
import sys
import argparse
import time
import pdb
import random
from pytorch_nndct.apis import torch_quantizer
from pytorch_nndct.utils import register_custom_op
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from torchvision.models.resnet import ResNet, BasicBlock
from tqdm import tqdm
# Run on GPU when available, otherwise CPU; used by evaluate()/quantization().
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# NOTE(review): ``parser`` is not defined in this snippet — presumably an
# argparse.ArgumentParser created earlier; unknown CLI args are ignored.
args, _ = parser.parse_known_args()
def resnet18(pretrained=False, progress=True, **kwargs):
  """ResNet-18 builder: BasicBlock with stage depths [2, 2, 2, 2].

  NOTE(review): delegates to ``_resnet``, which is not defined in this
  snippet — presumably provided elsewhere in the file; verify.
  """
  return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
                 **kwargs)
def load_data(train=True,
              data_dir='dataset/imagenet',
              batch_size=128,
              subset_len=None,
              sample_method='random',
              distributed=False,
              model_name='resnet18',
              **kwargs):
  """Build an ImageNet DataLoader for training or validation.

  Args:
    train: build the augmented train loader if True, else the val loader.
    data_dir: dataset root containing ``train``/``val`` subfolders.
    batch_size: loader batch size.
    subset_len: when set, restrict the dataset to this many samples.
    sample_method: 'random' draws the subset randomly, anything else takes
      the first ``subset_len`` samples.
    distributed: use a DistributedSampler for the train loader.
    model_name: 'inception_v3' switches to 299x299 preprocessing.
    **kwargs: forwarded to ``torch.utils.data.DataLoader``.

  Returns:
    (data_loader, train_sampler); ``train_sampler`` is None unless
    ``train`` and ``distributed`` are both set.
  """

  def _maybe_subset(dataset):
    # Subset selection was copy-pasted in both branches; deduplicated here.
    if not subset_len:
      return dataset
    assert subset_len <= len(dataset)
    if sample_method == 'random':
      indices = random.sample(range(0, len(dataset)), subset_len)
    else:
      indices = list(range(subset_len))
    return torch.utils.data.Subset(dataset, indices)

  traindir = data_dir + '/train'
  valdir = data_dir + '/val'
  train_sampler = None
  normalize = transforms.Normalize(
      mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
  # inception_v3 expects 299x299 inputs; everything else uses 224 (resize 256).
  if model_name == 'inception_v3':
    size = 299
    resize = 299
  else:
    size = 224
    resize = 256
  if train:
    dataset = torchvision.datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(size),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))
    dataset = _maybe_subset(dataset)
    if distributed:
      train_sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=(train_sampler is None),
        sampler=train_sampler,
        **kwargs)
  else:
    dataset = torchvision.datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Resize(resize),
            transforms.CenterCrop(size),
            transforms.ToTensor(),
            normalize,
        ]))
    dataset = _maybe_subset(dataset)
    data_loader = torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, shuffle=False, **kwargs)
  return data_loader, train_sampler
def evaluate(model, val_loader, loss_fn):
  """Run one evaluation pass over ``val_loader``.

  Returns (top1_avg, top5_avg, summed_batch_loss / total_samples).
  """
  model.eval()
  model = model.to(device)
  top1 = AverageMeter('Acc@1', ':6.2f')
  top5 = AverageMeter('Acc@5', ':6.2f')
  sample_count = 0
  loss_sum = 0
  for batch_idx, (images, labels) in tqdm(
      enumerate(val_loader), total=len(val_loader)):
    images = images.to(device)
    labels = labels.to(device)
    outputs = model(images)
    batch_loss = loss_fn(outputs, labels)
    loss_sum += batch_loss.item()
    batch_size = images.size(0)
    sample_count += batch_size
    acc1, acc5 = accuracy(outputs, labels, topk=(1, 5))
    top1.update(acc1[0], batch_size)
    top5.update(acc5[0], batch_size)
  return top1.avg, top5.avg, loss_sum / sample_count
class torch_quantizer():
  """User-facing facade over the NNDCT quantization processors.

  The constructor flags select one of four backends; every public method
  simply delegates to it:

    * ``qat_proc=True``  -> QatProcessor (quant-aware training)
    * ``lstm=True``      -> RNNQuantProcessor (LSTM/RNN models)
    * ``dynamo=True``    -> DynamoQuantProcessor
    * otherwise          -> TorchQuantProcessor (default post-training flow)
  """

  def __init__(self,
               quant_mode: str, # ['calib', 'test']
               module: Union[torch.nn.Module, List[torch.nn.Module]],
               input_args: Union[torch.Tensor, Sequence[Any]] = None,
               input_kwargs: Dict = None,
               state_dict_file: Optional[str] = None,
               output_dir: str = "quantize_result",
               bitwidth: int = None,
               mix_bit: bool = False,
               device: torch.device = torch.device("cuda"),
               lstm: bool = False,
               app_deploy: str = "CV",
               qat_proc: bool = False,
               custom_quant_ops: List[str] = None,
               quant_config_file: Optional[str] = None,
               target: Optional[str] = None,
               dynamo: bool = False):
    self.device = device
    input_data = StandardInputData(input_args, input_kwargs, device)
    vaiq_system_info(device)
    # An explicitly configured NNDCT target option overrides the argument.
    if NndctOption.nndct_target.value:
      target = NndctOption.nndct_target.value
    if NndctOption.nndct_inspect_test.value and target:
      from pytorch_nndct.apis import Inspector
      inspector = Inspector(target)
      inspector.inspect(module, input_args, device, output_dir)
    # Default to 8-bit quantization when neither a bitwidth nor a config
    # file pins it down.
    if bitwidth is None and quant_config_file is None:
      bitwidth = 8
    # BUGFIX: an unrecognized ``app_deploy`` used to leave ``lstm_app``
    # unbound and crash later with NameError; fail fast instead.
    if app_deploy == "CV":
      lstm_app = False
    elif app_deploy == "NLP":
      lstm_app = True
    else:
      raise ValueError(
          f"app_deploy must be 'CV' or 'NLP', got {app_deploy!r}")
    self._qat_proc = False
    if qat_proc:
      if bitwidth is None:
        bitwidth = 8
      self.processor = QatProcessor(model = module,
                                    inputs = input_args,
                                    bitwidth = bitwidth,
                                    mix_bit = mix_bit,
                                    device = device)
      self._qat_proc = True
    elif lstm:
      self.processor = RNNQuantProcessor(quant_mode = quant_mode,
                                         module = module,
                                         input_args = input_args,
                                         state_dict_file = state_dict_file,
                                         output_dir = output_dir,
                                         bitwidth_w = bitwidth,
                                         # lstm IP only support 16 bit activation
                                         bitwidth_a = 16,
                                         device = device,
                                         lstm_app = lstm_app,
                                         quant_config_file = quant_config_file)
    elif dynamo:
      from pytorch_nndct.qproc.base import DynamoQuantProcessor
      self.processor = DynamoQuantProcessor(quant_mode=quant_mode,
                                            module=module,
                                            input_args=input_args,
                                            output_dir=output_dir,
                                            bitwidth_w=bitwidth,
                                            bitwidth_a=bitwidth,
                                            device=device,
                                            lstm_app=lstm_app,
                                            quant_config_file=quant_config_file,
                                            target=target)
    else:
      self.processor = TorchQuantProcessor(quant_mode = quant_mode,
                                           module = module,
                                           input_data = input_data,
                                           state_dict_file = state_dict_file,
                                           output_dir = output_dir,
                                           bitwidth_w = bitwidth,
                                           bitwidth_a = bitwidth,
                                           device = device,
                                           lstm_app = lstm_app,
                                           custom_quant_ops = custom_quant_ops,
                                           quant_config_file = quant_config_file,
                                           target = target)

  # Finetune parameters,
  # After finetuning, run original forwarding code for calibration
  # After calibration, run original forwarding code to test quantized model accuracy
  def fast_finetune(self, run_fn, run_args):
    """Fast-finetune parameters to reduce quantization error."""
    self.processor.finetune(run_fn, run_args)

  def load_ft_param(self):
    """Load parameters saved by a previous fast-finetune run."""
    #self.processor.advanced_quant_setup()
    self.processor.quantizer.load_param()

  def quantize(self, run_fn, run_args, ft_run_args=None):
    """Run calibration; usable in the same process as test and deploy."""
    self.processor.quantize(run_fn, run_args, ft_run_args)

  def test(self, run_fn, run_args):
    """Delegate quantized-model accuracy testing to the processor."""
    self.processor.test(run_fn, run_args)

  def deploy(self, run_fn, run_args, fmt='xmodel'):
    """Deploy the quantized model in format ``fmt`` (default: 'xmodel')."""
    self.processor.deploy(run_fn, run_args, fmt)

  def export_quant_config(self):
    """Export quantization step information for tensors to be quantized."""
    self.processor.export_quant_config()

  def export_xmodel(self, output_dir="quantize_result", deploy_check=False, dynamic_batch=False):
    """Export an xmodel for compilation into ``output_dir``."""
    self.processor.export_xmodel(output_dir, deploy_check, dynamic_batch)

  def export_onnx_model(self, output_dir="quantize_result", verbose=False, dynamic_batch=False, opset_version=None, native_onnx=True, dump_layers=False, check_model=False, opt_graph=False):
    """Export the quantized model in ONNX format; flags forwarded verbatim."""
    self.processor.export_onnx_model(output_dir, verbose, dynamic_batch, opset_version, native_onnx, dump_layers, check_model, opt_graph)

  def export_traced_torch_script(self, output_dir="quantize_result", verbose=False):
    """Deprecated: use :meth:`export_torch_script` instead."""
    NndctScreenLogger().warning(
        '"export_traced_torch_script" is deprecated and will be removed in the future. '
        'Use "export_torch_script" instead.')
    self.processor.export_traced_torch_script(output_dir, verbose)

  def export_torch_script(self, output_dir="quantize_result", verbose=False):
    """Export the quantized model as TorchScript; returns the processor result."""
    return self.processor.export_torch_script(output_dir, verbose)

  def quant_model(self):
    """Return the module instrumented with quantization."""
    NndctScreenLogger().info(f"=>Get module with quantization.")
    return self.processor.quant_model()

  def deploy_model(self):
    """Return the deployable module; only available for the QAT flow."""
    if not self._qat_proc:
      NndctScreenLogger().warning2user(QWarning.DEPLOY_MODEL, f"Only quant aware training process has deployable model.")
      return
    NndctScreenLogger().info(f"=>Get deployable module.")
    return self.processor.deploy_model()
def quantization(title='optimize',
model_name='',
file_path=''):
data_dir = args.data_dir
quant_mode = args.quant_mode
finetune = args.fast_finetune
deploy = args.deploy
batch_size = args.batch_size
subset_len = args.subset_len
if quant_mode != 'test' and deploy:
deploy = False
print(r'Warning: Exporting xmodel needs to be done in quantization test mode, turn off it in this running!')
if deploy and (batch_size != 1 or subset_len != 1):
print(r'Warning: Exporting xmodel needs batch size to be 1 and only 1 iteration of inference, change them automatically!')
batch_size = 1
subset_len = 1
model = resnet18().cpu()
model.load_state_dict(torch.load(file_path))
input = torch.randn([batch_size, 3, 224, 224])
if quant_mode == 'float':
quant_model = model
else:
## new api
####################################################################################
quantizer = torch_quantizer(
quant_mode, model, (input), device=device)
quant_model = quantizer.quant_model
#####################################################################################
# to get loss value after evaluation
loss_fn = torch.nn.CrossEntropyLoss().to(device)
val_loader, _ = load_data(
subset_len=subset_len,
train=False,
batch_size=batch_size,
sample_method='random',
data_dir=data_dir,
model_name=model_name)
# fast finetune model or load finetuned parameter before test
if finetune == True:
ft_loader, _ = load_data(
subset_len=5120,
train=False,
batch_size=batch_size,
sample_method='random',
data_dir=data_dir,
model_name=model_name)
if quant_mode == 'calib':
quantizer.fast_finetune(evaluate, (quant_model, ft_loader, loss_fn))
elif quant_mode == 'test':
quantizer.load_ft_param()
# record modules float model accuracy
# add modules float model accuracy here
acc_org1 = 0.0
acc_org5 = 0.0
loss_org = 0.0
#register_modification_hooks(model_gen, train=False)
acc1_gen, acc5_gen, loss_gen = evaluate(quant_model, val_loader, loss_fn)
# logging accuracy
print('loss: %g' % (loss_gen))
print('top-1 / top-5 accuracy: %g / %g' % (acc1_gen, acc5_gen))
# handle quantization result
if quant_mode == 'calib':
quantizer.export_quant_config()
if deploy:
quantizer.export_xmodel(deploy_check=False) | null |
24,432 | import argparse
import json
import logging
import time
from collections import OrderedDict
from contextlib import suppress
import torch
import torch.nn as nn
import torch.nn.parallel
from timm.data import create_dataset, create_loader, resolve_data_config, RealLabelsImagenet
from timm.models import create_model, load_checkpoint
from timm.utils import accuracy, AverageMeter, setup_default_logging, ParseKwargs
_logger = logging.getLogger('validate')
class torch_quantizer():
  """Facade over the NNDCT quantization processors.

  Flags select the backend: ``qat_proc`` -> QatProcessor, ``lstm`` ->
  RNNQuantProcessor, ``dynamo`` -> DynamoQuantProcessor, otherwise
  TorchQuantProcessor. All public methods delegate to the chosen processor.
  """

  def __init__(self,
               quant_mode: str, # ['calib', 'test']
               module: Union[torch.nn.Module, List[torch.nn.Module]],
               input_args: Union[torch.Tensor, Sequence[Any]] = None,
               input_kwargs: Dict = None,
               state_dict_file: Optional[str] = None,
               output_dir: str = "quantize_result",
               bitwidth: int = None,
               mix_bit: bool = False,
               device: torch.device = torch.device("cuda"),
               lstm: bool = False,
               app_deploy: str = "CV",
               qat_proc: bool = False,
               custom_quant_ops: List[str] = None,
               quant_config_file: Optional[str] = None,
               target: Optional[str] = None,
               dynamo: bool = False):
    self.device = device
    input_data = StandardInputData(input_args, input_kwargs, device)
    vaiq_system_info(device)
    # An explicitly configured NNDCT target option overrides the argument.
    if NndctOption.nndct_target.value:
      target = NndctOption.nndct_target.value
    if NndctOption.nndct_inspect_test.value and target:
      from pytorch_nndct.apis import Inspector
      inspector = Inspector(target)
      inspector.inspect(module, input_args, device, output_dir)
    # Default to 8-bit when neither a bitwidth nor a config file pins it.
    if bitwidth is None and quant_config_file is None:
      bitwidth = 8
    # NOTE(review): an ``app_deploy`` value other than "CV"/"NLP" leaves
    # ``lstm_app`` unbound and crashes later with NameError — consider
    # validating the argument explicitly.
    if app_deploy == "CV": lstm_app = False
    elif app_deploy == "NLP": lstm_app = True
    self._qat_proc = False
    if qat_proc:
      if bitwidth is None:
        bitwidth = 8
      self.processor = QatProcessor(model = module,
                                    inputs = input_args,
                                    bitwidth = bitwidth,
                                    mix_bit = mix_bit,
                                    device = device)
      self._qat_proc = True
    elif lstm:
      self.processor = RNNQuantProcessor(quant_mode = quant_mode,
                                         module = module,
                                         input_args = input_args,
                                         state_dict_file = state_dict_file,
                                         output_dir = output_dir,
                                         bitwidth_w = bitwidth,
                                         # lstm IP only support 16 bit activation
                                         bitwidth_a = 16,
                                         device = device,
                                         lstm_app = lstm_app,
                                         quant_config_file = quant_config_file)
    elif dynamo:
      from pytorch_nndct.qproc.base import DynamoQuantProcessor
      self.processor = DynamoQuantProcessor(quant_mode=quant_mode,
                                            module=module,
                                            input_args=input_args,
                                            output_dir=output_dir,
                                            bitwidth_w=bitwidth,
                                            bitwidth_a=bitwidth,
                                            device=device,
                                            lstm_app=lstm_app,
                                            quant_config_file=quant_config_file,
                                            target=target)
    else:
      self.processor = TorchQuantProcessor(quant_mode = quant_mode,
                                           module = module,
                                           input_data = input_data,
                                           state_dict_file = state_dict_file,
                                           output_dir = output_dir,
                                           bitwidth_w = bitwidth,
                                           bitwidth_a = bitwidth,
                                           device = device,
                                           lstm_app = lstm_app,
                                           custom_quant_ops = custom_quant_ops,
                                           quant_config_file = quant_config_file,
                                           target = target)

  # Finetune parameters,
  # After finetuning, run original forwarding code for calibration
  # After calibration, run original forwarding code to test quantized model accuracy
  def fast_finetune(self, run_fn, run_args):
    """Fast-finetune parameters to reduce quantization error."""
    self.processor.finetune(run_fn, run_args)

  # load finetuned parameters
  def load_ft_param(self):
    """Load parameters saved by a previous fast-finetune run."""
    #self.processor.advanced_quant_setup()
    self.processor.quantizer.load_param()

  # calibration can be called in the same process with test and deploy
  def quantize(self, run_fn, run_args, ft_run_args=None):
    """Run calibration via the processor."""
    self.processor.quantize(run_fn, run_args, ft_run_args)

  # test can be called in the same process with calibration and deploy
  def test(self, run_fn, run_args):
    """Delegate quantized-model accuracy testing to the processor."""
    self.processor.test(run_fn, run_args)

  # deploy can be called in the same process with calibration and test
  def deploy(self, run_fn, run_args, fmt='xmodel'):
    """Deploy the quantized model in format ``fmt`` (default: 'xmodel')."""
    self.processor.deploy(run_fn, run_args, fmt)

  # export quantization steps information for tensors to be quantized
  def export_quant_config(self):
    """Export quantization step information for tensors to be quantized."""
    self.processor.export_quant_config()

  # export xmodel for compilation
  def export_xmodel(self, output_dir="quantize_result", deploy_check=False, dynamic_batch=False):
    """Export an xmodel for compilation into ``output_dir``."""
    self.processor.export_xmodel(output_dir, deploy_check, dynamic_batch)

  def export_onnx_model(self, output_dir="quantize_result", verbose=False, dynamic_batch=False, opset_version=None, native_onnx=True, dump_layers=False, check_model=False, opt_graph=False):
    """Export the quantized model in ONNX format; flags forwarded verbatim."""
    self.processor.export_onnx_model(output_dir, verbose, dynamic_batch, opset_version, native_onnx, dump_layers, check_model, opt_graph)

  def export_traced_torch_script(self, output_dir="quantize_result", verbose=False):
    """Deprecated: use :meth:`export_torch_script` instead."""
    NndctScreenLogger().warning(
        '"export_traced_torch_script" is deprecated and will be removed in the future. '
        'Use "export_torch_script" instead.')
    self.processor.export_traced_torch_script(output_dir, verbose)

  def export_torch_script(self, output_dir="quantize_result", verbose=False):
    """Export the quantized model as TorchScript; returns the processor result."""
    return self.processor.export_torch_script(output_dir, verbose)

  def quant_model(self):
    """Return the module instrumented with quantization."""
    NndctScreenLogger().info(f"=>Get module with quantization.")
    return self.processor.quant_model()

  def deploy_model(self):
    """Return the deployable module; only available for the QAT flow."""
    if not self._qat_proc:
      NndctScreenLogger().warning2user(QWarning.DEPLOY_MODEL, f"Only quant aware training process has deployable model.")
      return
    NndctScreenLogger().info(f"=>Get deployable module.")
    return self.processor.deploy_model()
class Inspector(object):
  """Diagnose how a neural-network model maps onto a DPU architecture.

  Reports which device type each operator is assigned to and surfaces
  hardware-constraint messages useful for optimizing the model for
  deployment.
  """

  def __init__(self, name_or_fingerprint: str):
    """Create an inspector for the DPU named by an arch name or a
    hexadecimal fingerprint string (one starting with "0x")."""
    if NndctOption.nndct_use_old_inspector.value is True:
      from pytorch_nndct.hardware import InspectorImpl
    else:
      from pytorch_nndct.hardware_v3 import InspectorImpl
    NndctScreenLogger().info("Inspector is on.")
    # A "0x" prefix marks a fingerprint; anything else is an arch name.
    if name_or_fingerprint.startswith("0x"):
      self._inspector_impl = InspectorImpl.create_by_DPU_fingerprint(name_or_fingerprint)
    else:
      self._inspector_impl = InspectorImpl.create_by_DPU_arch_name(name_or_fingerprint)

  def inspect(self, module: torch.nn.Module,
              input_args: Union[torch.Tensor, Tuple[Any]],
              device: torch.device = torch.device("cuda"),
              output_dir: str = "quantize_result",
              verbose_level: int = 1,
              image_format: Optional[str] = None):
    """Inspect ``module`` and optionally dump a partition diagram."""
    NndctScreenLogger().info(f"=>Start to inspect model...")
    self._inspector_impl.inspect(module, input_args, device, output_dir, verbose_level)
    if image_format is not None:
      supported_formats = ["svg", "png"]
      NndctScreenLogger().check2user(QError.INSPECTOR_OUTPUT_FORMAT, f"Only support dump svg or png format.", image_format in supported_formats)
      self._inspector_impl.export_dot_image_v2(output_dir, image_format)
    NndctScreenLogger().info(f"=>Finish inspecting.")
def validate(args):
    """Validate a TIMM model, optionally quantizing (calib/test) and
    deploying it with the NNDCT toolchain, driven entirely by ``args``."""
    # Step 1: Prepare device, float model and data
    args.pretrained = args.pretrained or not args.checkpoint
    args.prefetcher = not args.no_prefetcher
    device = torch.device(args.device)
    if torch.cuda.is_available():
        torch.backends.cuda.matmul.allow_tf32 = True
        torch.backends.cudnn.benchmark = True
    elif args.device == 'cuda':
        # CUDA requested but not available: fall back to CPU.
        device = torch.device('cpu')
    if args.deploy:
        # Deployment export runs on CPU.
        device = torch.device("cpu")
    in_chans = 3
    model = create_model(
        args.model,
        pretrained=args.pretrained,
        num_classes=args.num_classes,
        in_chans=in_chans,
        global_pool=args.gp,
        scriptable=args.torchscript,
        **args.model_kwargs,
    )
    model.to(device)
    if args.num_classes is None:
        assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.'
        args.num_classes = model.num_classes
    if args.checkpoint:
        load_checkpoint(model, args.checkpoint, args.use_ema)
    param_count = sum([m.numel() for m in model.parameters()])
    _logger.info('Model %s created, param count: %d' % (args.model, param_count))
    data_config = resolve_data_config(
        vars(args),
        model=model,
        use_test_size=not args.use_train_size,
        verbose=True,
    )
    _logger.info('batch size: %d' % args.batch_size)
    _logger.info('data config: %s' % data_config)
    test_time_pool = False
    criterion = nn.CrossEntropyLoss().to(device)
    root_dir = args.data or args.data_dir
    dataset = create_dataset(
        root=root_dir,
        name=args.dataset,
        split=args.split,
        download=args.dataset_download,
        class_map=args.class_map,
    )
    if args.valid_labels:
        with open(args.valid_labels, 'r') as f:
            valid_labels = [int(line.rstrip()) for line in f]
    else:
        valid_labels = None
    if args.real_labels:
        real_labels = RealLabelsImagenet(dataset.filenames(basename=True), real_json=args.real_labels)
    else:
        real_labels = None
    crop_pct = 1.0 if test_time_pool else data_config['crop_pct']
    # NOTE(review): this loader is built and immediately discarded — looks
    # like dead work left over from the original timm script; confirm before
    # removing.
    _ = create_loader(
        dataset,
        input_size=data_config['input_size'],
        batch_size=args.batch_size,
        use_prefetcher=args.prefetcher,
        interpolation=data_config['interpolation'],
        mean=data_config['mean'],
        std=data_config['std'],
        num_workers=args.workers,
        crop_pct=crop_pct,
        crop_mode=data_config['crop_mode'],
        pin_memory=args.pin_mem,
        device=device
    )

    def set_data_loader(dataset, subset_len):
        # Build a loader over an optional random subset of ``dataset``.
        sub_dataset = dataset
        if subset_len and subset_len > 0:
            sample_method = 'random'
            assert subset_len <= len(dataset)
            if sample_method == 'random':
                import random
                sub_dataset = torch.utils.data.Subset(
                    dataset, random.sample(range(0, len(dataset)), subset_len))
            else:
                sub_dataset = torch.utils.data.Subset(dataset, list(range(subset_len)))
        crop_pct = 1.0 if test_time_pool else data_config['crop_pct']
        sub_loader = create_loader(
            sub_dataset,
            input_size=data_config['input_size'],
            batch_size=args.batch_size,
            use_prefetcher=args.prefetcher,
            interpolation=data_config['interpolation'],
            mean=data_config['mean'],
            std=data_config['std'],
            num_workers=args.workers,
            crop_pct=crop_pct,
            crop_mode=data_config['crop_mode'],
            pin_memory=args.pin_mem,
            device=device
        )
        return sub_loader

    def simple_forward_loop(model, data_loader, device):
        # Forward-only loop (no metrics); used to drive quantizer calibration.
        model.eval()
        model = model.to(device)
        from tqdm import tqdm
        with torch.no_grad():
            for _, (input, _) in enumerate(tqdm(data_loader)):
                input = input.to(device)
                model(input)

    # Step 2: Set quantization options for TIMM models
    from nndct_shared.utils import option_util
    option_util.set_option_value('nndct_leaky_relu_approximate', False)
    quant_out_dir = f"{args.quantized_out}/{args.model}"
    # Step 3: Inspect the model for specific target(Optional)
    if args.quant_mode in ["float"] and args.inspect:
        from pytorch_nndct.apis import Inspector
        dummy_input = torch.randn((args.batch_size,) + tuple(data_config['input_size'])).to(device)
        target = "DPUCAHX8L_ISA0_SP"
        inspector = Inspector(target)
        inspector.inspect(model, (dummy_input), device=device, output_dir=quant_out_dir + f"/{args.model}_inspect")
        import sys
        sys.exit(0)
    # Step 4: Create a quantizer object and configure its settings. Quantization is not done here if it needs calibration.
    if args.quant_mode in ["calib", "test"]:
        from pytorch_nndct.apis import torch_quantizer
        dummy_input = torch.randn((args.batch_size,) + tuple(data_config['input_size'])).to(device)
        quantizer = torch_quantizer(args.quant_mode, model, input_args=(dummy_input), output_dir=quant_out_dir, device=device,quant_config_file=args.config_file)
        model = quantizer.quant_model.to(device)
    model.eval()
    # Step 5: Fast Finetune(Optional)
    if args.fast_finetune == True:
        if args.quant_mode == 'calib':
            ff_data_loader = set_data_loader(dataset, args.ff_subset_len)
            quantizer.fast_finetune(simple_forward_loop, (model, ff_data_loader, device))
        elif args.quant_mode == 'test':
            quantizer.load_ft_param()
    # Step 6: Evaluation
    # This function call is to do forward loop for model to be quantized.
    # Quantization calibration will be done after it if quant_mode is 'calib'.
    # Quantization test will be done after it if quant_mode is 'test'.
    eval_data_loader = set_data_loader(dataset, args.subset_len)
    if args.quant_mode == 'calib':
        simple_forward_loop(model, eval_data_loader, device)
    else:
        batch_time = AverageMeter()
        losses = AverageMeter()
        top1 = AverageMeter()
        top5 = AverageMeter()
        with torch.no_grad():
            end = time.time()
            for batch_idx, (input, target) in enumerate(eval_data_loader):
                if args.no_prefetcher:
                    target = target.to(device)
                    input = input.to(device)
                if args.channels_last:
                    input = input.contiguous(memory_format=torch.channels_last)
                # compute output
                with suppress():
                    output = model(input)
                    if valid_labels is not None:
                        output = output[:, valid_labels]
                    loss = criterion(output, target)
                if real_labels is not None:
                    real_labels.add_result(output)
                # measure accuracy and record loss
                acc1, acc5 = accuracy(output.detach(), target, topk=(1, 5))
                losses.update(loss.item(), input.size(0))
                top1.update(acc1.item(), input.size(0))
                top5.update(acc5.item(), input.size(0))
                # measure elapsed time
                batch_time.update(time.time() - end)
                end = time.time()
                if batch_idx % args.log_freq == 0:
                    _logger.info(
                        'Test: [{0:>4d}/{1}] '
                        'Time: {batch_time.val:.3f}s ({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) '
                        'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) '
                        'Acc@1: {top1.val:>7.3f} ({top1.avg:>7.3f}) '
                        'Acc@5: {top5.val:>7.3f} ({top5.avg:>7.3f})'.format(
                            batch_idx,
                            len(eval_data_loader),
                            batch_time=batch_time,
                            rate_avg=input.size(0) / batch_time.avg,
                            loss=losses,
                            top1=top1,
                            top5=top5
                        )
                    )
    # Step 7: Export quantization result
    if args.quant_mode == "calib":
        quantizer.export_quant_config()
        return
    # Step 8: Deployment
    if args.quant_mode == "test" and args.deploy:
        quantizer.export_torch_script(output_dir=quant_out_dir)
        quantizer.export_xmodel(output_dir=quant_out_dir)
        quantizer.export_onnx_model(output_dir=quant_out_dir)
        return
    # Step 9: Display inference result
    if real_labels is not None:
        # real labels mode replaces topk values at the end
        top1a, top5a = real_labels.get_accuracy(k=1), real_labels.get_accuracy(k=5)
    else:
        top1a, top5a = top1.avg, top5.avg
    results = OrderedDict(
        model=args.model,
        top1=round(top1a, 4), top1_err=round(100 - top1a, 4),
        top5=round(top5a, 4), top5_err=round(100 - top5a, 4),
        param_count=round(param_count / 1e6, 2),
        img_size=data_config['input_size'][-1],
        crop_pct=crop_pct,
        interpolation=data_config['interpolation'],
    )
    _logger.info(' * Acc@1 {:.3f} ({:.3f}) Acc@5 {:.3f} ({:.3f})'.format(
        results['top1'], results['top1_err'], results['top5'], results['top5_err']))
    print(f'--result\n{json.dumps(results, indent=4)}')
return | null |
24,433 | import argparse
import os
import shutil
import time
import torch
import torch.nn as nn
import torch.optim
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from pytorch_nndct import nn as nndct_nn
from pytorch_nndct.nn.modules import functional
from pytorch_nndct import QatProcessor
The provided code snippet includes necessary dependencies for implementing the `conv3x3` function. Write a Python function `def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1)` to solve the following problem:
3x3 convolution with padding
Here is the function:
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """Create a 3x3 Conv2d; padding equals the dilation and no bias is used."""
    return nn.Conv2d(
        in_planes, out_planes, kernel_size=3, stride=stride,
        padding=dilation, dilation=dilation, groups=groups, bias=False)
24,434 | import argparse
import os
import shutil
import time
import torch
import torch.nn as nn
import torch.optim
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from pytorch_nndct import nn as nndct_nn
from pytorch_nndct.nn.modules import functional
from pytorch_nndct import QatProcessor
The provided code snippet includes necessary dependencies for implementing the `conv1x1` function. Write a Python function `def conv1x1(in_planes, out_planes, stride=1)` to solve the following problem:
1x1 convolution
Here is the function:
def conv1x1(in_planes, out_planes, stride=1):
    """Create a pointwise (1x1) Conv2d without a bias term."""
    return nn.Conv2d(in_planes,
                     out_planes,
                     kernel_size=1,
                     stride=stride,
                     bias=False)
24,435 | import argparse
import os
import shutil
import time
import torch
import torch.nn as nn
import torch.optim
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from pytorch_nndct import nn as nndct_nn
from pytorch_nndct.nn.modules import functional
from pytorch_nndct import QatProcessor
class BasicBlock(nn.Module):
  """ResNet basic residual block: two 3x3 convolutions plus a shortcut.

  Uses an explicit Add module (``functional.Add``) for the residual sum
  and separate ReLU instances, as required by quantization tooling.
  """

  expansion = 1

  def __init__(self,
               inplanes,
               planes,
               stride=1,
               downsample=None,
               groups=1,
               base_width=64,
               dilation=1,
               norm_layer=None):
    super(BasicBlock, self).__init__()
    norm_layer = nn.BatchNorm2d if norm_layer is None else norm_layer
    if groups != 1 or base_width != 64:
      raise ValueError('BasicBlock only supports groups=1 and base_width=64')
    if dilation > 1:
      raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
    # conv1 (and the optional downsample path) carry the stride; submodule
    # creation order matches the original so state_dict/parameter order
    # is unchanged.
    self.conv1 = conv3x3(inplanes, planes, stride)
    self.bn1 = norm_layer(planes)
    self.relu1 = nn.ReLU(inplace=True)
    self.conv2 = conv3x3(planes, planes)
    self.bn2 = norm_layer(planes)
    self.downsample = downsample
    self.stride = stride
    self.skip_add = functional.Add()
    self.relu2 = nn.ReLU(inplace=True)

  def forward(self, x):
    shortcut = x if self.downsample is None else self.downsample(x)
    out = self.relu1(self.bn1(self.conv1(x)))
    out = self.bn2(self.conv2(out))
    out = self.relu2(self.skip_add(out, shortcut))
    return out
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
  """Instantiate a ResNet; when ``pretrained``, load weights from the
  checkpoint path in the module-level ``args.pretrained``."""
  model = ResNet(block, layers, **kwargs)
  if not pretrained:
    return model
  state_dict = torch.load(args.pretrained)
  model.load_state_dict(state_dict)
  return model
def resnet18(pretrained=False, progress=True, **kwargs):
  """ResNet-18: BasicBlock with stage depths [2, 2, 2, 2] via ``_resnet``."""
  return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs) | null |
24,436 | import argparse
import os
import shutil
import time
import torch
import torch.nn as nn
import torch.optim
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from pytorch_nndct import get_pruning_runner
from pytorch_nndct import nn as nndct_nn
from pytorch_nndct.nn.modules import functional
The provided code snippet includes necessary dependencies for implementing the `conv3x3` function. Write a Python function `def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1)` to solve the following problem:
3x3 convolution with padding
Here is the function:
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
  """3x3 convolution with padding"""
  # padding == dilation keeps the output spatial size equal to the input
  # when stride == 1; no bias term is used.
  return nn.Conv2d(
      in_planes,
      out_planes,
      kernel_size=3,
      stride=stride,
      padding=dilation,
      groups=groups,
      bias=False,
dilation=dilation) | 3x3 convolution with padding |
24,437 | import argparse
import os
import shutil
import time
import torch
import torch.nn as nn
import torch.optim
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from pytorch_nndct import get_pruning_runner
from pytorch_nndct import nn as nndct_nn
from pytorch_nndct.nn.modules import functional
The provided code snippet includes necessary dependencies for implementing the `conv1x1` function. Write a Python function `def conv1x1(in_planes, out_planes, stride=1)` to solve the following problem:
1x1 convolution
Here is the function:
def conv1x1(in_planes, out_planes, stride=1):
  """1x1 convolution (no bias), used for channel projection/downsampling."""
  return nn.Conv2d(
      in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
24,438 | import argparse
import os
import shutil
import time
import torch
import torch.nn as nn
import torch.optim
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from pytorch_nndct import get_pruning_runner
from pytorch_nndct import nn as nndct_nn
from pytorch_nndct.nn.modules import functional
class BasicBlock(nn.Module):
  """ResNet basic residual block using an explicit Add module so the skip
  connection is visible to the nndct quantizer/pruner."""

  expansion = 1

  def __init__(self,
               inplanes,
               planes,
               stride=1,
               downsample=None,
               groups=1,
               base_width=64,
               dilation=1,
               norm_layer=None):
    super(BasicBlock, self).__init__()
    if norm_layer is None:
      norm_layer = nn.BatchNorm2d
    if groups != 1 or base_width != 64:
      raise ValueError('BasicBlock only supports groups=1 and base_width=64')
    if dilation > 1:
      raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
    # Both self.conv1 and self.downsample layers downsample the input when stride != 1
    self.conv1 = conv3x3(inplanes, planes, stride)
    self.bn1 = norm_layer(planes)
    self.relu1 = nn.ReLU(inplace=True)
    self.conv2 = conv3x3(planes, planes)
    self.bn2 = norm_layer(planes)
    self.downsample = downsample
    self.stride = stride
    self.skip_add = functional.Add()
    self.relu2 = nn.ReLU(inplace=True)

  def forward(self, x):
    identity = x
    out = self.conv1(x)
    out = self.bn1(out)
    out = self.relu1(out)
    out = self.conv2(out)
    out = self.bn2(out)
    if self.downsample is not None:
      identity = self.downsample(x)
    out = self.skip_add(out, identity)
    out = self.relu2(out)
    return out
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
  """Build a ResNet; optionally load weights from the global ``args.pretrained``.

  NOTE(review): weights come from ``args.pretrained``, not a checkpoint keyed
  by ``arch`` — confirm this is intended (``arch``/``progress`` are unused).
  """
  model = ResNet(block, layers, **kwargs)
  if pretrained:
    model.load_state_dict(torch.load(args.pretrained))
  return model
def resnet18(pretrained=False, progress=True, **kwargs):
  """ResNet-18 built from BasicBlock with layer config [2, 2, 2, 2]."""
  return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
                 **kwargs)
24,439 | import argparse
import os
import shutil
import time
import torch
import torch.nn as nn
import torch.optim
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from pytorch_nndct import get_pruning_runner
from pytorch_nndct import nn as nndct_nn
from pytorch_nndct.nn.modules import functional
def get_gpus(device):
  """Parse a comma-separated GPU id string (e.g. '0,1') into a list of ints."""
  return [int(i) for i in device.split(',')]
24,440 | import argparse
import os
import shutil
import time
import torch
import torch.nn as nn
import torch.optim
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from pytorch_nndct import get_pruning_runner
from pytorch_nndct import nn as nndct_nn
from pytorch_nndct.nn.modules import functional
class AverageMeter(object):
  """Computes and stores the average and current value."""

  def __init__(self, name, fmt=':f'):
    self.name = name
    self.fmt = fmt  # format spec used by __str__, e.g. ':6.2f'
    self.reset()

  def reset(self):
    """Clear all accumulated statistics."""
    self.val = 0
    self.avg = 0
    self.sum = 0
    self.count = 0

  def update(self, val, n=1):
    """Record ``val`` observed ``n`` times and refresh the running average."""
    self.val = val
    self.sum += val * n
    self.count += n
    self.avg = self.sum / self.count

  def __str__(self):
    fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
    return fmtstr.format(**self.__dict__)
def accuracy(output, target, topk=(1,)):
  """Computes the accuracy over the k top predictions
  for the specified values of k."""
  with torch.no_grad():
    maxk = max(topk)
    batch_size = target.size(0)
    # Top-maxk class indices per sample, transposed to (maxk, batch).
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
      correct_k = correct[:k].flatten().float().sum(0, keepdim=True)
      res.append(correct_k.mul_(100.0 / batch_size))
    return res
def eval_fn(model, dataloader_test):
  """Evaluate ``model`` on ``dataloader_test`` (on GPU); return top-1 accuracy."""
  top1 = AverageMeter('Acc@1', ':6.2f')
  model.eval()
  with torch.no_grad():
    for i, (images, targets) in enumerate(dataloader_test):
      images = images.cuda()
      targets = targets.cuda()
      outputs = model(images)
      acc1, _ = accuracy(outputs, targets, topk=(1, 5))
      top1.update(acc1[0], images.size(0))
  return top1.avg
24,441 | import argparse
import os
import shutil
import time
import torch
import torch.nn as nn
import torch.optim
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from pytorch_nndct import get_pruning_runner
from pytorch_nndct import nn as nndct_nn
from pytorch_nndct.nn.modules import functional
def train(model, train_loader, val_loader, criterion, device_ids):
  """Train ``model``, validating every ``args.val_freq`` steps.

  Returns the filepath of the best checkpoint (by top-1 accuracy), or None if
  validation never ran. Hyper-parameters come from the global ``args``.
  NOTE(review): ``device`` is unbound when device_ids is None/empty — confirm
  callers always pass GPU ids.
  """
  best_acc1 = 0
  best_filepath = None
  if device_ids is not None and len(device_ids) > 0:
    device = f"cuda:{device_ids[0]}"
    model = model.to(device)
    if len(device_ids) > 1:
      model = nn.DataParallel(model, device_ids=device_ids)

  batch_time = AverageMeter('Time', ':6.3f')
  data_time = AverageMeter('Data', ':6.3f')
  losses = AverageMeter('Loss', ':.4e')
  top1 = AverageMeter('Acc@1', ':6.2f')
  top5 = AverageMeter('Acc@5', ':6.2f')

  # QAT models expose separate quantizer/weight parameter groups with their own
  # learning rates; plain models use a single group.
  # Fix: probe the wrapped module when model is DataParallel — the original
  # passed ``model`` in both branches of the conditional, defeating the unwrap.
  probe = model.module if isinstance(model, nn.DataParallel) else model
  if hasattr(probe, 'quantizer_parameters'):
    param_groups = [{
        'params': probe.quantizer_parameters(),
        'lr': args.qat_quantizer_lr,
        'name': 'quantizer'
    }, {
        'params': probe.non_quantizer_parameters(),
        'lr': args.qat_weight_lr,
        'name': 'weight'
    }]
    optimizer = torch.optim.Adam(
        param_groups, args.qat_weight_lr, weight_decay=args.weight_decay)
  else:
    param_groups = [{
        'params': probe.parameters(),
        'lr': args.pruning_lr,
        'name': 'weight'
    }]
    optimizer = torch.optim.Adam(
        param_groups, args.pruning_lr, weight_decay=args.weight_decay)

  for epoch in range(args.epochs):
    progress = ProgressMeter(
        len(train_loader) * args.epochs,
        [batch_time, data_time, losses, top1, top5],
        prefix="Epoch[{}], Step: ".format(epoch))
    for i, (images, target) in enumerate(train_loader):
      end = time.time()
      # measure data loading time
      data_time.update(time.time() - end)
      step = len(train_loader) * epoch + i
      adjust_learning_rate(optimizer, epoch, step)
      loss, acc1, acc5 = train_one_step(model, (images, target), criterion,
                                        optimizer, step, device)
      # measure elapsed time
      batch_time.update(time.time() - end)
      end = time.time()
      losses.update(loss.item(), images.size(0))
      top1.update(acc1[0], images.size(0))
      top5.update(acc5[0], images.size(0))
      if step % args.display_freq == 0:
        progress.display(step)
      if step % args.val_freq == 0:
        # evaluate on validation set
        acc1 = evaluate(val_loader, model, criterion)
        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)
        filepath = save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict() if not isinstance(model, nn.DataParallel) \
                    else model.module.state_dict(),
                'best_acc1': best_acc1,
            }, is_best, args.save_dir)
        if is_best:
          best_filepath = filepath
          # Also export pruned/sparse weights when the model supports it.
          # NOTE(review): nesting these saves under is_best inferred from
          # flattened source — confirm against the original example.
          if hasattr(model, 'slim_state_dict'):
            torch.save(model.slim_state_dict(), 'resnet18_pruned_best.pth')
          if hasattr(model, 'sparse_state_dict'):
            torch.save(model.sparse_state_dict(), 'resnet18_sparse_best.pth')
  return best_filepath
def calibration_fn(model, train_loader, number_forward=100):
  """Adaptive BatchNorm calibration: forward ~``number_forward`` batches in
  train mode (updating BN running stats) without computing gradients."""
  model.train()
  print("Adaptive BN start...")  # fixed typo in log message: "atart" -> "start"
  with torch.no_grad():
    for index, (images, target) in enumerate(train_loader):
      images = images.cuda()
      model(images)
      if index > number_forward:
        break
  print("Adaptive BN end...")
24,442 | import os
import re
import sys
import argparse
import time
import pdb
import random
from pytorch_nndct.apis import torch_quantizer
import torch
import torchvision
import torchvision.transforms as transforms
from torchvision.models.resnet import resnet18
from tqdm import tqdm
import random
import os
import numpy as np
def seed_all(seed=1029):
  """Seed every RNG in use (python, hash, numpy, torch CPU/GPU) and disable
  cudnn benchmarking for reproducibility."""
  random.seed(seed)
  os.environ['PYTHONHASHSEED'] = str(seed)
  np.random.seed(seed)
  torch.manual_seed(seed)
  torch.cuda.manual_seed(seed)
  torch.cuda.manual_seed_all(seed)  # if you are using multi-GPU.
  torch.backends.cudnn.benchmark = False
24,443 | import os
import re
import sys
import argparse
import time
import pdb
import random
from pytorch_nndct.apis import torch_quantizer
import torch
import torchvision
import torchvision.transforms as transforms
from torchvision.models.resnet import resnet18
from tqdm import tqdm
import random
import os
import numpy as np
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
args, _ = parser.parse_known_args()
def load_data(train=True,
              data_dir='dataset/imagenet',
              batch_size=128,
              subset_len=None,
              sample_method='random',
              distributed=False,
              model_name='resnet18',
              **kwargs):
  """Build an ImageNet train or val DataLoader.

  Returns (data_loader, train_sampler); train_sampler is non-None only for
  the distributed training split. ``subset_len`` optionally restricts the
  dataset (random sample or leading slice per ``sample_method``).
  """
  traindir = data_dir + '/train'
  valdir = data_dir + '/val'
  train_sampler = None
  normalize = transforms.Normalize(
      mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
  # inception_v3 expects 299x299 inputs; everything else uses 224/256.
  if model_name == 'inception_v3':
    size = 299
    resize = 299
  else:
    size = 224
    resize = 256
  if train:
    dataset = torchvision.datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(size),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))
    if subset_len:
      assert subset_len <= len(dataset)
      if sample_method == 'random':
        dataset = torch.utils.data.Subset(
            dataset, random.sample(range(0, len(dataset)), subset_len))
      else:
        dataset = torch.utils.data.Subset(dataset, list(range(subset_len)))
    if distributed:
      train_sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=(train_sampler is None),
        sampler=train_sampler,
        **kwargs)
  else:
    dataset = torchvision.datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Resize(resize),
            transforms.CenterCrop(size),
            transforms.ToTensor(),
            normalize,
        ]))
    if subset_len:
      assert subset_len <= len(dataset)
      if sample_method == 'random':
        dataset = torch.utils.data.Subset(
            dataset, random.sample(range(0, len(dataset)), subset_len))
      else:
        dataset = torch.utils.data.Subset(dataset, list(range(subset_len)))
    data_loader = torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, shuffle=False, **kwargs)
  return data_loader, train_sampler
def evaluate(model, val_loader, loss_fn):
  """Evaluate on ``val_loader``; return (top1_avg, top5_avg, loss_per_sample).

  Uses the module-level ``device`` global. Raises ZeroDivisionError on an
  empty loader (pre-existing behavior).
  """
  model.eval()
  model = model.to(device)
  top1 = AverageMeter('Acc@1', ':6.2f')
  top5 = AverageMeter('Acc@5', ':6.2f')
  total = 0
  Loss = 0
  # no_grad avoids building autograd graphs during pure inference.
  with torch.no_grad():
    for iteraction, (images, labels) in tqdm(
        enumerate(val_loader), total=len(val_loader)):
      images = images.to(device)
      labels = labels.to(device)
      outputs = model(images)
      loss = loss_fn(outputs, labels)
      Loss += loss.item()
      total += images.size(0)
      acc1, acc5 = accuracy(outputs, labels, topk=(1, 5))
      top1.update(acc1[0], images.size(0))
      top5.update(acc5[0], images.size(0))
  return top1.avg, top5.avg, Loss / total
class torch_quantizer():
  """Facade over the nndct quantization processors.

  Selects a QAT, RNN/LSTM, dynamo or default post-training processor based on
  the constructor flags and forwards the public API calls to it.
  """

  def __init__(self,
               quant_mode: str,  # ['calib', 'test']
               module: Union[torch.nn.Module, List[torch.nn.Module]],
               input_args: Union[torch.Tensor, Sequence[Any]] = None,
               input_kwargs: Dict = None,
               state_dict_file: Optional[str] = None,
               output_dir: str = "quantize_result",
               bitwidth: int = None,
               mix_bit: bool = False,
               device: torch.device = torch.device("cuda"),
               lstm: bool = False,
               app_deploy: str = "CV",
               qat_proc: bool = False,
               custom_quant_ops: List[str] = None,
               quant_config_file: Optional[str] = None,
               target: Optional[str] = None,
               dynamo: bool = False):
    self.device = device
    input_data = StandardInputData(input_args, input_kwargs, device)
    vaiq_system_info(device)
    # The nndct_target environment option overrides the explicit argument.
    if NndctOption.nndct_target.value:
      target = NndctOption.nndct_target.value
    if NndctOption.nndct_inspect_test.value and target:
      from pytorch_nndct.apis import Inspector
      inspector = Inspector(target)
      inspector.inspect(module, input_args, device, output_dir)
    if bitwidth is None and quant_config_file is None:
      bitwidth = 8
    # NOTE(review): lstm_app is unbound if app_deploy is neither "CV" nor "NLP".
    if app_deploy == "CV": lstm_app = False
    elif app_deploy == "NLP": lstm_app = True
    self._qat_proc = False
    if qat_proc:
      if bitwidth is None:
        bitwidth = 8
      self.processor = QatProcessor(model=module,
                                    inputs=input_args,
                                    bitwidth=bitwidth,
                                    mix_bit=mix_bit,
                                    device=device)
      self._qat_proc = True
    elif lstm:
      self.processor = RNNQuantProcessor(quant_mode=quant_mode,
                                         module=module,
                                         input_args=input_args,
                                         state_dict_file=state_dict_file,
                                         output_dir=output_dir,
                                         bitwidth_w=bitwidth,
                                         # lstm IP only support 16 bit activation
                                         bitwidth_a=16,
                                         device=device,
                                         lstm_app=lstm_app,
                                         quant_config_file=quant_config_file)
    elif dynamo:
      from pytorch_nndct.qproc.base import DynamoQuantProcessor
      self.processor = DynamoQuantProcessor(quant_mode=quant_mode,
                                            module=module,
                                            input_args=input_args,
                                            output_dir=output_dir,
                                            bitwidth_w=bitwidth,
                                            bitwidth_a=bitwidth,
                                            device=device,
                                            lstm_app=lstm_app,
                                            quant_config_file=quant_config_file,
                                            target=target)
    else:
      self.processor = TorchQuantProcessor(quant_mode=quant_mode,
                                           module=module,
                                           input_data=input_data,
                                           state_dict_file=state_dict_file,
                                           output_dir=output_dir,
                                           bitwidth_w=bitwidth,
                                           bitwidth_a=bitwidth,
                                           device=device,
                                           lstm_app=lstm_app,
                                           custom_quant_ops=custom_quant_ops,
                                           quant_config_file=quant_config_file,
                                           target=target)

  # Finetune parameters.
  # After finetuning, run original forwarding code for calibration;
  # after calibration, run it again to test quantized model accuracy.
  def fast_finetune(self, run_fn, run_args):
    self.processor.finetune(run_fn, run_args)

  def load_ft_param(self):
    """Load parameters produced by fast_finetune."""
    self.processor.quantizer.load_param()

  def quantize(self, run_fn, run_args, ft_run_args=None):
    """Calibration; can run in the same process as test and deploy."""
    self.processor.quantize(run_fn, run_args, ft_run_args)

  def test(self, run_fn, run_args):
    """Accuracy test; can run in the same process as calibration and deploy."""
    self.processor.test(run_fn, run_args)

  def deploy(self, run_fn, run_args, fmt='xmodel'):
    """Deploy; can run in the same process as calibration and test."""
    self.processor.deploy(run_fn, run_args, fmt)

  def export_quant_config(self):
    """Export quantization step information for tensors to be quantized."""
    self.processor.export_quant_config()

  def export_xmodel(self, output_dir="quantize_result", deploy_check=False, dynamic_batch=False):
    """Export an xmodel for compilation."""
    self.processor.export_xmodel(output_dir, deploy_check, dynamic_batch)

  def export_onnx_model(self, output_dir="quantize_result", verbose=False, dynamic_batch=False, opset_version=None, native_onnx=True, dump_layers=False, check_model=False, opt_graph=False):
    self.processor.export_onnx_model(output_dir, verbose, dynamic_batch, opset_version, native_onnx, dump_layers, check_model, opt_graph)

  def export_traced_torch_script(self, output_dir="quantize_result", verbose=False):
    NndctScreenLogger().warning(
        '"export_traced_torch_script" is deprecated and will be removed in the future. '
        'Use "export_torch_script" instead.')
    self.processor.export_traced_torch_script(output_dir, verbose)

  def export_torch_script(self, output_dir="quantize_result", verbose=False):
    return self.processor.export_torch_script(output_dir, verbose)

  def quant_model(self):
    # NOTE(review): callers in this file access this as an attribute
    # (``quantizer.quant_model``); upstream declares it as a @property —
    # confirm whether the decorator was lost in this copy.
    NndctScreenLogger().info(f"=>Get module with quantization.")
    return self.processor.quant_model()

  def deploy_model(self):
    if not self._qat_proc:
      NndctScreenLogger().warning2user(QWarning.DEPLOY_MODEL, f"Only quant aware training process has deployable model.")
      return
    NndctScreenLogger().info(f"=>Get deployable module.")
    return self.processor.deploy_model()
class Inspector(object):

  def __init__(self, name_or_fingerprint: str):
    """The inspector is designed to diagnose a neural network (NN) model under
    different DPU architectures. It is useful for finding which type of device
    an operator will be assigned to, and reports hardware-constraint messages
    that help optimize the NN model for deployment.
    """
    if NndctOption.nndct_use_old_inspector.value is True:
      from pytorch_nndct.hardware import InspectorImpl
    else:
      from pytorch_nndct.hardware_v3 import InspectorImpl
    NndctScreenLogger().info("Inspector is on.")
    # Fingerprints are hex strings ("0x..."); anything else is an arch name.
    in_type = "name"
    if name_or_fingerprint.startswith("0x"):
      in_type = "fingerprint"
    self._inspector_impl = None
    if in_type == "name":
      self._inspector_impl = InspectorImpl.create_by_DPU_arch_name(name_or_fingerprint)
    else:
      self._inspector_impl = InspectorImpl.create_by_DPU_fingerprint(name_or_fingerprint)

  def inspect(self, module: torch.nn.Module,
              input_args: Union[torch.Tensor, Tuple[Any]],
              device: torch.device = torch.device("cuda"),
              output_dir: str = "quantize_result",
              verbose_level: int = 1,
              image_format: Optional[str] = None):
    """Run the inspection and optionally dump a graph image (svg/png)."""
    NndctScreenLogger().info(f"=>Start to inspect model...")
    self._inspector_impl.inspect(module, input_args, device, output_dir, verbose_level)
    if image_format is not None:
      available_format = ["svg", "png"]
      NndctScreenLogger().check2user(QError.INSPECTOR_OUTPUT_FORMAT, f"Only support dump svg or png format.", image_format in available_format)
      self._inspector_impl.export_dot_image_v2(output_dir, image_format)
    NndctScreenLogger().info(f"=>Finish inspecting.")
def get_graph_id():
  """Return the next id from the global graph counter."""
  return next(GLOBAL_MAP.get_ele(NNDCT_KEYS.GRAPH_COUNTER))
def init_wego_dynamo_env(output_dir, device=torch.device("cpu"), quant_config_file=None):
  """Prepare global state for the WeGO TorchDynamo quantization flow: reset
  dynamo, parse the quant config, init the quant env/graph counter, and
  register the traced-script generator in the global map."""
  torch._dynamo.reset()
  nndct_utils.create_work_dir(output_dir)
  # Parse the quant config file
  QConfiger = TorchQConfig()
  QConfiger.parse_config_file(quant_config_file,
                              bit_width_w=8,
                              bit_width_a=8,
                              mix_bit=False)
  qconfig = QConfiger.qconfig
  quantizer, qmode = init_quant_env("test", output_dir, qconfig)
  GLOBAL_MAP.set_map(NNDCT_KEYS.QUANT_DEVICE, device)
  root_graph = build_root_graph()
  init_graph_counter()
  quant_model_lst = []
  _wego_traced_script_fn = functools.partial(_gen_traced_quantized_script,
                                             quantizer=quantizer,
                                             root_graph=root_graph,
                                             quant_model_lst=quant_model_lst,
                                             device=device)
  GLOBAL_MAP.set_map(NNDCT_KEYS.WEGO_DYNAMO_SCRIPTER, _wego_traced_script_fn)
def get_traced_quantized_script(gm, example_inputs, graph_id):
  """Turn a dynamo fx GraphModule into a quantized TorchScript module via the
  scripter registered by init_wego_dynamo_env."""
  scripter_fn = GLOBAL_MAP.get_ele(NNDCT_KEYS.WEGO_DYNAMO_SCRIPTER)
  script_model = scripter_fn(gm, example_inputs, graph_id=graph_id)
  return script_model
def quantization(title='optimize',
                 model_name='',
                 file_path=''):
  """Quantize a resnet18 checkpoint via the dynamo flow and evaluate it.

  Behavior is driven by the global ``args`` (quant_mode, deploy, inspect, ...).
  In 'calib' mode the quant config is exported; in 'test' mode the quantized
  model is additionally run through a WeGO dynamo backend.
  """
  data_dir = args.data_dir
  quant_mode = args.quant_mode
  finetune = args.fast_finetune
  deploy = args.deploy
  batch_size = args.batch_size
  subset_len = args.subset_len
  inspect = args.inspect
  config_file = args.config_file
  target = args.target
  # xmodel export is only valid in test mode with batch 1 / one iteration.
  if quant_mode != 'test' and deploy:
    deploy = False
    print(r'Warning: Exporting xmodel needs to be done in quantization test mode, turn off it in this running!')
  if deploy and (batch_size != 1 or subset_len != 1):
    print(r'Warning: Exporting xmodel needs batch size to be 1 and only 1 iteration of inference, change them automatically!')
    batch_size = 1
    subset_len = 1

  model = resnet18().cpu()
  model.load_state_dict(torch.load(file_path))
  input = torch.randn([batch_size, 3, 224, 224])
  if quant_mode == 'float':
    quant_model = model
    if inspect:
      if not target:
        raise RuntimeError("A target should be specified for inspector.")
      import sys
      from pytorch_nndct.apis import Inspector
      # create inspector
      inspector = Inspector(target)  # by name
      # start to inspect
      inspector.inspect(quant_model, (input,), device=device)
      sys.exit()
  else:
    ## new api
    ####################################################################################
    # set dynamo=True to turn on dynamo quantization flow
    quantizer = torch_quantizer(
        quant_mode, model, (input), device=device, quant_config_file=config_file, target=target,
        dynamo=True, output_dir="dynamo")
    quant_model = quantizer.quant_model
    #####################################################################################

  # to get loss value after evaluation
  loss_fn = torch.nn.CrossEntropyLoss().to(device)

  val_loader, _ = load_data(
      subset_len=subset_len,
      train=False,
      batch_size=batch_size,
      sample_method='random',
      data_dir=data_dir,
      model_name=model_name)

  # fast finetune model or load finetuned parameter before test
  if finetune == True:
    ft_loader, _ = load_data(
        subset_len=5120,
        train=False,
        batch_size=batch_size,
        sample_method='random',
        data_dir=data_dir,
        model_name=model_name)
    if quant_mode == 'calib':
      quantizer.fast_finetune(evaluate, (quant_model, ft_loader, loss_fn))
    elif quant_mode == 'test':
      quantizer.load_ft_param()

  # record modules float model accuracy
  # add modules float model accuracy here
  acc_org1 = 0.0
  acc_org5 = 0.0
  loss_org = 0.0

  acc1_gen, acc5_gen, loss_gen = evaluate(quant_model, val_loader, loss_fn)

  # logging accuracy
  print('loss: %g' % (loss_gen))
  print('top-1 / top-5 accuracy: %g / %g' % (acc1_gen, acc5_gen))

  # handle quantization result
  if quant_mode == 'calib':
    quantizer.export_quant_config()

  # Here is a example to show how to integrate quantizer with wego
  if quant_mode == "test":
    from pytorch_nndct.qproc.dynamo import get_graph_id, get_traced_quantized_script, init_wego_dynamo_env
    from torch._dynamo.backends.common import fake_tensor_unsupported
    init_wego_dynamo_env(output_dir="dynamo", device=device)

    @fake_tensor_unsupported
    def wego_compiler(gm, example_inputs):
      graph_id = get_graph_id()
      quantized_script = get_traced_quantized_script(gm, example_inputs, graph_id)
      # add wego compile flow
      # such as:
      # compiled_script = wego_compile(quantized_script, example_inputs)
      def _call(*args):
        ret = quantized_script(*args)
        # ret = compiled_script(*args)
        # dynamo backends must return a tuple of outputs.
        if isinstance(ret, tuple):
          return ret
        else:
          return (ret,)
      return _call

    compiled_model = torch._dynamo.optimize(backend=wego_compiler, nopython=False)(model)
    acc1_gen, acc5_gen, loss_gen = evaluate(compiled_model, val_loader, loss_fn)
    print('loss: %g' % (loss_gen))
    print('top-1 / top-5 accuracy: %g / %g' % (acc1_gen, acc5_gen))
24,444 | from torch import Tensor, nn
import torch
import numpy as np
from pytorch_nndct.expanding.structured import ExpandingRunner
from torchvision.models.resnet import resnet18, resnet34, resnet50, resnet152
from torchvision.models.inception import inception_v3
import argparse
args, _ = parser.parse_known_args()
class MnistConvnet(nn.Module):
  """Small MNIST classifier: two conv/pool stages then a linear head.

  Expects input of shape (N, 1, 28, 28); outputs (N, 10) logits.
  """

  def __init__(self) -> None:
    super().__init__()
    self.conv1 = nn.Conv2d(1, 32, 3)
    self.relu1 = nn.ReLU(True)
    self.bn1 = nn.BatchNorm2d(32)
    self.max_pooling1 = nn.MaxPool2d(2)
    self.conv2 = nn.Conv2d(32, 64, 2)
    self.relu2 = nn.ReLU(True)
    self.instance_norm = nn.InstanceNorm2d(64, affine=True)
    self.max_pooling2 = nn.MaxPool2d(2)
    self.flatten = nn.Flatten()
    # 64 channels * 6 * 6 spatial after the two pool stages.
    self.linear = nn.Linear(2304, 10)

  def forward(self, x):
    x = self.conv1(x)
    x = self.relu1(x)
    x = self.bn1(x)
    x = self.max_pooling1(x)
    x = self.conv2(x)
    x = self.relu2(x)
    x = self.instance_norm(x)
    x = self.max_pooling2(x)
    x = self.flatten(x)
    x = self.linear(x)
    return x
def do_expanding(model: nn.Module, input_signature: Tensor, channel_divisible: int):
  """Expand ``model`` channels to multiples of ``channel_divisible`` and print
  the relative output difference between the original and expanded model."""
  expanding_runner = ExpandingRunner(model, input_signature)
  expanded_model, _ = expanding_runner.expand(channel_divisible)
  result1 = model.eval()(input_signature).detach().numpy()
  result2 = expanded_model.eval()(input_signature).detach().numpy()
  abs_diff = abs(result1 - result2)
  relative_diff = abs_diff / np.concatenate((abs(result1), abs(result2)), axis=0).max(axis=0)
  # fixed typo in printed label: "averate" -> "average"
  print("min_relative_diff = {}\nmax_relative_diff = {}\naverage_relative_diff = {}".
        format(relative_diff.min(), relative_diff.max(), relative_diff.mean()))
def mnist_expanding():
  """Run channel expansion on the MNIST convnet using args.channel_divisible."""
  model = MnistConvnet()
  input_signature = torch.rand([1, 1, 28, 28])
  print("expanding mnist")
  do_expanding(model, input_signature, args.channel_divisible)
24,445 | from torch import Tensor, nn
import torch
import numpy as np
from pytorch_nndct.expanding.structured import ExpandingRunner
from torchvision.models.resnet import resnet18, resnet34, resnet50, resnet152
from torchvision.models.inception import inception_v3
import argparse
args, _ = parser.parse_known_args()
def do_expanding(model: nn.Module, input_signature: Tensor, channel_divisible: int):
  """Expand ``model`` channels to multiples of ``channel_divisible`` and print
  the relative output difference between the original and expanded model."""
  expanding_runner = ExpandingRunner(model, input_signature)
  expanded_model, _ = expanding_runner.expand(channel_divisible)
  result1 = model.eval()(input_signature).detach().numpy()
  result2 = expanded_model.eval()(input_signature).detach().numpy()
  abs_diff = abs(result1 - result2)
  relative_diff = abs_diff / np.concatenate((abs(result1), abs(result2)), axis=0).max(axis=0)
  # fixed typo in printed label: "averate" -> "average"
  print("min_relative_diff = {}\nmax_relative_diff = {}\naverage_relative_diff = {}".
        format(relative_diff.min(), relative_diff.max(), relative_diff.mean()))
def resnet18_expanding():
  """Run channel expansion on torchvision resnet18."""
  model = resnet18()
  input_signature = torch.randn([1, 3, 224, 224], dtype=torch.float32)
  print("expanding resnet18")
  do_expanding(model, input_signature, args.channel_divisible)
24,446 | from torch import Tensor, nn
import torch
import numpy as np
from pytorch_nndct.expanding.structured import ExpandingRunner
from torchvision.models.resnet import resnet18, resnet34, resnet50, resnet152
from torchvision.models.inception import inception_v3
import argparse
args, _ = parser.parse_known_args()
def do_expanding(model: nn.Module, input_signature: Tensor, channel_divisible: int):
def resnet34_expanding():
  """Run channel expansion on torchvision resnet34."""
  model = resnet34()
  input_signature = torch.randn([1, 3, 224, 224], dtype=torch.float32)
  # fixed copy-paste bug: message said "resnet18"
  print("expanding resnet34")
  do_expanding(model, input_signature, args.channel_divisible)
24,447 | from torch import Tensor, nn
import torch
import numpy as np
from pytorch_nndct.expanding.structured import ExpandingRunner
from torchvision.models.resnet import resnet18, resnet34, resnet50, resnet152
from torchvision.models.inception import inception_v3
import argparse
args, _ = parser.parse_known_args()
def do_expanding(model: nn.Module, input_signature: Tensor, channel_divisible: int):
  """Expand ``model`` channels to multiples of ``channel_divisible`` and print
  the relative output difference between the original and expanded model."""
  expanding_runner = ExpandingRunner(model, input_signature)
  expanded_model, _ = expanding_runner.expand(channel_divisible)
  result1 = model.eval()(input_signature).detach().numpy()
  result2 = expanded_model.eval()(input_signature).detach().numpy()
  abs_diff = abs(result1 - result2)
  relative_diff = abs_diff / np.concatenate((abs(result1), abs(result2)), axis=0).max(axis=0)
  # fixed typo in printed label: "averate" -> "average"
  print("min_relative_diff = {}\nmax_relative_diff = {}\naverage_relative_diff = {}".
        format(relative_diff.min(), relative_diff.max(), relative_diff.mean()))
def resnet50_expanding():
  """Run channel expansion on torchvision resnet50."""
  model = resnet50()
  input_signature = torch.randn([1, 3, 224, 224], dtype=torch.float32)
  # fixed copy-paste bug: message said "resnet18"
  print("expanding resnet50")
  do_expanding(model, input_signature, args.channel_divisible)
24,448 | from torch import Tensor, nn
import torch
import numpy as np
from pytorch_nndct.expanding.structured import ExpandingRunner
from torchvision.models.resnet import resnet18, resnet34, resnet50, resnet152
from torchvision.models.inception import inception_v3
import argparse
args, _ = parser.parse_known_args()
def do_expanding(model: nn.Module, input_signature: Tensor, channel_divisible: int):
  """Expand ``model`` channels to multiples of ``channel_divisible`` and print
  the relative output difference between the original and expanded model."""
  expanding_runner = ExpandingRunner(model, input_signature)
  expanded_model, _ = expanding_runner.expand(channel_divisible)
  result1 = model.eval()(input_signature).detach().numpy()
  result2 = expanded_model.eval()(input_signature).detach().numpy()
  abs_diff = abs(result1 - result2)
  relative_diff = abs_diff / np.concatenate((abs(result1), abs(result2)), axis=0).max(axis=0)
  # fixed typo in printed label: "averate" -> "average"
  print("min_relative_diff = {}\nmax_relative_diff = {}\naverage_relative_diff = {}".
        format(relative_diff.min(), relative_diff.max(), relative_diff.mean()))
def resnet152_expanding():
  """Run channel expansion on torchvision resnet152."""
  model = resnet152()
  input_signature = torch.randn([1, 3, 224, 224], dtype=torch.float32)
  # fixed copy-paste bug: message said "resnet18"
  print("expanding resnet152")
  do_expanding(model, input_signature, args.channel_divisible)
24,449 | from torch import Tensor, nn
import torch
import numpy as np
from pytorch_nndct.expanding.structured import ExpandingRunner
from torchvision.models.resnet import resnet18, resnet34, resnet50, resnet152
from torchvision.models.inception import inception_v3
import argparse
args, _ = parser.parse_known_args()
def do_expanding(model: nn.Module, input_signature: Tensor, channel_divisible: int):
  """Expand ``model`` channels to multiples of ``channel_divisible`` and print
  the relative output difference between the original and expanded model."""
  expanding_runner = ExpandingRunner(model, input_signature)
  expanded_model, _ = expanding_runner.expand(channel_divisible)
  result1 = model.eval()(input_signature).detach().numpy()
  result2 = expanded_model.eval()(input_signature).detach().numpy()
  abs_diff = abs(result1 - result2)
  relative_diff = abs_diff / np.concatenate((abs(result1), abs(result2)), axis=0).max(axis=0)
  # fixed typo in printed label: "averate" -> "average"
  print("min_relative_diff = {}\nmax_relative_diff = {}\naverage_relative_diff = {}".
        format(relative_diff.min(), relative_diff.max(), relative_diff.mean()))
def inception_expanding():
  """Run channel expansion on torchvision inception_v3 (299x299 input)."""
  model = inception_v3(init_weights=True)
  input_signature = torch.randn([1, 3, 299, 299], dtype=torch.float32)
  print("expanding inception_v3")
  do_expanding(model, input_signature, args.channel_divisible)
24,450 | import tensorflow as tf
from tensorflow.keras import layers
from tf_nndct.optimization.expanding import ExpandingRunner
import numpy as np
keras = tf.keras
def mnist_convnet():
  """Build a small Keras MNIST classifier; returns (model, input_shape)."""
  num_classes = 10
  input_shape = (28, 28, 1)
  model = keras.Sequential([
      keras.Input(shape=input_shape),
      layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
      layers.BatchNormalization(),
      layers.MaxPooling2D(pool_size=(2, 2)),
      layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
      layers.MaxPooling2D(pool_size=(2, 2)),
      layers.Flatten(),
      layers.Dropout(0.5),
      layers.Dense(num_classes, activation="softmax"),
  ])
  return model, input_shape
24,451 | import tensorflow as tf
from tensorflow.keras import layers
from tf_nndct.optimization.expanding import expand_and_export
keras = tf.keras
def mnist_convnet():
  """Build a small Keras MNIST classifier; returns (model, input_shape)."""
  num_classes = 10
  input_shape = (28, 28, 1)
  model = keras.Sequential([
      keras.Input(shape=input_shape),
      layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
      layers.BatchNormalization(),
      layers.MaxPooling2D(pool_size=(2, 2)),
      layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
      layers.MaxPooling2D(pool_size=(2, 2)),
      layers.Flatten(),
      layers.Dropout(0.5),
      layers.Dense(num_classes, activation="softmax"),
  ])
  return model, input_shape
24,452 | from pytorch_nndct.expanding.expanding_lib import expand_and_export, load_expanded_model
from torchvision.models.inception import inception_v3
import torch
from torch import nn
import os
import onnxruntime
import argparse
import numpy as np
model = inception_v3(init_weights=True).eval()
input_signature = torch.rand((1, 3, 224, 224), dtype=torch.float32)
channel_divisibles = [int(i) for i in args.channel_divisibles.split(",")]
out_dir = args.out_dir
def expand_and_export(model_name: str,
                      model: nn.Module,
                      input_signature: torch.Tensor,
                      channel_divisibles: List[int],
                      output_dir: str,
                      onnx_export_kwargs: Mapping[str, Any] = None,
                      export_fp16_model=True,
                      exclude_nodes: List[str] = None) -> None:
  """For each channel divisible, expand the model, save its weights and
  expanding spec, and export fp32 (and optionally fp16/CUDA) ONNX models under
  ``output_dir/<model_name>_padded_<divisible>/``.

  Fix: the original used mutable default arguments ({} / []); they are now
  None-sentinels to avoid cross-call sharing.
  """
  if onnx_export_kwargs is None:
    onnx_export_kwargs = {}
  if exclude_nodes is None:
    exclude_nodes = []
  expanding_runner = ExpandingRunner(model, input_signature)
  for channel_divisible in channel_divisibles:
    dir_path = os.path.join(output_dir,
                            model_name + "_padded_{}".format(channel_divisible))
    if not os.path.exists(dir_path):
      os.makedirs(dir_path)
    expanded_model, expanding_spec = expanding_runner.expand(
        channel_divisible, exclude_nodes)
    expanded_model.eval()
    torch.save(expanded_model.state_dict(),
               os.path.join(dir_path, model_name + ".pth"))
    with open(os.path.join(dir_path, "expanding_spec"), 'w') as f:
      f.write(expanding_spec.serialize())
    torch.onnx.export(
        expanded_model,
        input_signature,
        os.path.join(dir_path, model_name + "_fp32.onnx"),
        export_params=True,
        opset_version=10,
        do_constant_folding=True,
        **onnx_export_kwargs)
    if export_fp16_model:
      # fp16 export requires CUDA (half precision model + inputs).
      expanded_model = expanded_model.cuda().half().eval()
      torch.onnx.export(
          expanded_model,
          input_signature.cuda().half(),
          os.path.join(dir_path, model_name + "_fp16.onnx"),
          export_params=True,
          opset_version=10,
          do_constant_folding=True,
          **onnx_export_kwargs)
def do_expand_and_export():
  """Run expand_and_export for inception_v3 with batch-dynamic ONNX axes."""
  onnx_kwargs = {
      "input_names": ['input'],
      "output_names": ['output'],
      "dynamic_axes": {
          'input': {0: 'batch_size'},
          'output': {0: 'batch_size'},
      },
  }
  expand_and_export("inception_v3", model, input_signature,
                    channel_divisibles, out_dir,
                    onnx_export_kwargs=onnx_kwargs)
24,453 | from pytorch_nndct.expanding.expanding_lib import expand_and_export, load_expanded_model
from torchvision.models.inception import inception_v3
import torch
from torch import nn
import os
import onnxruntime
import argparse
import numpy as np
model_name = "inception_v3"
# Reference (unexpanded) model in eval mode and a fixed verification input.
model = inception_v3(init_weights=True).eval()
input_signature = torch.rand((1, 3, 224, 224), dtype=torch.float32)
# NOTE(review): `args` is presumably an argparse.Namespace parsed elsewhere
# in the full script -- confirm; it is not defined in this fragment.
channel_divisibles = [int(i) for i in args.channel_divisibles.split(",")]
out_dir = args.out_dir
def load_expanded_torch_model(model_name: str, model: nn.Module, input_signature: torch.Tensor, dir_path: str):
  """Reload an expanded torch model from `dir_path` (spec + state dict)."""
  spec_file = os.path.join(dir_path, "expanding_spec")
  weights_file = os.path.join(dir_path, "{}.pth".format(model_name))
  return load_expanded_model(spec_file, model, input_signature, weights_file)
def load_expanded_onnx_model(model_name: str, dir_path: str) -> onnxruntime.InferenceSession:
  """Create an ONNX Runtime session for the exported fp32 model."""
  onnx_file = "{}_fp32.onnx".format(model_name)
  return onnxruntime.InferenceSession(os.path.join(dir_path, onnx_file))
def summary(channel_divisible: int, raw_output: np.ndarray, torch_output: np.ndarray, onnx_output: np.ndarray) -> None:
  """Print relative-difference statistics between the raw model output and
  the expanded torch / ONNX outputs.

  The relative difference is |a - b| normalized per element by the maximum
  magnitude over the two outputs (max taken along axis 0 of the
  concatenated arrays).

  Args:
    channel_divisible: padding multiple being reported (used as a label only).
    raw_output: output of the original (unexpanded) model.
    torch_output: output of the expanded torch model.
    onnx_output: output of the expanded ONNX model.
  """
  # NOTE(review): if an element is zero in both outputs the denominator is
  # zero and the relative diff becomes nan/inf -- acceptable for a report,
  # but confirm inputs are softmax-like (non-zero) outputs.
  raw_torch_diff = abs(raw_output - torch_output)
  raw_torch_relative_diff = raw_torch_diff / np.concatenate(
      (abs(raw_output), abs(torch_output)), axis=0).max(axis=0)
  raw_onnx_diff = abs(raw_output - onnx_output)
  raw_onnx_relative_diff = raw_onnx_diff / np.concatenate(
      (abs(raw_output), abs(onnx_output)), axis=0).max(axis=0)
  print("relative diff for channel_divisible {}".format(channel_divisible))
  # Label typo fixed: "row" -> "raw" (the diffs are against the raw output).
  print("raw-torch relative diff: max = {:.4f}, min = {:.4f}, average = {:.4f}"
        .format(raw_torch_relative_diff.max(), raw_torch_relative_diff.min(), raw_torch_relative_diff.mean()))
  print("raw-onnx relative diff: max = {:.4f}, min = {:.4f}, average = {:.4f}"
        .format(raw_onnx_relative_diff.max(), raw_onnx_relative_diff.min(), raw_onnx_relative_diff.mean()))
def verify():
  """Compare original-model outputs with each expanded torch/ONNX model."""
  for channel_divisible in channel_divisibles:
    dir_path = os.path.join(
        out_dir, model_name + "_padded_{}".format(channel_divisible))
    expanded_torch = load_expanded_torch_model(
        model_name, model, input_signature, dir_path).eval()
    onnx_session = load_expanded_onnx_model(model_name, dir_path)
    reference = model(input_signature).detach().numpy()
    torch_result = expanded_torch(input_signature).detach().numpy()
    onnx_result = onnx_session.run(None, {"input": input_signature.numpy()})[0]
    summary(channel_divisible, reference, torch_result, onnx_result)
24,456 | import os
import shutil
import subprocess
import sys
import setuptools.command.develop
import setuptools.command.install
import torch
from setuptools import find_packages, setup
from torch.utils.cpp_extension import BuildExtension, CppExtension
from distutils import core
from distutils.core import Distribution
from distutils.errors import DistutilsArgError
# Build-mode flags; presumably toggled by command-line parsing elsewhere in
# the full setup script -- TODO confirm.
INSTALL = False
DEVELOP = False
BDIST = False
CUDA_AVAILABLE = False
HIP_AVAILABLE = False
class install(setuptools.command.install.install):
  """Install command; kept as an explicit subclass hook point."""

  def run(self):
    # Delegate to the stock setuptools install behaviour.
    super().run()
class develop(setuptools.command.develop.develop):
  """Develop (editable-install) command; explicit subclass hook point."""

  def run(self):
    # Delegate to the stock setuptools develop behaviour.
    super().run()
def _collect_sources(src_dir, extensions):
  """Return paths of files in `src_dir` whose extension is in `extensions`."""
  return [
      os.path.join(src_dir, name)
      for name in os.listdir(src_dir)
      if name.split(".")[-1] in extensions
  ]


def build_config_setup():
  """Assemble the extension/cmdclass/requirements configuration for setup().

  Symlinks shared packages into this directory, records the nn path on
  install, and builds the C++ (and CUDA or HIP) extension depending on the
  CUDA_AVAILABLE / HIP_AVAILABLE flags.

  Returns:
    (extensions, cmdclass, install_requires) suitable for setup(...).
  """
  # global INSTALL, DEVELOP, BDIST, CUDA_AVAILABLE
  global INSTALL, DEVELOP, BDIST, CUDA_AVAILABLE, HIP_AVAILABLE
  install_packages = ["nndct_shared"]
  for package in install_packages:
    # lexists (not exists) so a stale/broken symlink is also cleaned up;
    # os.path.exists is False for a dangling symlink and os.symlink would
    # then fail with FileExistsError.
    if os.path.lexists(package):
      try:
        os.unlink(package)
      except Exception:
        print("failed to do the cleaning, please clean up manually")
      else:
        os.symlink(f"../{package}", package)
    else:
      os.symlink(f"../{package}", package)
  if INSTALL:
    # Record the absolute nn path so the installed package can locate the
    # kernel sources at runtime.
    os.makedirs("pytorch_nndct/nn/kernel", exist_ok=True)
    with open("pytorch_nndct/nn/kernel/__init__.py", 'w') as f:
      cwd = os.path.dirname(os.path.realpath(__file__))
      nn_path = os.path.join(cwd, "pytorch_nndct/nn")
      f.write(f"NN_PATH='{nn_path}'")
  install_requires = []
  if not DEVELOP:
    install_requires += ["scipy<=1.9.3",
                         "numpy<=1.24.2",
                         "tqdm",
                         "ninja"]
  extensions = []
  if not BDIST:
    cmdclass = {"install": install,
                "develop": develop
                }
  else:
    cmdclass = {"build_ext": BuildExtension}
  extra_compile_args = {'cxx': ['-std=c++14', '-fPIC']}
  cwd = os.path.dirname(os.path.realpath(__file__))
  # CPU C/C++ sources are always built.
  source_files = _collect_sources(os.path.join(cwd, "../csrc/cpu"),
                                  ["cpp", "cc", "c"])
  include_dir = [
      os.path.join(cwd, "../include/cpu"),
      os.path.join(cwd, "pytorch_nndct/nn/include")
  ]
  Extension = CppExtension
  if CUDA_AVAILABLE or HIP_AVAILABLE:
    # The CUDA and HIP branches were identical except for the compiler-args
    # key; both use the csrc/cuda tree and CUDAExtension.
    compiler_key = 'nvcc' if CUDA_AVAILABLE else 'hipcc'
    extra_compile_args[compiler_key] = ['-O2', '-arch=sm_35']
    source_files += _collect_sources(os.path.join(cwd, "../csrc/cuda"),
                                     ["cu", "cpp", "cc", "c"])
    source_files += _collect_sources(
        os.path.join(cwd, "pytorch_nndct/nn/src/cuda"), ["cpp", "cc", "c"])
    include_dir.append(os.path.join(cwd, "../include/cuda"))
    # CUDAExtension class has checks for HIP backend
    from torch.utils.cpp_extension import CUDAExtension
    Extension = CUDAExtension
  else:
    source_files += _collect_sources(
        os.path.join(cwd, "pytorch_nndct/nn/src/cpu"), ["cpp", "cc", "c"])
  kernel_ext = Extension(name='pytorch_nndct.nn._kernels',
                         language='c++',
                         sources=source_files,
                         include_dirs=include_dir,
                         extra_compile_args=extra_compile_args)
  extensions.append(kernel_ext)
  return extensions, cmdclass, install_requires
24,461 | import torch
from torch.autograd import Variable
import math
from nndct_shared.utils import NndctOption, NndctScreenLogger, QError, QWarning
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
from .quant_noise import eval_qnoise
import pytorch_nndct.utils as py_utils
import torch.nn.functional as F
# NOTE(review): the method bodies below appear to have been stripped during
# extraction -- as written this class is not valid Python (defs with no
# body). Restore the implementations before use.
class deephi_Conv3d(torch.nn.modules.conv.Conv3d):
  """Quantization-aware Conv3d wrapper (implementation bodies elided)."""
  def __init__(self, *args, **kwards):
  # forward / fp32_forward / fake_quantize_forward presumably implement the
  # quantized, float, and fake-quantized paths; bias_corr bias correction --
  # bodies are missing from this snippet, so this is unconfirmed.
  def forward(self, input):
  def fp32_forward(self, input):
  def fake_quantize_forward(self, input):
  def bias_corr(self):
def Conv3d(*args, **kwargs):
  """Factory: return a quantization-aware Conv3d when a quantizer is active,
  otherwise a plain torch.nn.Conv3d."""
  quant_mode, _ = maybe_get_quantizer()
  # `is None` (identity test) instead of `== None` -- idiomatic and immune
  # to __eq__ overloads.
  if quant_mode is None:
    return torch.nn.Conv3d(*args, **kwargs)
  return deephi_Conv3d(*args, **kwargs)
24,462 | import torch
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
import pytorch_nndct.utils as py_utils
class deephi_Sub(torch.nn.Module):
  """Quantization-aware elementwise subtraction module."""

  def __init__(self):
    super(deephi_Sub, self).__init__()
    self.quant_mode, self.quantizer = maybe_get_quantizer()
    self.node = None

  def forward(self, input, other, alpha=1):
    # Quantize both operands, subtract, then quantize the result.
    qinput, qother = quantize_tensors([input, other],
                                      self.node,
                                      tensor_type='input')
    result = torch.sub(input=qinput, other=qother, alpha=alpha)
    return quantize_tensors([result], self.node)[0]
def Sub(*args, **kwargs):
  """Factory for the quantization-aware Sub module."""
  return deephi_Sub(*args, **kwargs)
24,467 | import torch
from torch.autograd import Variable
import torch.nn.functional as F
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
from nndct_shared.utils import NndctOption
import pytorch_nndct.utils as py_utils
from nndct_shared.utils import NNDCT_KEYS, GLOBAL_MAP
class deephi_Hardsigmoid(torch.nn.Module):
  r"""DeePhi Hardsigmoid operation, support float and double.

  Computes hardsigmoid = relu6(x + 3) / 6, with quantization applied around
  the op when a quantizer is active. (The original docstring said "Conv2d"
  -- copy/paste error.)
  """
  def __init__(self, inplace=False, *args, **kwards):
    super(deephi_Hardsigmoid, self).__init__()
    self.quant_mode, self.quantizer = maybe_get_quantizer()
    self.node = None  # presumably set externally to the graph node -- TODO confirm
    self.inplace = inplace
  def forward(self, input):
    quant_config = GLOBAL_MAP.get_ele(NNDCT_KEYS.QUANT_CONFIG)
    # Float path (quantization disabled): plain relu6(x + 3) / 6.
    if self.quant_mode is None or NndctOption.nndct_quant_off.value:
      return torch.div(F.relu6(torch.add(input, 3.)), 6.)
    # NOTE(review): if QUANT_CONFIG was never registered, quant_config is
    # None and the subscript below raises TypeError -- confirm callers
    # always set it before quantized forward passes.
    elif(quant_config['target_device'] == "FLEXML"):
      qinput = quantize_tensors([input],self.node,tensor_type='input')[0]
      output = torch.nn.functional.hardsigmoid(qinput)
      output = quantize_tensors([output],self.node)[0]
      return output
    else:
      qinput = quantize_tensors([input], self.node, tensor_type='input')[0]
      output = F.relu6(torch.add(qinput, 3.))
      # scale to DPU accuracy
      # 2731/16384 ~= 1/6 in 14-bit fixed point, replacing the exact /6.
      scale = 2731.0 / 16384.0
      output = output * scale
      output = quantize_tensors([output], self.node)[0]
      return output
def Hardsigmoid(*args, **kwargs):
  """Factory for the quantization-aware Hardsigmoid module."""
  return deephi_Hardsigmoid(*args, **kwargs)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.