id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
23,226 | import os
import shutil
import json
import sys
import numpy as np
from .log import log_or_print
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP
def print_mat(mat, name="tmp mat", col=20, t=0, channel=sys.stdout):
  """Write summary stats and the first `col` entries of a 1-D or 2-D
  matrix to `channel` (default stdout).

  Raises:
    Exception: if `mat` is neither 1-D nor 2-D.
  """
  total, hi, lo = mat.sum(), mat.max(), mat.min()
  # Some array-like types return a length-1 container instead of a plain
  # scalar; unwrap so the printed stats are bare numbers.
  if not (isinstance(hi, float) or isinstance(hi, int)):
    hi, lo, total = hi[0], lo[0], total[0]
  rank = len(mat.shape)
  if rank == 2:
    channel.write("showing " + str(t) + " th " + name + str(mat.shape) + "\n")
    channel.write("sum:" + str(total) + "; max:" + str(hi) + " min:" +
                  str(lo) + "\n")
    for row in range(mat.shape[0]):
      channel.write(" ".join(str(mat[row, c]) for c in range(col)) + " \n")
  elif rank == 1:
    channel.write("showing " + str(t) + " th " + name + "\n")
    channel.write("sum:" + str(total) + "; max:" + str(hi) + " min:" +
                  str(lo) + "\n")
    channel.write(" ".join(str(mat[idx]) for idx in range(col)) + " \n")
  else:
    raise Exception("wrong mode!")
23,227 | import os
import shutil
import json
import sys
import numpy as np
from .log import log_or_print
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP
def latest_file(folder, file_checker=None):
  """Return the absolute path of the most recently modified entry in
  `folder` that satisfies `file_checker`, or None if nothing qualifies.

  Args:
    folder: directory to scan (non-recursive).
    file_checker: optional predicate taking the absolute path; entries
      failing it are skipped.
  """
  names = os.listdir(folder)
  names.sort(key=lambda fn: os.path.getmtime(os.path.join(folder, fn)))
  # Newest first; ties keep the reverse of the sorted (stable) order,
  # matching the original lists[::-1] iteration.
  for name in reversed(names):
    candidate = os.path.join(folder, name)
    if not file_checker or file_checker(candidate):
      return candidate
23,228 | import sys
import os
import logging
import io
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP, NNDCT_DEBUG_LVL
GLOBAL_MAP = GlobalMap()
def nndct_info_print(string):
  """Emit an info message through the registered NNDCT logger, falling
  back to plain print when no logger has been installed."""
  message = "[NNDCT_INFO] {}".format(string)
  logger = GLOBAL_MAP.get_ele(NNDCT_KEYS.LOGGER)
  if logger:
    logger.info(message)
  else:
    print(message)
23,229 | import sys
import os
import logging
import io
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP, NNDCT_DEBUG_LVL
GLOBAL_MAP = GlobalMap()
def nndct_warn_print(string):
  """Emit a warning message when the global warn flag is exactly True;
  routed to the registered logger, else printed."""
  if GLOBAL_MAP.get_ele(NNDCT_KEYS.WARN_FLAG) == True:
    message = "[NNDCT_WARN] {}".format(string)
    logger = GLOBAL_MAP.get_ele(NNDCT_KEYS.LOGGER)
    if logger:
      logger.warning(message)
    else:
      print(message)
23,230 | import sys
import os
import logging
import io
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP, NNDCT_DEBUG_LVL
GLOBAL_MAP = GlobalMap()
def nndct_error_print(string):
  """Emit an error message when the global error flag is exactly True and
  terminate the process.

  NOTE(review): the source dump lost indentation, so whether sys.exit(1)
  runs only under the flag or unconditionally is ambiguous; it is placed
  inside the flag branch here — confirm against the original module.
  """
  if GLOBAL_MAP.get_ele(NNDCT_KEYS.ERROR_FLAG) == True:
    message = "[NNDCT_ERROR] {}".format(string)
    logger = GLOBAL_MAP.get_ele(NNDCT_KEYS.LOGGER)
    if logger:
      logger.error(message)
    else:
      print(message)
    sys.exit(1)
23,231 | import sys
import os
import logging
import io
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP, NNDCT_DEBUG_LVL
def obj_to_str(obj):
  """Render a list, dict, or str as a printable multi-line string.

  Lists become one element per line, dicts become "key : value" lines,
  and strings pass through unchanged.

  Raises:
    Exception: for any other type. (Fix: the original message claimed
    only list and dictionary were supported, but str is accepted too.)
  """
  if isinstance(obj, list):
    return '\n'.join("{}".format(item) for item in obj)
  if isinstance(obj, dict):
    return '\n'.join("{} : {}".format(k, v) for k, v in obj.items())
  if isinstance(obj, str):
    return obj
  raise Exception("nndct_details_debug only support list, dict and str")
def nndct_debug_print(string, title='', level=1):
  """Print/log a debug message when debugging is enabled and `level` is
  within the configured verbosity; 'Start'/'End' titles add banners."""
  debug_on = True == GLOBAL_MAP.get_ele(NNDCT_KEYS.DEBUG_FLAG)
  # Short-circuit keeps VERBOSE_LEVEL unread when debugging is off,
  # matching the original `and` chain.
  if not (debug_on and level <= GLOBAL_MAP.get_ele(NNDCT_KEYS.VERBOSE_LEVEL)):
    return
  logger = GLOBAL_MAP.get_ele(NNDCT_KEYS.LOGGER)
  if title == 'Start':
    string = "\n********************* <{} : {}> *********************".format(
        title, string)
  elif title == 'End':
    string = "\n********************* <{} : {}> *********************\n".format(
        title, string)
  message = "[NNDCT_DEBUG_Lv_{}] {}".format(level, string)
  if logger:
    logger.debug(message)
  else:
    print(message)
def nndct_details_debug(obj, title, level=NNDCT_DEBUG_LVL.DETAILS):
  """Dump `obj` (list/dict/str, via obj_to_str) between Start/End banners
  at debug `level`."""
  body = obj_to_str(obj)
  header = ("\n********************* <Start : {}> *********************\n{}"
            .format(title, body))
  nndct_debug_print(header, level=level)
  nndct_debug_print(title, title='End', level=level)
23,232 | import sys
import os
import logging
import io
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP, NNDCT_DEBUG_LVL
GLOBAL_MAP = GlobalMap()
def nndct_info(func):
  """Decorator: prefix the wrapped call's output with "[NNDCT_INFO]" when
  the global info flag is exactly True."""
  def wrapper(*args, **kwargs):
    if GLOBAL_MAP.get_ele(NNDCT_KEYS.INFO_FLAG) == True:
      print("[NNDCT_INFO]", end='')
    return func(*args, **kwargs)
  return wrapper
23,233 | import sys
import os
import logging
import io
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP, NNDCT_DEBUG_LVL
GLOBAL_MAP = GlobalMap()
def nndct_warn(func):
  """Decorator: prefix the wrapped call's output with "[NNDCT_WARN]" when
  the global warn flag is exactly True."""
  def wrapper(*args, **kwargs):
    if GLOBAL_MAP.get_ele(NNDCT_KEYS.WARN_FLAG) == True:
      print("[NNDCT_WARN]", end='')
    return func(*args, **kwargs)
  return wrapper
23,234 | import sys
import os
import logging
import io
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP, NNDCT_DEBUG_LVL
GLOBAL_MAP = GlobalMap()
def nndct_debug(func):
  """Decorator: prefix the wrapped call's output with "[NNDCT_DEBUG]" when
  the global debug flag is exactly True."""
  def wrapper(*args, **kwargs):
    if GLOBAL_MAP.get_ele(NNDCT_KEYS.DEBUG_FLAG) == True:
      print("[NNDCT_DEBUG]", end='')
    return func(*args, **kwargs)
  return wrapper
23,235 | import sys
import os
import logging
import io
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP, NNDCT_DEBUG_LVL
GLOBAL_MAP = GlobalMap()
def nndct_error(func):
  """Decorator: prefix the wrapped call's output with "[NNDCT_ERROR]" when
  the global error flag is set, then invoke the wrapped function.

  Fix: the original wrapper had `if error_flag == True: exit(1)` *after*
  its return statement, so it could never execute; the dead code is
  removed without changing behavior. If the intent was to abort the
  process after reporting, the exit must happen before returning —
  confirm against callers before restoring it.
  """
  def wrapper(*args, **kwargs):
    error_flag = GLOBAL_MAP.get_ele(NNDCT_KEYS.ERROR_FLAG)
    if error_flag == True:
      print("[NNDCT_ERROR]", end='')
    return func(*args, **kwargs)
  return wrapper
23,236 | import sys
import os
import logging
import io
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP, NNDCT_DEBUG_LVL
import logging as _logging
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import WARN
from logging import NOTSET
def get_nndct_logger(filename='NndctGen_log'):
  """Create (or fetch) an INFO-level logger that writes to both the
  console and `filename`, creating the log directory if needed.

  Fix: the original attached a fresh StreamHandler/FileHandler on every
  call for the same filename (logging.getLogger caches logger objects),
  duplicating every record; handlers are now attached only once.

  Args:
    filename: log file path; '/' is mangled to 'SPL' for the logger name.

  Returns:
    The configured logging.Logger.
  """
  log_dir = os.path.dirname(filename)
  if log_dir and not os.path.exists(log_dir):
    os.makedirs(log_dir)
  logger = logging.getLogger(filename.replace("/", 'SPL'))
  logger.setLevel(logging.INFO)
  if logger.handlers:
    # Already configured by a previous call — avoid duplicate handlers.
    return logger
  formatter = logging.Formatter(
      '%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s')
  sh = logging.StreamHandler()
  fh = logging.FileHandler(filename, mode='w', encoding=None, delay=False)
  for handler in (sh, fh):
    handler.setLevel(logging.INFO)
    handler.setFormatter(formatter)
    logger.addHandler(handler)
  return logger
23,237 | import sys
import os
import logging
import io
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP, NNDCT_DEBUG_LVL
def get_config_str(obj, title, ignore_prefix=(), ignore_suffix=(),
                   ignore_keys=()):
  """Build a human-readable configuration summary for `obj`.

  Iterates `obj.default_kwargs` and renders every attribute with a truthy
  value that is not filtered out; dict values are expanded one pair per
  line.

  Args:
    obj: object exposing a `default_kwargs` iterable of attribute names.
    title: heading shown at the top of the summary.
    ignore_prefix / ignore_suffix / ignore_keys: key filters.
      (Fix: defaults were shared mutable lists; now immutable tuples —
      callers passing lists are unaffected.)

  Returns:
    The formatted multi-line string.
  """
  assert hasattr(
      obj, 'default_kwargs'
  ), 'object {} has no default_kwargs, failed to generate configuration string'.format(
      obj)
  config_str = '\n' + ">> <{}>".format(title) + '\n>> '
  for key in obj.default_kwargs:
    value = getattr(obj, key, None)
    if not value:
      continue
    if any(key.endswith(s) for s in ignore_suffix) or \
       any(key.startswith(p) for p in ignore_prefix) or \
       key in ignore_keys:
      continue
    if isinstance(value, dict):
      config_str += '\n>> {}: \n>> {}'.format(
          key, '\n>> '.join('{} : {}'.format(k, v) for k, v in value.items()))
    else:
      config_str += '\n>> {} : {}'.format(key, value)
  return config_str
23,238 | import math
from nndct_shared.base import NNDCT_OP
def calculate_op_scale(rec, node):
  """For MEAN nodes, approximate 1/rec as multi/2**shift (hardware-friendly
  fixed-point form) and return rec * that approximation; any other op type
  keeps scale 1.0."""
  if node.op.type not in [NNDCT_OP.MEAN]:
    return 1.0
  max_shift = math.ceil(math.log(rec * 128, 2))
  best_err = 1.0
  best_multi = 0.0
  best_shift = 0.0
  for shift in range(max_shift):
    multi = round((2 ** shift) / rec)
    err = abs(multi / (2 ** shift) - 1 / rec)
    if err < best_err:
      best_multi, best_err, best_shift = multi, err, shift
  return rec * best_multi / (2 ** best_shift)
23,241 | from nndct_shared.utils.tensor_util import DataFormatMap
from typing import List
def generate_indices_group(indices: List[int], dim_size: int,
                           groups: int) -> List[List[int]]:
  """Split `indices` (values in [0, dim_size)) into `groups` consecutive
  chunks of width dim_size // groups, re-basing each index to the start
  of its chunk."""
  members = set(indices)
  width: int = dim_size // groups
  grouped: List[List[int]] = []
  base = 0
  while base < dim_size:
    grouped.append(
        [i - base for i in range(base, base + width) if i in members])
    base += width
  return grouped
23,242 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import copy
import json
import numpy as np
import os
from typing import List
from nndct_shared.base.key_names import FrameworkType
from nndct_shared.pruning import errors
from nndct_shared.pruning import logging
from nndct_shared.pruning import pruning_lib
from nndct_shared.pruning import utils
from nndct_shared.pruning.pruning_lib import is_depthwise_conv
from nndct_shared.pruning.pruning_lib import is_grouped_conv
from nndct_shared.pruning.utils import generate_indices_group
from nndct_shared.utils import io
from nndct_shared.utils.tensor_util import param_layout_transformer
class ModulePruningInfoGenerator(object):
  """Maps per-node pruning info onto module names and (de)serializes it.

  Fix: the paired getter/setter definitions were plain same-named methods
  (the second definition silently shadowed the first), and `deserialize`
  took `cls` without being a classmethod — the missing @property /
  @<name>.setter / @classmethod decorators are restored, which is the
  only way the original access patterns can work.
  """

  def __init__(self,
               nodename_to_modulename=None,
               nodes_pruning_info=None,
               module_pruning_info=None):
    self._nodename_to_modulename = nodename_to_modulename
    self._nodes_pruning_info = nodes_pruning_info
    self._module_pruning_info = module_pruning_info
    if not self._module_pruning_info:
      self._module_pruning_info = dict()
      self._generate_module_pruning_info()

  def _generate_module_pruning_info(self):
    # Re-key node pruning info by module name; nodes without a module
    # mapping are dropped.
    for nodename, pruning_info in self._nodes_pruning_info.items():
      if nodename in self._nodename_to_modulename:
        modulename = self._nodename_to_modulename[nodename]
        self._module_pruning_info[modulename] = pruning_info

  @property
  def nodename_to_modulename(self):
    return self._nodename_to_modulename

  @nodename_to_modulename.setter
  def nodename_to_modulename(self, nodename_to_modulename):
    self._nodename_to_modulename = nodename_to_modulename

  @property
  def nodes_pruning_info(self):
    return self._nodes_pruning_info

  @nodes_pruning_info.setter
  def nodes_pruning_info(self, nodes_pruning_info):
    self._nodes_pruning_info = nodes_pruning_info

  @property
  def module_pruning_info(self):
    return self._module_pruning_info

  def serialize(self):
    """Return a plain dict with each pruning-info value serialized."""
    content = dict()
    for key, value in self._module_pruning_info.items():
      content[key] = value.serialize()
    return content

  @classmethod
  def deserialize(cls, data):
    """Rebuild a generator from `serialize()` output."""
    module_pruning_info = {}
    for key, value in data.items():
      module_pruning_info[key] = pruning_lib.StructuredPruning.deserialize(
          value)
    return cls(module_pruning_info=module_pruning_info)

  def to_json(self):
    return json.dumps(self.serialize(), indent=2)
import os
# NOTE(review): BASE_DIR is not defined anywhere in this chunk; as written
# this guard raises NameError at import time — confirm where BASE_DIR
# should come from (likely lost by the dataset extraction).
if not os.path.exists(BASE_DIR):
  os.makedirs(BASE_DIR)
def save_pruning_info(nodename_to_modulename, nodes_pruning_info, filepath):
  """Serialize node pruning info, re-keyed by module name, to `filepath`
  as JSON, creating the parent directory first."""
  io.create_work_dir(os.path.dirname(filepath))
  generator = ModulePruningInfoGenerator(nodename_to_modulename,
                                         nodes_pruning_info)
  with open(filepath, 'w') as out:
    out.write(generator.to_json())
23,243 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import copy
import json
import numpy as np
import os
from typing import List
from nndct_shared.base.key_names import FrameworkType
from nndct_shared.pruning import errors
from nndct_shared.pruning import logging
from nndct_shared.pruning import pruning_lib
from nndct_shared.pruning import utils
from nndct_shared.pruning.pruning_lib import is_depthwise_conv
from nndct_shared.pruning.pruning_lib import is_grouped_conv
from nndct_shared.pruning.utils import generate_indices_group
from nndct_shared.utils import io
from nndct_shared.utils.tensor_util import param_layout_transformer
# NOTE(review): truncated duplicate of ModulePruningInfoGenerator — all
# method bodies were lost in extraction, so this stub is not valid Python
# as written; the complete definition appears earlier in this dump.
class ModulePruningInfoGenerator(object):
  def __init__(self,
               nodename_to_modulename=None,
               nodes_pruning_info=None,
               module_pruning_info=None):
  def _generate_module_pruning_info(self):
  def nodename_to_modulename(self):
  def nodename_to_modulename(self, nodename_to_modulename):
  def nodes_pruning_info(self):
  def nodes_pruning_info(self, nodes_pruning_info):
  def module_pruning_info(self):
  def serialize(self):
  def deserialize(cls, data):
  def to_json(self):
def load_pruning_info(filepath):
  """Read the JSON written by save_pruning_info and rebuild the
  ModulePruningInfoGenerator from it."""
  with open(filepath, 'r') as src:
    return ModulePruningInfoGenerator.deserialize(json.load(src))
23,247 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging as _logging
import os as _os
import sys as _sys
import time as _time
import traceback as _traceback
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import WARN
from logging import NOTSET
import threading
def _log_prefix(level, timestamp=None, file_and_line=None):
  """Generate a nndct logline prefix: '<sev>MMDD HH:MM:SS.micros file]'.

  NOTE(review): `_level_names` and `_get_file_and_line` are module globals
  defined elsewhere in the original file (not visible in this chunk).
  """
  # pylint: disable=global-variable-not-assigned
  global _level_names
  # pylint: enable=global-variable-not-assigned
  when = timestamp or _time.time()
  local = _time.localtime(when)
  micros = int(1e6 * (when % 1.0))
  (filename, line) = file_and_line or _get_file_and_line()
  basename = _os.path.basename(filename)
  severity = _level_names[level][0] if level in _level_names else 'I'
  return '%c%02d%02d %02d:%02d:%02d.%06d %s]' % (
      severity,
      local[1],  # month
      local[2],  # day
      local[3],  # hour
      local[4],  # min
      local[5],  # sec
      micros,
      basename)
def get_logger():
  """Return the process-wide 'nndct' logger, creating and configuring it
  exactly once (double-checked locking around the module global).

  NOTE(review): `_logger`, `_logger_lock`, `_logging_fmt` and
  `_logger_find_caller` are module globals defined elsewhere in the
  original file (not visible in this chunk).
  """
  global _logger
  # Use double-checked locking to avoid taking lock unnecessarily.
  if _logger:
    return _logger
  _logger_lock.acquire()
  try:
    if _logger:
      return _logger
    # Scope the TensorFlow logger to not conflict with users' loggers.
    logger = _logging.getLogger('nndct')
    logger.setLevel(1)
    # Override findCaller on the logger to skip internal helper functions
    logger.findCaller = _logger_find_caller
    # Don't further configure the TensorFlow logger if the root logger is
    # already configured. This prevents double logging in those cases.
    if not _logging.getLogger().handlers:
      # Determine whether we are in an interactive environment
      _interactive = False
      try:
        # This is only defined in interactive shells.
        if _sys.ps1:
          _interactive = True
      except AttributeError:
        # Even now, we may be in an interactive shell with `python -i`.
        _interactive = _sys.flags.interactive
      # If we are in an interactive environment (like Jupyter), set loglevel
      # to INFO and pipe the output to stdout.
      if _interactive:
        #logger.setLevel(INFO)
        _logging_target = _sys.stdout
      else:
        _logging_target = _sys.stderr
      # Add the output handler.
      _handler = _logging.StreamHandler(_logging_target)
      _handler.setFormatter(_logging.Formatter(_logging_fmt, None))
      logger.addHandler(_handler)
    _logger = logger
    return _logger
  finally:
    _logger_lock.release()
def info(msg, *args, **kwargs):
  """Log `msg` at INFO level with the nndct prefix attached via `extra`."""
  get_logger().info(
      msg, *args, extra={'nndct_prefix': _log_prefix(INFO)}, **kwargs)
23,249 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging as _logging
import os as _os
import sys as _sys
import time as _time
import traceback as _traceback
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import WARN
from logging import NOTSET
import threading
def _log_prefix(level, timestamp=None, file_and_line=None):
  """Generate a nndct logline prefix: '<sev>MMDD HH:MM:SS.micros file]'.

  NOTE(review): `_level_names` and `_get_file_and_line` are module globals
  defined elsewhere in the original file (not visible in this chunk).
  """
  # pylint: disable=global-variable-not-assigned
  global _level_names
  # pylint: enable=global-variable-not-assigned
  when = timestamp or _time.time()
  local = _time.localtime(when)
  micros = int(1e6 * (when % 1.0))
  (filename, line) = file_and_line or _get_file_and_line()
  basename = _os.path.basename(filename)
  severity = _level_names[level][0] if level in _level_names else 'I'
  return '%c%02d%02d %02d:%02d:%02d.%06d %s]' % (
      severity,
      local[1],  # month
      local[2],  # day
      local[3],  # hour
      local[4],  # min
      local[5],  # sec
      micros,
      basename)
def get_logger():
  """Return the process-wide 'nndct' logger, creating and configuring it
  exactly once (double-checked locking around the module global).

  NOTE(review): `_logger`, `_logger_lock`, `_logging_fmt` and
  `_logger_find_caller` are module globals defined elsewhere in the
  original file (not visible in this chunk).
  """
  global _logger
  # Use double-checked locking to avoid taking lock unnecessarily.
  if _logger:
    return _logger
  _logger_lock.acquire()
  try:
    if _logger:
      return _logger
    # Scope the TensorFlow logger to not conflict with users' loggers.
    logger = _logging.getLogger('nndct')
    logger.setLevel(1)
    # Override findCaller on the logger to skip internal helper functions
    logger.findCaller = _logger_find_caller
    # Don't further configure the TensorFlow logger if the root logger is
    # already configured. This prevents double logging in those cases.
    if not _logging.getLogger().handlers:
      # Determine whether we are in an interactive environment
      _interactive = False
      try:
        # This is only defined in interactive shells.
        if _sys.ps1:
          _interactive = True
      except AttributeError:
        # Even now, we may be in an interactive shell with `python -i`.
        _interactive = _sys.flags.interactive
      # If we are in an interactive environment (like Jupyter), set loglevel
      # to INFO and pipe the output to stdout.
      if _interactive:
        #logger.setLevel(INFO)
        _logging_target = _sys.stdout
      else:
        _logging_target = _sys.stderr
      # Add the output handler.
      _handler = _logging.StreamHandler(_logging_target)
      _handler.setFormatter(_logging.Formatter(_logging_fmt, None))
      logger.addHandler(_handler)
    _logger = logger
    return _logger
  finally:
    _logger_lock.release()
def error(msg, *args, **kwargs):
  """Log `msg` at ERROR level with the nndct prefix attached via `extra`."""
  get_logger().error(
      msg, *args, extra={'nndct_prefix': _log_prefix(ERROR)}, **kwargs)
23,250 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging as _logging
import os as _os
import sys as _sys
import time as _time
import traceback as _traceback
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import WARN
from logging import NOTSET
import threading
# NOTE(review): `_log_prefix` and `get_logger` below are extraction stubs —
# their bodies were lost in the dataset dump, so this fragment is not valid
# Python as written; only `fatal` survived intact. Complete definitions of
# both helpers appear earlier in this dump.
def _log_prefix(level, timestamp=None, file_and_line=None):
def get_logger():
# Log `msg` at FATAL level with the nndct prefix attached via `extra`.
def fatal(msg, *args, **kwargs):
  extra = {'nndct_prefix': _log_prefix(FATAL)}
  get_logger().fatal(msg, *args, extra=extra, **kwargs)
23,253 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging as _logging
import os as _os
import sys as _sys
import time as _time
import traceback as _traceback
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import WARN
from logging import NOTSET
import threading
def log(level, msg, *args, **kwargs):
  """Log `msg` at the given `level` with the nndct prefix attached."""
  get_logger().log(
      level, msg, *args, extra={'nndct_prefix': _log_prefix(level)}, **kwargs)
def min_vlog_level():
  """Return the verbose-log threshold from VAI_MIN_VLOG_LEVEL, cached in
  the module global `_min_vlog_level`; falls back to 0 when the variable
  is unset or not an integer."""
  global _min_vlog_level
  if _min_vlog_level is not None:
    return _min_vlog_level
  raw = _os.getenv('VAI_MIN_VLOG_LEVEL', 0)
  try:
    _min_vlog_level = int(raw)
  except ValueError:
    _min_vlog_level = 0
  return _min_vlog_level
def vlog(level, msg, *args, **kwargs):
  """Emit `msg` only when `level` is within the configured vlog threshold."""
  if level > min_vlog_level():
    return
  log(level, msg, *args, **kwargs)
23,255 | import collections
import json
import os
from nndct_shared.pruning.pruning_lib import PruningSpec, NodeGroup
from nndct_shared.pruning import errors
from nndct_shared.utils import io
from typing import List
class SubnetSearcher(object):
  """Collects scored subnet candidates over pruning node `groups` and
  selects the one whose score is closest to the supernet baseline.

  Fixes: `supernet` was defined twice as plain same-named methods (the
  setter shadowed the getter) and `deserialize` took `cls` without being a
  classmethod — the stripped @property / @<name>.setter / @classmethod
  decorators are restored (deserialize's `instance.supernet = ...`
  assignment only works with the setter in place). The `__init__`
  annotation is made lazy (string) so the class is importable standalone.
  NOTE(review): `config` reads `self._config`, which no code here ever
  assigns — confirm upstream.
  """

  def __init__(self, groups: 'List[NodeGroup]'):
    self._groups = groups
    self._supernet = None
    self._subnets = []

  def set_supernet(self, score, macs=None):
    self._supernet = SubnetConfig(None, score, macs)

  def add_subnet(self, ratios, score, macs=None):
    self._subnets.append(SubnetConfig(ratios, score, macs))

  def _sorted_subnet(self):
    # Ascending by score; ties keep insertion order (sorted is stable).
    return sorted(self._subnets, key=lambda x: x.score)

  def subnet(self, index):
    """Return the candidate at `index` (insertion order)."""
    if not self._subnets:
      raise errors.OptimizerSubnetError('No subnet candidates.')
    if index and index >= len(self._subnets):
      raise errors.OptimizerInvalidArgumentError(
          'Subnet index is out of range [0, {}]'.format(len(self._subnets) - 1))
    return self._subnets[index]

  def best_subnet(self):
    """Candidate whose score is nearest the supernet's baseline score."""
    subnets = self._sorted_subnet()
    base_score = self._supernet.score
    b0 = abs(base_score - subnets[0].score)
    b1 = abs(base_score - subnets[-1].score)
    index = 0 if b0 < b1 else -1
    return subnets[index]

  def spec(self, ratios):
    return PruningSpec.from_node_groups(self._groups, ratios)

  @property
  def groups(self):
    return self._groups

  @property
  def config(self):
    return self._config

  @property
  def supernet(self):
    return self._supernet

  @supernet.setter
  def supernet(self, supernet):
    self._supernet = supernet

  def serialize(self):
    """Flatten groups, supernet and candidates into JSON-friendly data."""
    subnets = self._sorted_subnet()
    ratios = []
    macs = []
    scores = {}
    for index, subnet in enumerate(subnets):
      # NOTE(review): the original bound subnet.serialize() to an unused
      # local; the call is retained in case it has side effects.
      subnet.serialize()
      ratios.append(','.join(str(ratio) for ratio in subnet.ratios))
      macs.append(subnet.macs)
      scores[index] = subnet.score
    return {
        'groups': [g.serialize() for g in self._groups],
        'supernet': self._supernet.serialize(),
        'ratios': ratios,
        'macs': macs,
        'scores': scores
    }

  @classmethod
  def deserialize(cls, data):
    """Rebuild a searcher from `serialize()` output."""
    instance = cls([NodeGroup.deserialize(item) for item in data['groups']])
    scores = data['scores']
    macs = data['macs']
    for index, ratio_str in enumerate(data['ratios']):
      ratios = [float(ratio) for ratio in ratio_str.split(',')]
      instance.add_subnet(ratios, scores[str(index)], macs[index])
    instance.supernet = SubnetConfig.deserialize(data['supernet'])
    return instance

  def to_json(self):
    return json.dumps(self.serialize(), indent=2)
def load_searcher(filepath):
  """Load a SubnetSearcher previously serialized to JSON at `filepath`."""
  with open(filepath, 'r') as src:
    data = json.load(src)
  return SubnetSearcher.deserialize(data)
23,256 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import List, Mapping, Any, Union, Tuple
import collections
from nndct_shared.base.key_names import NNDCT_OP as OpTypes
from nndct_shared.nndct_graph.base_node import Node
from nndct_shared.metaclass import Singleton
from nndct_shared.nndct_graph.base_graph import Graph
from nndct_shared.pruning import errors
from nndct_shared.pruning import logging
from nndct_shared.pruning import node_group as node_group_lib
from nndct_shared.utils import registry
TRANSPOSECONV_OPS = [OpTypes.CONVTRANSPOSE2D, OpTypes.CONVTRANSPOSE3D]
def is_transpose_conv(op):
  """Return True when `op` is a transposed convolution (2-D or 3-D).

  Idiom fix: returns the membership test directly instead of the
  `if ...: return True / return False` pattern.
  """
  return op.type in TRANSPOSECONV_OPS
23,269 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import List, Mapping, Any, Union, Tuple
import collections
from nndct_shared.base.key_names import NNDCT_OP as OpTypes
from nndct_shared.nndct_graph.base_node import Node
from nndct_shared.metaclass import Singleton
from nndct_shared.nndct_graph.base_graph import Graph
from nndct_shared.pruning import errors
from nndct_shared.pruning import logging
from nndct_shared.pruning import node_group as node_group_lib
from nndct_shared.utils import registry
def find_prunable_ancestor(graph, node, target_ops=CONV_OPS):
  # Walk upward from `node` to the nearest ancestor whose op is in
  # `target_ops`, looking through CONCAT nodes along the way.
  # NOTE(review): CONV_OPS and find_ancestor are defined elsewhere in the
  # original module (not visible in this chunk).
  return find_ancestor(graph, node, target_ops, [OpTypes.CONCAT])
def modify_concat(graph, node, pruning_res):
  """Propagate channel-pruning results through a CONCAT node.

  Builds the concat node's removed-output index set by offsetting each
  input's removed outputs into the concatenated channel space, then
  mirrors outputs onto inputs (a concat's input channels are its output
  channels).

  NOTE(review): indentation was stripped in the source dump; the
  accumulation lines are reconstructed under `if not out_dim_missing:` —
  confirm against the original module.

  Raises:
    errors.OptimizerNotExcludeNodeError: when an input was pruned but some
      other input's out_dim is unknown, making offsets uncomputable.
  """
  # First pass: detect whether any input lacks a resolved out_dim.
  out_dim_missing = False
  for tensor in node.in_tensors:
    input_node = tensor.node
    input_pruning = pruning_res[input_node.name]
    if not input_pruning.has_out_dim():
      out_dim_missing = True
      break
  node_pruning = pruning_res[node.name]
  cur_offset = 0
  out_dim = 0
  removed_outputs = []
  # Second pass: offset each input's removed channels into concat space.
  for tensor in node.in_tensors:
    input_node = tensor.node
    input_pruning = pruning_res[input_node.name]
    if input_pruning.removed_outputs and out_dim_missing:
      upstream_conv = find_prunable_ancestor(graph, input_node)
      raise errors.OptimizerNotExcludeNodeError(
          'Must exclude node from pruning: {}.'.format(upstream_conv.name))
    if not out_dim_missing:
      for ro in input_pruning.removed_outputs:
        removed_outputs.append(ro + cur_offset)
      out_dim += input_pruning.out_dim
      # Offset advances by the input's full (pre-pruning) channel count.
      cur_offset += (len(input_pruning.removed_outputs) + input_pruning.out_dim)
  node_pruning.removed_outputs = removed_outputs
  node_pruning.out_dim = out_dim
  # update removed_inputs & in_dim
  node_pruning.removed_inputs = node_pruning.removed_outputs
  node_pruning.in_dim = node_pruning.out_dim
23,271 | from collections import deque
def graph_search_handler(start_node,
                         generator,
                         frontier,
                         handler=None,
                         gen_params=None):
  """Generic graph traversal driven by the `frontier` container.

  Pops nodes from `frontier` (whose pop() order determines DFS vs BFS),
  invokes `handler` on each explored node, and pushes neighbours produced
  by `generator(node, **gen_params)` that are neither explored nor already
  queued.

  Fix: `gen_params` default was a shared mutable dict; now a None
  sentinel normalized to a fresh dict.

  Returns:
    None — traversal is useful only through `handler` side effects.
  """
  if gen_params is None:
    gen_params = {}
  frontier.append(start_node)
  explored = set()
  while frontier:
    node = frontier.pop()
    explored.add(node)
    if handler:
      handler(node)
    # Build the list first so membership checks see a stable frontier.
    frontier.extend([
        n for n in generator(node, **gen_params)
        if n not in explored and n not in frontier
    ])
  return None
class FIFOQueue(Queue):
  """A First-In-First-Out queue implemented with collections.deque.

  MODIFIED FROM AIMA VERSION
  - Use deque
  - Use an additional set to track membership in O(1)
  """

  def __init__(self):
    self.A = deque()
    self.__keys = set()

  def append(self, item):
    self.__keys.add(item)
    self.A.append(item)

  def __len__(self):
    return len(self.A)

  def pop(self):
    # Remove from the left end (FIFO) and drop membership tracking.
    item = self.A.popleft()
    self.__keys.discard(item)
    return item

  def __contains__(self, item):
    return item in self.__keys
def breadth_first_search_handler(start_node,
                                 generator,
                                 handler=None,
                                 gen_params=None):
  """Breadth-first traversal: graph_search_handler with a FIFO frontier.

  Fix: `gen_params` default was a shared mutable dict; normalized locally
  to keep this edit independent of graph_search_handler's signature.
  """
  if gen_params is None:
    gen_params = {}
  return graph_search_handler(
      start_node,
      generator,
      frontier=FIFOQueue(),
      handler=handler,
      gen_params=gen_params)
23,276 | import sys
from typing import List, Optional
from nndct_shared.base import NNDCT_OP
from nndct_shared.nndct_graph import Graph, Operation, Node, Block
from nndct_shared.nndct_graph import operator_definition as base_op
from nndct_shared.utils import NndctOption, NndctScreenLogger
def convert_graph_to_block_node(top_graph, graph):
  """Wrap `graph` as a single BLOCK node owned by `top_graph`.

  Re-parents every block, tensor, node and param name of `graph` onto
  `top_graph`, resetting node indices so they can be renumbered.

  Returns:
    The new BLOCK node (not yet appended to `top_graph`).
  """
  op = base_op.CustomOp(NNDCT_OP.BLOCK)
  block_node = Node(name=graph.name, op=op, in_quant_part=True)
  block_node.owning_block = top_graph.block
  block_node.owning_graph = top_graph
  block_node.add_block(graph.block)
  for block in graph.all_blocks():
    block.owning_graph = top_graph
  for tensor in graph.tensors:
    top_graph.add_tensor(tensor)
  for node in graph.all_nodes():
    # -1 marks the index as stale; presumably reassigned when the node is
    # appended to the top graph — confirm in Graph.append_node.
    node._idx = -1
    node.owning_graph = top_graph
  for param_name in graph.param_names():
    top_graph.add_param_name(param_name)
  return block_node
def merge_multi_graphs_to_single_graph(graphs, graph_name="Nndctgraph"):
  """Merge several graphs into one by wrapping each as a BLOCK node.

  Creates a fresh top graph whose top block is delimited by two
  PLACEHOLDER nodes (input/return), then appends one BLOCK node per input
  graph.

  Returns:
    The merged Graph.
  """
  top_graph = Graph(graph_name)
  op = base_op.CustomOp(NNDCT_OP.PLACEHOLDER)
  input_node = Node(name="input_placeholder", op=op, in_quant_part=False)
  input_node.owning_graph = top_graph
  op = base_op.CustomOp(NNDCT_OP.PLACEHOLDER)
  return_node = Node(name="return_placeholder", op=op, in_quant_part=False)
  return_node.owning_graph = top_graph
  top_block = Block(top_graph, None, input_node, return_node)
  top_graph.set_top_block(top_block)
  for graph in graphs:
    block_node = convert_graph_to_block_node(top_graph, graph)
    # Only append nodes not already threaded into a node list.
    if not block_node.in_node_list():
      top_graph.append_node(block_node)
  return top_graph
23,280 | import math
import itertools
from typing import List, Dict, Any, NoReturn, Tuple
import numpy as np
from functools import partial
from nndct_shared.base import NNDCT_OP, NNDCT_KEYS
from nndct_shared.nndct_graph import Tensor, Node
from .xgraph import XGraph
from nndct_shared.utils import calculate_op_scale, DataXopError
NndctQuantInfo = Dict[str, Dict[str, List[int]]]
class _Converter:
  """Lookup tables translating nndct dtypes and attribute values to XIR.

  Fix: the to_* helpers take `cls` but were missing @classmethod — calls
  such as `_Converter.to_numpy_dtype(dtype)` (see `zeros` below in the
  original module) would bind the first positional argument to `cls` and
  raise TypeError; the decorators are restored.
  """

  # nndct/numpy scalar type -> XIR dtype string
  _nndct2xir_type = {np.float32: "FLOAT32",
                     np.float64: "FLOAT64",
                     np.int64: "INT64",
                     np.int32: "INT32",
                     }
  # nndct dtype string -> numpy scalar type
  _nndct2numpy_type = {
      "float32": np.float32,
      "float64": np.float64,
      "int32": np.int32,
      "int64": np.int64
  }
  # shared enum translation for pad_mode attrs
  _pad_mode = {"pad_mode": {0: "FLOOR",
                            1: "CEIL",
                            2: "SAME",
                            3: "VALID"}
               }
  # per-op attribute-value translation tables
  _nndct2xir_value = {NNDCT_OP.CONV2D: _pad_mode,
                      NNDCT_OP.DEPTHWISE_CONV2D: _pad_mode,
                      NNDCT_OP.CONVTRANSPOSE2D: _pad_mode,
                      NNDCT_OP.DEPTHWISE_CONVTRANSPOSE2D: _pad_mode,
                      NNDCT_OP.MAX_POOL: _pad_mode,
                      NNDCT_OP.MAX_POOL1D: _pad_mode,
                      NNDCT_OP.AVG_POOL: _pad_mode,
                      NNDCT_OP.PAD: {"mode": {0: "CONSTANT", 1: "REFLECT", 2: "SYMMETRIC"}}
                      }

  @classmethod
  def to_xir_dtype(cls, numpy_dtype):
    """Map a numpy scalar type to its XIR dtype string (KeyError if unknown)."""
    return cls._nndct2xir_type[numpy_dtype]

  @classmethod
  def to_xir_dtype_by_string(cls, dtype):
    """Map an nndct dtype string to XIR; unknown strings pass through."""
    return {
        "float32": "FLOAT32",
        "float64": "FLOAT64",
        "int32": "INT32",
        "int64": "INT64"
    }.get(dtype, dtype)

  @classmethod
  def to_xir_attr_value(cls, node_op_type, nndct_attr_name: str, nndct_attr_value: Any):
    """Translate an attribute value via the per-op tables; values with no
    table entry pass through unchanged."""
    if node_op_type not in cls._nndct2xir_value or nndct_attr_name not in cls._nndct2xir_value[node_op_type]:
      return nndct_attr_value
    else:
      return cls._nndct2xir_value[node_op_type][nndct_attr_name][nndct_attr_value]

  @classmethod
  def to_numpy_dtype(cls, nndct_dtype):
    """Map an nndct dtype string to a numpy scalar type (KeyError if unknown)."""
    return cls._nndct2numpy_type[nndct_dtype]
def shape(xgraph: XGraph, node: Node, quant_config: NndctQuantInfo) -> NoReturn:
  r"""nndct shape is a macro operator, including shape, stridedslice.

  Lowers it as an XIR `shape` op over the node's inputs followed by a
  `strided_slice` selecting the single dimension given by the AXIS attr.
  """
  shape_inputs: Dict[str, List["xir.Op"]] = {
      "input": [xgraph.get_op_by_name(name) for name in node.in_nodes]
  }
  shape_op = xgraph.create_fixed_normal_op(
      node.name + "_i0", "shape", quant_config, input_ops=shape_inputs)
  dim = node.node_attr(node.op.AttrName.AXIS)
  slice_attrs: Dict[str, Any] = {"begin": [dim], "end": [dim + 1]}
  xgraph.create_fixed_normal_op(
      node.name,
      "strided_slice",
      quant_config,
      attrs=slice_attrs,
      input_ops={"input": [shape_op]})
def zeros(xgraph: XGraph, node: Node, quant_config: NndctQuantInfo) -> NoReturn:
  """Emit a fixed constant op filled with zeros matching the node's
  declared SHAPE attribute and output dtype."""
  out_dtype = _Converter.to_numpy_dtype(node.out_tensors[0].dtype)
  zero_data = np.zeros(node.node_attr(node.op.AttrName.SHAPE), dtype=out_dtype)
  xgraph.create_fixed_const_op(name=node.name,
                               data=zero_data,
                               quant_info=quant_config)
class XGraph(object):
  """Thin wrapper over an XIR graph builder that inserts `fix`
  (quantization) ops around constants and normal ops based on a
  NndctQuantInfo mapping.

  NOTE(review): `Graph`, `Op`, `re`, `_XMODEL_NAME_PATTERN`, `ChainMap`,
  `Sequence`, `NndctScreenLogger`, `QWarning` and `ExportXmodelError` are
  imported/defined elsewhere in the original module (not visible here).
  """

  def __init__(self, name: str):
    self._graph = Graph(name)
    # NOTE(review): neither dict is ever written to in this class —
    # `_const_ops` is only read by a dead check in create_const_op and
    # `_ops` is unused; confirm whether population was lost upstream.
    self._const_ops: Dict[str, Op] = {}
    self._ops: Dict[str, Op] = {}

  def _check_inputs(self, input_ops):
    # Guard against wiring a missing (None) producer into a new op.
    if any([ip is None for ip in input_ops]):
      raise RuntimeError('The input op is `None`, please check graph.')

  def create_const_op(self, name: str, data: Optional[np.ndarray]) -> NoReturn:
    """Create a const op, promoting 0-d arrays to 1-element arrays."""
    if data is not None and data.ndim == 0:
      data = np.array([data], data.dtype)
    const_op = self._graph.create_const_op(name, data)
    # NOTE(review): this duplicate check runs *after* creation and
    # `_const_ops` is never populated, so it can never fire as written.
    if name in self._const_ops:
      raise RuntimeError('The const op {} has already in graph'.format(name))
    return const_op

  def create_input_transpose_ops(self, input_list: List[Op], input_tensors: 'List[base_tensor::Tensor]'):
    """Insert NHWC->NCHW-style transpose ops in front of 4-D/5-D inputs;
    lower-rank inputs pass through untouched."""
    t_ops = []
    for i, (input, tensor) in enumerate(zip(input_list, input_tensors)):
      if tensor.ndim in [4, 5]:
        attrs: Dict[str, Any] = {}
        attrs['order'] = [0, 3, 1, 2] if tensor.ndim == 4 else [0, 4, 3, 1, 2]
        input_ops: Dict[str, List[Op]] = {}
        input_ops['input'] = [input]
        op_name = input.get_name() + NNDCT_KEYS.TRANSPOSE_OP_SUFFIX
        # Reuse an existing transpose for this producer if already built.
        t_op = self.get_op_by_name(op_name)
        if t_op is None:
          t_op = self.create_normal_op(
              op_name,
              'transpose',
              attrs=attrs,
              input_ops=input_ops)
        t_ops.append(t_op)
      else:
        t_ops.append(input)
    return t_ops

  def create_normal_op(self,
                       name: str,
                       kind: str,
                       tensor: Optional[np.ndarray] = None,
                       attrs: Optional[Dict[str, Any]] = None,
                       input_ops: Optional[Dict[str, List[Op]]] = None) -> Op:
    """Create a plain (non-fixed) op of `kind`; `input_ops` maps arg name
    to producer ops and is validated when given."""
    if input_ops is not None:
      self._check_inputs(input_ops['input'])
    op = self._graph.create_op(name, kind, attrs, input_ops)
    return op

  def create_fixed_const_op(self, name: str, data: np.ndarray,
                            quant_info: NndctQuantInfo) -> Op:
    """Create a const op (name sanitized for xmodel) and append a fix op
    when quant_info carries fix info for `name`."""
    formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
    const_op = self.create_const_op(formal_name, data)
    # print(const_op.get_name(), "const", const_op.get_output_tensor().dims)
    fixed_const_op = self.create_fix_op(const_op, name, quant_info)
    return fixed_const_op if fixed_const_op else const_op

  def create_fixed_normal_op(self,
                             name: str,
                             kind: str,
                             quant_info: NndctQuantInfo,
                             tensor: Optional[np.ndarray] = None,
                             attrs: Optional[Dict[str, Any]] = None,
                             input_ops: Optional[Dict[str, List[Op]]] = None) -> Op:
    """Create a normal op (name sanitized) followed by an optional post-fix
    op; returns the fix op when one was created, else the op itself."""
    formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
    op = self.create_normal_op(
        name=formal_name,
        kind=kind,
        tensor=tensor,
        attrs=attrs,
        input_ops=input_ops)
    # print(op.get_name(), kind, op.get_output_tensor().dims)
    post_fixed_op = self.create_fix_op(op, name, quant_info)
    return post_fixed_op if post_fixed_op else op

  def create_input_fix_ops(self, input_list: List[Op], key_name: str, quant_info: NndctQuantInfo):
    """Insert a pre-fix op (id-suffixed) before each input that has input
    fix info under `key_name`; others pass through unchanged."""
    pre_fix_ops = []
    for i, op in enumerate(input_list):
      pre_fix_op = self.create_fix_op(op, key_name, quant_info, id=i, post_fix=False)
      if pre_fix_op:
        pre_fix_ops.append(pre_fix_op)
      else:
        pre_fix_ops.append(op)
    return pre_fix_ops

  def create_fix_op(self, input: Op, key_name: str,
                    quant_info: NndctQuantInfo, id: Optional[int] = None, post_fix: bool = True) -> Optional[Op]:
    """Create a DPU_ROUND `fix` op after (post_fix) or before `input`.

    Returns None when quant_info is absent or carries no (bit_width,
    fix_point) pair for `key_name`.
    """
    def _get_fix_info(name: str, quant_info: NndctQuantInfo) -> Sequence[int]:
      # Post-fix info comes from param/output tables, pre-fix from input.
      if post_fix:
        combinded_fix_infos = ChainMap(dict(quant_info['param']), dict(quant_info['output']))
      else:
        combinded_fix_infos = quant_info['input']
      if name in combinded_fix_infos.keys():
        return combinded_fix_infos[name]
      else:
        return None
    # if NNDCT_KEYS.FIX_OP_SUFFIX in input.get_name():
    #   raise RuntimeError("The consecutive fix ops in graph is forbidden!")
    if not isinstance(quant_info, dict):
      return None
    # bit_width, fix_point = _get_fix_info(key_name, quant_info)[0]
    # if bit_width is None or fix_point is None:
    #   return None
    bit_info = _get_fix_info(key_name, quant_info)
    if bit_info is None or bit_info[0] is None:
      return None
    bit_width, fix_point = bit_info[0]
    if bit_width is None or fix_point is None:
      return None
    attrs: Dict[str, Any] = {}
    attrs['fix_point'] = fix_point
    attrs['bit_width'] = bit_width
    attrs['round_mode'] = "DPU_ROUND"
    attrs['if_signed'] = True
    input_ops: Dict[str, List[Op]] = {}
    input_ops['input'] = [input]
    if post_fix:
      op_name = input.get_name() + NNDCT_KEYS.FIX_OP_SUFFIX
    else:
      formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", key_name)
      if id is not None:
        op_name = formal_name + NNDCT_KEYS.PRE_FIX_OP_SUFFIX + f"_i{id}"
      else:
        op_name = formal_name + NNDCT_KEYS.PRE_FIX_OP_SUFFIX
    fix_op = self.create_normal_op(
        op_name,
        'fix',
        attrs=attrs,
        input_ops=input_ops)
    return fix_op

  def get_op_by_name(self, name: str) -> Op:
    """Look up an op by sanitized name, preferring its fixed variant."""
    formal_name = re.sub(_XMODEL_NAME_PATTERN, "_", name)
    op = self._graph.get_op(formal_name + NNDCT_KEYS.FIX_OP_SUFFIX)
    if op is None:
      op = self._graph.get_op(formal_name)
    return op

  def get_op_output_shape(self, name: str) -> List[int]:
    """Return the output dims of the op called `name`.

    NOTE(review): when the op is missing this warns and implicitly
    returns None, despite the List[int] annotation — callers must handle.
    """
    op = self.get_op_by_name(name)
    if op:
      return op.get_output_tensor().dims
    else:
      NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, f"{name} is not in xmodel. Please check it.")

  def export_to_xmodel(self, fname: str) -> NoReturn:
    """Serialize the graph to `fname` + xmodel suffix; any serialization
    failure is re-raised as ExportXmodelError."""
    fname += NNDCT_KEYS.XMODEL_SUFFIX
    try:
      self._graph.serialize(fname)
    except Exception:
      raise ExportXmodelError(self._graph.get_name())
else:
NndctScreenLogger().info(f"=>Successfully convert '{self._graph.get_name()}' to xmodel.({fname})")
def export_to_img(self, fname: str) -> NoReturn:
fname += NNDCT_KEYS.XMODEL_IMAGE_SUFFIX
try:
shell_command = "which dot"
proc = subprocess.Popen(shell_command, stdout=subprocess.PIPE, shell=True)
try:
outs, errs = proc.communicate(timeout=2)
except subprocess.TimeoutExpired:
proc.kill()
outs, errs = proc.communicate()
NndctScreenLogger().error2user(QError.PROC_TIMEOUT, f"{errs}")
raise
if outs:
self._graph.save_as_dot(fname)
else:
NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, f"Can't find dot command in the system, please install it." \
" Otherwise, the xmodel image will not be generated.")
except Exception as e:
NndctScreenLogger().warning2user(QWarning.EXPORT_XMODEL, f"Failed to generate xmodel image!({str(e)})")
def graph(self):
return self._graph
def data_xop(xgraph: XGraph, node: Node,
quant_config: NndctQuantInfo) -> NoReturn:
shape = node.out_tensors[0].shape
if not shape:
shape = [1]
if shape[0] == 0:
raise DataXopError("data", shape)
# shape = permute_axes(shape, node.transpose_out_order)
try:
out_tensor = np.zeros(shape, dtype=np.float32)
attrs: Dict[str, Any] = {}
attrs["shape"] = shape
attrs["data_type"] = _Converter.to_xir_dtype(out_tensor.dtype.type)
xgraph.create_fixed_normal_op(
node.name, "data", quant_config, tensor=out_tensor, attrs=attrs)
except:
raise DataXopError("data", shape) | null |
23,285 | import math
import itertools
from typing import List, Dict, Any, NoReturn, Tuple
import numpy as np
from functools import partial
from nndct_shared.base import NNDCT_OP, NNDCT_KEYS
from nndct_shared.nndct_graph import Tensor, Node
from .xgraph import XGraph
from nndct_shared.utils import calculate_op_scale, DataXopError
NndctQuantInfo = Dict[str, Dict[str, List[int]]]
class XGraph(object):
def __init__(self, name: str):
def _check_inputs(self, input_ops):
def create_const_op(self, name: str, data: Optional[np.ndarray]) -> NoReturn:
def create_input_transpose_ops(self, input_list: List[Op], input_tensors: 'List[base_tensor::Tensor]'):
def create_normal_op(self,
name: str,
kind: str,
tensor: Optional[np.ndarray] = None,
attrs: Optional[Dict[str, Any]] = None,
input_ops: Optional[List[Op]] = None) -> Op:
def create_fixed_const_op(self, name: str, data: np.ndarray,
quant_info: NndctQuantInfo) -> Op:
def create_fixed_normal_op(self,
name: str,
kind: str,
quant_info: NndctQuantInfo,
tensor: Optional[np.ndarray] = None,
attrs: Optional[Dict[str, Any]] = None,
input_ops: Optional[List[Op]] = None) -> Op:
def create_input_fix_ops(self, input_list: List[Op], key_name: str, quant_info: NndctQuantInfo):
def create_fix_op(self, input: Op, key_name: str,
quant_info: NndctQuantInfo, id: Optional[int] = None, post_fix: bool = True) -> Optional[Op]:
def _get_fix_info(name: str, quant_info: NndctQuantInfo) -> Sequence[int]:
def get_op_by_name(self, name: str) -> Op:
def get_op_output_shape(self, name: str) -> List[int]:
def export_to_xmodel(self, fname: str) -> NoReturn:
def export_to_img(self, fname: str) -> NoReturn:
def graph(self):
def dense(xgraph: XGraph, node: Node, quant_config: NndctQuantInfo) -> NoReturn:
input_ops: Dict[str, List["xir.Op"]] = {}
for param_name, param_tensor in node.op.params.items():
if param_name == node.op.ParamName.WEIGHTS:
weights = xgraph.get_op_by_name(param_tensor.name)
else:
bias = xgraph.get_op_by_name(param_tensor.name)
input_ops["bias"] = [bias]
input_list = []
for input in node.in_nodes:
input_op = xgraph.get_op_by_name(input)
input_list.append(input_op)
input_ops["input"] = xgraph.create_input_fix_ops(input_list, node.name, quant_config)
input_ops["input"].append(weights)
attrs: Dict[str, Any] = {}
attrs["transpose_a"] = False
attrs["transpose_b"] = True
xgraph.create_fixed_normal_op(
node.name, "matmul", quant_config, attrs=attrs, input_ops=input_ops) | null |
23,289 | import math
import itertools
from typing import List, Dict, Any, NoReturn, Tuple
import numpy as np
from functools import partial
from nndct_shared.base import NNDCT_OP, NNDCT_KEYS
from nndct_shared.nndct_graph import Tensor, Node
from .xgraph import XGraph
from nndct_shared.utils import calculate_op_scale, DataXopError
NndctQuantInfo = Dict[str, Dict[str, List[int]]]
def _get_xir_attr_from_node(node: Node):
def scale(xgraph, node, quant_config):
class XGraph(object):
def __init__(self, name: str):
def _check_inputs(self, input_ops):
def create_const_op(self, name: str, data: Optional[np.ndarray]) -> NoReturn:
def create_input_transpose_ops(self, input_list: List[Op], input_tensors: 'List[base_tensor::Tensor]'):
def create_normal_op(self,
name: str,
kind: str,
tensor: Optional[np.ndarray] = None,
attrs: Optional[Dict[str, Any]] = None,
input_ops: Optional[List[Op]] = None) -> Op:
def create_fixed_const_op(self, name: str, data: np.ndarray,
quant_info: NndctQuantInfo) -> Op:
def create_fixed_normal_op(self,
name: str,
kind: str,
quant_info: NndctQuantInfo,
tensor: Optional[np.ndarray] = None,
attrs: Optional[Dict[str, Any]] = None,
input_ops: Optional[List[Op]] = None) -> Op:
def create_input_fix_ops(self, input_list: List[Op], key_name: str, quant_info: NndctQuantInfo):
def create_fix_op(self, input: Op, key_name: str,
quant_info: NndctQuantInfo, id: Optional[int] = None, post_fix: bool = True) -> Optional[Op]:
def _get_fix_info(name: str, quant_info: NndctQuantInfo) -> Sequence[int]:
def get_op_by_name(self, name: str) -> Op:
def get_op_output_shape(self, name: str) -> List[int]:
def export_to_xmodel(self, fname: str) -> NoReturn:
def export_to_img(self, fname: str) -> NoReturn:
def graph(self):
def hsigmoid(xgraph: XGraph, node: Node, quant_config: NndctQuantInfo) -> NoReturn:
scale = 6.0 * 2731.0 / 16384.0
attrs = _get_xir_attr_from_node(node)
input_ops: Dict[str, List["xir.Op"]] = {}
input_ops["input"] = [xgraph.get_op_by_name(node.in_nodes[0])]
input_ops["input"] = xgraph.create_input_fix_ops(input_ops["input"], node.name, quant_config)
xgraph.create_fixed_normal_op(
node.name + "_i0", "hard-sigmoid", quant_config, attrs=attrs, input_ops=input_ops)
scale = [scale]
xgraph.create_fixed_const_op(name=node.name + "_i1",
data=np.array(scale, dtype=np.float32),
quant_info=quant_config)
input_ops: Dict[str, List["xir.Op"]] = {}
input_ops["input"] = [xgraph.get_op_by_name(node.name + "_i0"), xgraph.get_op_by_name(node.name + "_i1")]
xgraph.create_fixed_normal_op(
node.name, "mul", quant_config, input_ops=input_ops) | null |
23,300 | from collections import defaultdict
from nndct_shared.utils import NndctDebugLogger, NndctOption
def log_debug_info(msg):
if NndctOption.nndct_inspect_debug.value:
NndctDebugLogger.write(f"{msg}\n") | null |
23,301 | import copy
import networkx as nx
from networkx.algorithms import is_isomorphic
from nndct_shared.base import NNDCT_OP
from nndct_shared.inspector.utils import build_xir_nndct_op_map, log_debug_info
from nndct_shared.compile.xir_helper import XIRHelper
from .graph import Graph, Node
_SIMULATION_PATTERNS = [
{"name": "conv2d_fix_with_hardwish",
"nodes":[("weights", Node({"fix2float"})),
("bias", Node({"fix2float"})),
("input", Node({"fix2float"})),
("conv2d", Node({"conv2d", "matmul", "scale"})),
("conv2d_out", Node({"float2fix"})),
("hsigmoid_in", Node({"fix2float"})),
("hsigmoid", Node({"hard-sigmoid"})),
("mul", Node({"mul"})),
("hsigmoid_out", Node({"float2fix"})),
("hswish_i0", Node({"fix2float"})),
("hswish_i1", Node({"fix2float"})),
("hswish", Node({"mul"})),
("output", Node({"float2fix"})),
],
"edges": [("weights", "conv2d"),
("bias", "conv2d"),
("input", "conv2d"),
("conv2d", "conv2d_out"),
("conv2d_out", "hsigmoid_in"),
("hsigmoid_in", "hsigmoid"),
("hsigmoid", "mul"),
("mul", "hsigmoid_out"),
("conv2d_out", "hswish_i0"),
("hsigmoid_out", "hswish_i1"),
("hswish_i0", "hswish"),
("hswish_i1", "hswish"),
("hswish", "output"),
]
},
{"name": "conv2d_fix_with_hardsigmoid",
"nodes":[("weights", Node({"fix2float"})),
("bias", Node({"fix2float"})),
("input", Node({"fix2float"})),
("conv2d", Node({"conv2d", "matmul", "scale"})),
("conv2d_out", Node({"float2fix"})),
("hsigmoid_in", Node({"fix2float"})),
("hsigmoid", Node({"hard-sigmoid"})),
("mul", Node({"mul"})),
("output", Node({"float2fix"}))
],
"edges": [("weights", "conv2d"),
("bias", "conv2d"),
("input", "conv2d"),
("conv2d", "conv2d_out"),
("conv2d_out", "hsigmoid_in"),
("hsigmoid_in", "hsigmoid"),
("hsigmoid", "mul"),
("mul", "output")]
},
{"name": "conv2d_fix_with_relu",
"nodes":[("weights", Node({"fix2float"})),
("bias", Node({"fix2float"})),
("input", Node({"fix2float"})),
# ("conv2d", Node({"conv2d", "matmul", "depthwise-conv2d", "transposed-conv2d", "transposed-depthwise-conv2d", "scale"})),
("conv2d", Node({"matmul", "scale"})),
("relu", Node({"relu", "prelu", "leaky-relu", "relu6"})),
("output", Node({"float2fix"}))
],
"edges": [("weights", "conv2d"), ("bias", "conv2d"), ("input", "conv2d"), ("conv2d", "relu"), ("relu", "output")]
},
{"name": "conv2d_fix_without_relu",
"nodes": [("weights", Node({"fix2float"})),
("bias", Node({"fix2float"})),
("input", Node({"fix2float"})),
# ("conv2d", Node({"conv2d", "matmul", "depthwise-conv2d", "transposed-conv2d", "scale"})),
("conv2d", Node({"matmul", "scale"})),
("output", Node({"float2fix"}))
],
"edges": [("weights", "conv2d"), ("bias", "conv2d"), ("input", "conv2d"), ("conv2d", "output")]
},
{
"name": "reduction_mean_with_mul_relu",
"nodes": [
("input", Node({"fix2float"})),
("reduction_mean", Node({"reduction_mean"})),
("const", Node({"const"})),
("mul", Node({"mul"})),
("relu", Node({"relu","prelu", "leaky-relu", "relu6"})),
("output", Node({"float2fix"})),
],
"edges": [("input", "reduction_mean"), ("reduction_mean", "mul"), ("const", "mul"), ("mul", "relu"), ("relu", "output")]
},
{
"name": "reduction_mean_with_mul",
"nodes": [
("input", Node({"fix2float"})),
("reduction_mean", Node({"reduction_mean"})),
("const", Node({"const"})),
("mul", Node({"mul"})),
("output", Node({"float2fix"})),
],
"edges": [("input", "reduction_mean"), ("reduction_mean", "mul"), ("const", "mul"), ("mul", "output")]
},
# {
# "name": "reduction_mean_with_relu",
# "nodes": [
# ("input", Node({"fix2float"})),
# ("reduction_mean", Node({"reduction_mean"})),
# ("relu", Node({"relu","prelu", "leaky-relu", "relu6"})),
# ("output", Node({"float2fix"})),
# ],
# "edges": [("input", "reduction_mean"), ("reduction_mean", "relu"), ("relu", "output")]
# },
# {"name": "pool_with_mul",
# "nodes": [("input", Node({"fix2float"})),
# ("pool", Node({"avgpool2d"})),
# ("const", Node({"const"})),
# ("mul", Node({"mul"})),
# ("output", Node({"float2fix"}))
# ],
# "edges": [("input", "pool"), ("pool", "mul"), ("const", "mul"), ("mul", "output")]
# },
# {"name": "pool_fix",
# "nodes": [("input", Node({"fix2float"})),
# ("pool", Node({"avgpool2d", "maxpool2d"})),
# ("output", Node({"float2fix"}))
# ],
# "edges": [("input", "pool"), ("pool", "output")]
# },
# { "name": "eltwise_fix_with_relu",
# "nodes": [("add", Node({"add", "mul"})),
# ("relu", Node({"relu"})),
# ("output", Node({"float2fix"}))
# ],
# "edges": [("add", "relu"), ("relu", "output")]
# },
# { "name": "eltwise_fix",
# "nodes": [("add", Node({"add", "mul"})),
# ("output", Node({"float2fix"}))
# ],
# "edges": [("add", "output")]
# },
# { "name": "concat_fix",
# "nodes": [("concat", Node({"concat"})),
# ("output", Node({"float2fix"}))
# ],
# "edges": [("concat", "output")]
# },
# {
# "name": "resize_fix",
# "nodes": [("input", Node({"fix2float"})),
# ("resize", Node({"resize"})),
# ("output", Node({"float2fix"}))
# ],
# "edges": [("input", "resize"), ("resize", "output")]
# },
# {
# "name": "pad_fix",
# "nodes": [("input", Node({"fix2float"})),
# ("pad", Node({"pad"})),
# ("output", Node({"float2fix"}))
# ],
# "edges": [("input", "pad"), ("pad", "output")]
# },
# {
# "name": "reduction_max_fix",
# "nodes": [("input", Node({"fix2float"})),
# ("reduction_max", Node({"reduction_max"})),
# ("output", Node({"float2fix"}))
# ],
# "edges": [("input", "reduction_max"), ("reduction_max", "output")],
# },
# {
# "name": "reshape_fix",
# "nodes": [("input", Node({"fix2float"})),
# ("reshape", Node({"reshape"})),
# ("output", Node({"float2fix"}))
# ],
# "edges": [("input", "reshape"), ("reshape", "output")],
# },
# {
# "name": "hsigmoid_fix",
# "nodes": [("input", Node({"fix2float"})),
# ("hsigmoid", Node({"hard-sigmoid"})),
# ("output", Node({"float2fix"}))
# ],
# "edges": [("input", "hsigmoid"), ("hsigmoid", "output")],
# },
# {
# "name": "reduction_mean",
# "nodes": [("input", Node({"fix"})),
# ("reduction_mean", Node({"reduction_mean"})),
# ("output", Node({"fix"}))
# ],
# "edges": [("input", "reduction_mean"), ("reduction_mean", "output")],
# },
# {
# "name": "reduce_max",
# "nodes": [("input", Node({"fix"})),
# ("poollikeop", Node({"reduction_max"})),
# ("output", Node({"fix"}))
# ],
# "edges": [("input", "poollikeop"), ("poollikeop", "output")],
# },
]
class Graph(object):
def __init__(self, name):
def __str__(self):
def __eq__(self, other):
def node_match(node_1, node_2):
def add_node(self, id, obj):
def add_edge(self, u, v):
def children(self, n):
def parents(self, n):
def get_node_types(self, id):
def set_node_types(self, id, types):
def remove_node(self, n):
def remove_one_node(self, n):
def remove_edge(self, u, v):
def visualize(self):
def copy(cls, original_graph):
def nodes(self):
def op_types(self):
def graph(self):
def name(self):
def _gen_pattern_from_sim_pattern():
pattern_graphs = []
patterns = copy.deepcopy(_SIMULATION_PATTERNS)
for pattern_info in patterns:
pattern_graph = Graph(pattern_info["name"])
for id, attr in pattern_info["nodes"]:
pattern_graph.add_node(id, attr)
for u, v in pattern_info["edges"]:
pattern_graph.add_edge(u, v)
pattern_graphs.append(pattern_graph)
return pattern_graphs | null |
23,303 | import copy
import networkx as nx
from networkx.algorithms import is_isomorphic
from nndct_shared.base import NNDCT_OP
from nndct_shared.inspector.utils import build_xir_nndct_op_map, log_debug_info
from nndct_shared.compile.xir_helper import XIRHelper
from .graph import Graph, Node
def get_templates_from_dpu_compiler():
def is_valid_pattern(pattern):
def reorder_patterns(patterns):
def create_pattern_graph(name: str, ops: "List[xir.op_template]"):
def convert_xir_type_to_nndct_type(pattern_graph):
def transform_pattern_graph(pattern_graph):
def log_debug_info(msg):
class XIRHelper(object):
def find_xops_from_nndct_node(cls, nndct_node, xmodel):
def get_xop_device_type(xop):
def get_xop_name(xop):
def get_xop_template_name(op_template):
def get_xop_template_types(op_template):
def get_xmodel_ops(xmodel):
def get_xop_type(xop):
def get_input_xops(xop):
def get_op_partition_msg(xop):
def is_dpu_pattern(cls, xmodel):
def get_pattern_partition_msg(cls, xmodel):
def is_valid_compiled_pattern(cls, xmodel):
def build_patterns_from_dpu_templates():
templates = get_templates_from_dpu_compiler()
log_debug_info("\nAll patterns from xcompiler:")
for id, (name, ops) in enumerate(templates):
log_debug_info(f"pattern id:{id}")
for op in ops:
log_debug_info(f"op name:{XIRHelper.get_xop_template_name(op)} type:{XIRHelper.get_xop_template_types(op)}")
patterns = []
pattern_graphs = []
for id, (name, ops) in enumerate(templates):
pattern_graph = create_pattern_graph(f"{name}_{id}", ops)
ret, msg = is_valid_pattern(pattern_graph)
if ret:
pattern_graphs.append(pattern_graph)
else:
log_debug_info(f"{pattern_graph.name} is filtered.({msg}).")
# pattern_graphs = pattern_graphs + _gen_pattern_from_sim_pattern()
log_debug_info("\nPattern Transformation:")
for pattern_graph in pattern_graphs:
log_debug_info(f"{pattern_graph.name} pattern")
log_debug_info("================Before transformation====================")
log_debug_info(str(pattern_graph))
transform_pattern_graph(pattern_graph)
if convert_xir_type_to_nndct_type(pattern_graph):
patterns.append(pattern_graph)
else:
log_debug_info(f"{pattern_graph.name} is ignored for there is at least one unknown op in the pattern.")
log_debug_info("================After transformation====================")
log_debug_info(str(pattern_graph))
patterns = reorder_patterns(patterns)
return patterns | null |
23,304 | import copy
import networkx as nx
from networkx.algorithms import is_isomorphic
from nndct_shared.base import NNDCT_OP
from nndct_shared.inspector.utils import build_xir_nndct_op_map, log_debug_info
from nndct_shared.compile.xir_helper import XIRHelper
from .graph import Graph, Node
class Graph(object):
def __init__(self, name):
def __str__(self):
def __eq__(self, other):
def node_match(node_1, node_2):
def add_node(self, id, obj):
def add_edge(self, u, v):
def children(self, n):
def parents(self, n):
def get_node_types(self, id):
def set_node_types(self, id, types):
def remove_node(self, n):
def remove_one_node(self, n):
def remove_edge(self, u, v):
def visualize(self):
def copy(cls, original_graph):
def nodes(self):
def op_types(self):
def graph(self):
def name(self):
def drop_fix_in_pattern(pattern_graph):
fix = {NNDCT_OP.FIX}
removed_node = []
pattern_without_fix = Graph.copy(pattern_graph)
for node in pattern_graph.nodes:
if pattern_without_fix.get_node_types(node) == fix:
removed_node.append(node)
for node in removed_node:
pattern_without_fix.remove_one_node(node)
return pattern_without_fix | null |
23,305 | from typing import Mapping
from nndct_shared.nndct_graph.base_graph import Graph
from nndct_shared.nndct_graph.base_node import Node
from nndct_shared.expanding.op_modifier import op_modifier
from nndct_shared.expanding.spec import DataInsert, GenericStructuredExpanding, StructuredExpanding
from nndct_shared.expanding.op_modifier import op_modifier
def propagate_node_expanding(node, node_expand_desc: Mapping[str, StructuredExpanding]):
node_expanding = node_expand_desc[node.name]
assert isinstance(node_expanding, GenericStructuredExpanding), \
"Variable node_expanding here has to be instance of GenericStructuredExpanding"
input_expanding = node_expand_desc[node.in_nodes[0]]
node_expanding.in_dim = input_expanding.out_dim
node_expanding.out_dim = node_expanding.in_dim
for insert in input_expanding.out_inserts:
node_expanding.add_insert(insert)
class Graph(GraphBase):
""" Graph object of NNDCT, contain list of NndctNodes.
That will be used for topology or export to XGraph"""
def __init__(self, graph_name=None):
super(Graph, self).__init__()
self._name = graph_name or 'NndctGraph'
self._nodes_by_name = {}
self._nodes_by_id = {}
self._end_tensors = []
self._copy_tensor_map = {}
self._tensors = {}
self._blocks = []
self._param_names = []
self._top_block = None
def __contains__(self, node_or_name: Union[str, Node]) -> bool:
if isinstance(node_or_name, str):
return node_or_name in self._nodes_by_name
else:
return node_or_name.name in self._nodes_by_name
def __deepcopy__(self, memo):
raise NotImplementedError("Deep copy is prohibited, use `clone_from` instead.")
def clone(self):
graph = self.__class__(self.name)
graph.clone_from(self)
return graph
def clone_from(self, src_graph):
local_map = {}
converted_nodes = []
head_node = self.create_node_from(src_graph.head_node, local_map, converted_nodes)
return_node = self.create_node_from(src_graph.return_node, local_map, converted_nodes)
top_block = Block(self, None, head_node, return_node)
self.set_top_block(top_block)
self._top_block.clone_from(src_graph.block, local_map, converted_nodes)
def create_node_from(self, src_node, local_map, converted_nodes):
node = Node(src_node.name, dtype=src_node.dtype, in_quant_part=src_node.in_quant_part)
node.owning_graph = self
node.idx = src_node.idx
node.scope_name = src_node.scope_name
node.source_range = src_node.source_range
node.target_device = src_node.target_device
node.normalized_name = src_node.normalized_name
converted_nodes.append(src_node.name)
for out in src_node.out_tensors:
if out.name in local_map:
node.add_out_tensor(local_map[out.name])
else:
tensor = Tensor(name=out.name)
tensor.clone_from(out)
local_map[out.name] = tensor
node.add_out_tensor(tensor)
for inp in src_node.in_tensors:
if inp.name in local_map:
node.add_in_tensor(local_map[inp.name])
else:
tensor = Tensor(name=inp.name)
tensor.clone_from(inp)
local_map[inp.name] = tensor
node.add_in_tensor(tensor)
node.clone_from(src_node, local_map)
for src_block in src_node.blocks:
head_node = self.create_node_from(src_block.input_node, local_map, converted_nodes)
return_node = self.create_node_from(src_block.return_node, local_map, converted_nodes)
block = Block(self, node, head_node, return_node)
block.clone_from(src_block, local_map, converted_nodes)
node.add_block(block)
return node
def node(self, name):
"""Return node with the specified name"""
return self._nodes_by_name.get(name, None)
def get_node_by_idx(self, idx):
node = self._nodes_by_id.get(idx, None)
assert node is not None
return node
def get_input_nodes(self):
input_nodes = []
for node in self.nodes:
if (len(self.parents(node)) == 0) and \
(node.op.type==NNDCT_OP.INPUT or node.op.type==NNDCT_OP.TUPLE_INPUT):
input_nodes.append(node)
return input_nodes
def get_input_tensors(self, input_args):
input_tensors = []
graph_name = self.name
input_nodes = self.get_input_nodes()
for idx in range(len(input_args)):
#input_node_name = graph_name + "::input_" + str(idx)
#input_node = self.node(input_node_name)
input_node = input_nodes[idx]
input_tensor = input_node.out_tensors[0]
if input_node.op.type == NNDCT_OP.INPUT:
input_tensors.append(input_tensor.name)
elif input_node.op.type == NNDCT_OP.TUPLE_INPUT:
for index in range(len(input_args[idx])):
input_tensor_name = input_tensor.name + '.' + str(index)
input_tensors.append(input_tensor_name)
return input_tensors
def get_return_tensors(self):
return_tensors = []
for tensor in self.return_node.in_tensors:
return_tensors.append(tensor.name)
return return_tensors
def add_node(self, node: Node) -> None:
if node.name in self._nodes_by_name:
return
if node.idx in self._nodes_by_id and node is not self._nodes_by_id[node.idx]:
raise RuntimeError(f"The id `{node.idx}` of {node.name} has been added into graph")
if node.idx == -1:
# if not self._nodes_by_id:
# node._idx = 0
# else:
# node._idx = max([node.idx for node in self.all_nodes()]) + 1
node._idx = -sys.maxsize + len(list(self.all_nodes()))
self._nodes_by_name[node.name] = node
self._nodes_by_id[node.idx] = node
def free_node(self, node):
node.owning_graph = None
self._nodes_by_name.pop(node.name)
self._nodes_by_id.pop(node.idx)
def remove_node(self, node):
assert node.in_tensors
assert len(node.out_tensors) == 1
out_tensor = node.out_tensors[0]
inp_tensor = node.in_tensors[0]
out_tensor.replace_uses_with(inp_tensor)
node.destroy()
def remove_node_by_types(self, node_types: List[str]) -> Dict[str, str]:
if any([node_type in self.op_types for node_type in node_types]):
nodes_to_remove = []
for node in self.nodes:
if node.op.type in node_types:
nodes_to_remove.append(node)
for node in nodes_to_remove:
self.remove_node(node)
def find_nodes_by_types(self, node_types: List[NNDCT_OP]):
conv_nodes = []
for node in self.nodes:
if node.op.type in node_types:
conv_nodes.append(node)
else:
continue
return conv_nodes
def reconnect_nodes(self):
self._nodes_by_id.clear()
for idx, node in enumerate(self.nodes):
node.idx = idx
self._nodes_by_id[idx] = node
node.clean_connections()
self.connect_nodes()
def connect_nodes(self):
for nodeA in self.nodes:
for input_tensor in nodeA.in_tensors:
for nodeB in self.nodes:
if nodeB is not nodeA and input_tensor in nodeB.out_tensors:
#nodeB.outputs.add(input_tensor.node.name)
nodeB.add_out_node(nodeA.name)
nodeA.add_in_node(input_tensor.node.name)
def parents(self, node: Union[Node, str]) -> List[Node]:
if isinstance(node, str):
node = self.node(node)
return [self.node(node_name) for node_name in node.in_nodes]
def children(self, node: Union[Node, str]) -> List[Node]:
if isinstance(node, str):
node = self.node(node)
return [self.node(node_name) for node_name in node.out_nodes]
def add_tensor(self, tensor):
self._tensors[tensor.name] = tensor
def tensor(self, name):
return self._tensors.get(name, None)
def param_tensor(self, name):
for node in self.all_nodes():
for _, tensor in node.op.params.items():
if tensor.name == name:
return tensor
def add_end_tensor(self, tensor):
self._end_tensors.append(tensor)
def __repr__(self):
return f"Graph(name={self.name})"
def __str__(self):
return json.dumps(self.description(), indent=2, separators=(',', ': '))
def description(self):
graph_des = {}
graph_des['graph_name'] = f"{self.__class__.__name__}"
graph_des['nodes'] = []
for n in sorted(self.nodes, key=lambda n: n.idx):
graph_des['nodes'].append(n.description())
return graph_des
def set_node_id(self, index, node):
node.idx = index
self._nodes_by_id[index] = node
def create_subgraph_from_nodeset(cls, origin_graph, nodeset, graph_name):
"""
create a subgraph from nodeset belong to origin graph
"""
assert len(nodeset) >= 2
sorted_nodeset = origin_graph.top_sort_nodeset(nodeset)
for node in sorted_nodeset:
node.remove_from_list()
subgraph = cls(graph_name)
sorted_nodeset[0].owning_graph = subgraph
sorted_nodeset[-1].owning_graph = subgraph
block = Block(subgraph, None, sorted_nodeset[0], sorted_nodeset[-1])
subgraph.set_top_block(block)
if len(sorted_nodeset) > 2:
for node in sorted_nodeset[1:-1]:
node.owning_graph = subgraph
subgraph.append_node(node)
return subgraph
def top_sort_nodeset(nodeset: Sequence[Node]) -> List[Node]:
sorted_nodeset = sorted(nodeset, key=lambda n: n.topo_position)
return sorted_nodeset
def get_topological_graph_nodes_list(self):
nodes_list = [node for node in self.nodes]
return Graph.top_sort_nodeset(nodes_list)
def name(self):
return self._name
def name(self, name):
self._name = name
def nodes(self):
return self._top_block.nodes
def reverse_nodes(self):
return self._top_block.reverse_nodes
def tensors(self):
for tensor in self._tensors.values():
yield tensor
# TODO: Remove
def end_tensors(self):
return [tensor for tensor in self.return_node.in_tensors]
def inputs(self):
return [node for node in self.all_nodes() if not node.in_nodes]
def outputs(self):
return [node for node in self.all_nodes() if not node.out_nodes]
def op_types(self):
return {node.op.type for node in self.all_nodes()}
def append_node(self, node):
self._top_block.append_node(node)
def add_param_name(self, param_name):
if param_name not in self._param_names:
self._param_names.append(param_name)
def param_names(self):
return list(self._param_names)
def block(self):
return self._top_block
def is_tensor_in_graph(self, tensor_name):
return True if tensor_name in self._tensors else False
def update_node_idx(self, node, index):
self._nodes_by_id[index] = node
def clear_node_id_map(self):
self._nodes_by_id.clear()
def remove_tensor(self, tensor):
self._tensors.pop(tensor.name)
if tensor.name in self._param_names:
self._param_names.remove(tensor.name)
def insert_node_between_nodes(self, new_node, parent_node, child_node):
assert parent_node.in_node_list() and child_node.in_node_list()
assert (parent_node.owning_graph == child_node.owning_graph
and parent_node.owning_block == child_node.owning_block)
new_node.owning_block = parent_node.owning_block
new_node.owning_graph = parent_node.owning_graph
tensor = Tensor(name=new_node.name, node=new_node)
new_node.add_out_tensor(tensor)
out_tensor = None
offset = None
for out in parent_node.out_tensors:
for use in out.uses:
if use.user is child_node:
out_tensor = out
offset = use.offset
break
#out_tensor.replace_uses_with(new_node.out_tensors[0])
child_node.replace_input_at(offset, new_node.out_tensors[0])
new_node.add_in_tensor(out_tensor)
new_node.insert_after(parent_node)
def set_top_block(self, block):
self._top_block = block
def add_block(self, block):
self._blocks.append(block)
def all_blocks(self):
return self._blocks
def all_nodes(self):
for _, node in self._nodes_by_name.items():
yield node
def head_node(self):
return self._top_block.input_node
def return_node(self):
return self._top_block.return_node
def clean_tensors_data(self):
for tensor in self.tensors:
tensor.clean_data()
def assign_node_topological_name(self, prefix="", suffix=""):
count = itertools.count(0)
illegal_char_regex = re.compile('[^0-9a-zA-Z_]+')
def _assgin_nodes(nodes):
sorted_nodes = self.top_sort_nodeset(list(nodes))
for n in sorted_nodes:
if n.blocks:
for block in n.blocks:
_assgin_nodes(block.nodes)
else:
candidate = illegal_char_regex.sub("_", n.op.type)
n.normalized_name = f"{prefix}{candidate}_{next(count)}{suffix}"
_assgin_nodes(self.nodes)
def simple_description(self):
"""
Only describe op type topological info.
"""
def get_node_simple_info(node):
node_des = {}
node_des['op'] = node.op.type
node_des["input_ops"] = [node.owning_graph.node(inode).op.type for inode in node.in_nodes]
node_des["output_ops"] = [node.owning_graph.node(onode).op.type for onode in node.out_nodes]
if node.blocks:
for i, block in enumerate(node.blocks):
node_des[f'block_{i}'] = []
for n in self.top_sort_nodeset(list(block.nodes)):
node_des[f'block_{i}'].append(get_node_simple_info(n))
return node_des
graph_des = {}
graph_des['nodes'] = []
for n in self.top_sort_nodeset(list(self.nodes)):
graph_des['nodes'].append(get_node_simple_info(n))
graph_str = json.dumps(graph_des, indent=2, separators=(',', ': '))
return graph_str
def get_md5(self):
import hashlib
graph_str = self.simple_description()
md = hashlib.md5()
md.update(graph_str.encode("utf-8"))
return md.hexdigest()
class Node(NodeBase):
  """A node couples an op with its input/output tensors and its position in
  the owning graph/block (doubly-linked node list plus a topological position).

  NOTE(review): decorator lines were stripped from the dump this file was
  recovered from, leaving every getter/setter pair as two plain methods of the
  same name (the second silently shadowing the first).  The
  ``@property``/``@<name>.setter`` decorators are restored below wherever a
  getter/setter pair exists or in-class attribute-style usage (e.g.
  ``self.out_tensors[i]``, ``node.next_node = self``) proves the accessor was
  a property.  ``has_custom_op`` and ``has_bound_params`` have no such
  evidence and are left as plain methods — TODO confirm against the original
  source.  A second, byte-identical ``scope_name`` property pair that appeared
  near the end of the class has been dropped as a duplicate.
  """

  def __init__(self, name: str,
               op: Optional[str] = None,
               dtype: Optional[str] = None,
               in_quant_part: Optional[bool] = False):
    super().__init__()
    self._name = name
    self._op = op
    self._dtype = dtype
    self._idx = -1  # graph-wide id; -1 means "not assigned yet" (see Graph.add_node)
    self._scope_name = ""
    self._source_range = ""
    self._normalized_name = ""
    self._in_tensors = []
    self._out_tensors = []
    self._in_nodes = []   # cached producer names, rebuilt by Graph.connect_nodes
    self._out_nodes = []  # cached consumer names, rebuilt by Graph.connect_nodes
    self._blocks = []     # nested blocks for control-flow ops (if/loop)
    self._is_quantizable = in_quant_part
    self._is_merged = False
    self._transpose_in_order = None
    self._transpose_out_order = None
    self._topo_position = 0
    self._block = None    # owning Block
    self._graph = None    # owning Graph
    self._neighbor_nodes = [None, None]  # [prev, next] links in the block's node list
    self._target_device = None

  def __repr__(self):
    return f"Node(name={self.name}, id={self.idx}, op_type={self.op.type}, quant_state={self.in_quant_part})"

  def __str__(self):
    return json.dumps(self.description(), indent=2, separators=(',', ': '))

  def __deepcopy__(self, memo):
    raise NotImplementedError("Deep copy is prohibited, use `clone_from` instead.")

  def clone_from(self, src_node, local_map):
    """Copy ``src_node``'s op into this node, remapping tensors via ``local_map``.

    The source op's attr/param/config dicts are temporarily swapped for
    shallow copies so that ``copy.copy(src_node.op)`` plus
    ``_export_attr_and_param()`` cannot mutate the originals.
    """
    tmp_attrs = src_node.op._attrs
    tmp_params = src_node.op._params
    tmp_configs = src_node.op._configs
    src_node.op._params = copy.copy(tmp_params)
    src_node.op._attrs = copy.copy(tmp_attrs)
    src_node.op._configs = copy.copy(tmp_configs)
    self.op = copy.copy(src_node.op)
    self.op._export_attr_and_param()
    src_node.op._attrs = tmp_attrs
    src_node.op._params = tmp_params
    src_node.op._configs = tmp_configs
    self.op.clone_from(src_node.op, local_map)

  @property
  def scope_name(self):
    return self._scope_name

  @scope_name.setter
  def scope_name(self, name):
    self._scope_name = name

  def description(self):
    """Return a JSON-serializable dict describing this node (recurses into blocks)."""
    node_des = {}
    node_des['name'] = self._name
    node_des['scope_name'] = self._scope_name
    node_des['idx'] = self._idx
    node_des['dtype'] = self._dtype
    node_des['enable_quant'] = self._is_quantizable
    node_des['in_nodes'] = [i for i in self.in_nodes]
    node_des['out_nodes'] = [o for o in self.out_nodes]
    node_des['in_tensors'] = [it.description() for it in self.in_tensors]
    node_des['out_tensors'] = [ot.description() for ot in self.out_tensors]
    node_des['op'] = self._op.description()
    if self._blocks:
      for i, block in enumerate(self._blocks):
        node_des[f'block_{i}'] = []
        for n in sorted(block.nodes, key=lambda n: n.idx):
          node_des[f'block_{i}'].append(n.description())
    return node_des

  def clean_connections(self):
    self._in_nodes = []
    self._out_nodes = []

  def add_in_node(self, node_name: str):
    if node_name not in self._in_nodes:
      self._in_nodes.append(node_name)

  def add_out_node(self, node_name: str):
    if node_name not in self._out_nodes:
      self._out_nodes.append(node_name)

  @property
  def in_tensors(self):
    return self._in_tensors

  @property
  def out_tensors(self):
    return self._out_tensors

  @property
  def in_nodes(self):
    """Producer node names, derived live from the input tensors."""
    nodes = []
    for tensor in self.in_tensors:
      if tensor.node is not None:
        nodes.append(tensor.node.name)
    return nodes

  @property
  def out_nodes(self):
    """Consumer node names, derived live from the output tensors' uses."""
    nodes = []
    for out in self.out_tensors:
      for use in out.uses:
        nodes.append(use.user.name)
    return nodes

  def node_attr(self, key):
    return self._op.get_attr(key)

  def set_node_attr(self, key, value):
    # First assignment uses set_attr; later assignments go through update_attr.
    if all([val is None for val in self._op._attr_value_mem[key]]):
      self._op.set_attr(key, value)
    else:
      self._op.update_attr(key, value)

  def node_config(self, key):
    return self._op.get_config(key)

  def set_node_config(self, key, value):
    self._op.set_config(key, value)

  def has_bound_params(self):
    return self._op.has_native_params()

  @property
  def op_type(self):
    # NOTE(review): restored as a property by analogy with the other
    # attribute-style accessors — TODO confirm against the original source.
    return self.op.type

  @property
  def name(self):
    return self._name

  @name.setter
  def name(self, value):
    self._name = value

  @property
  def idx(self):
    return self._idx

  @idx.setter
  def idx(self, index):
    self._idx = index
    self.owning_graph.update_node_idx(self, index)

  @property
  def op(self):
    return self._op

  @op.setter
  def op(self, op):
    self._op = op

  @property
  def dtype(self):
    return self._dtype

  # @property
  # def alias(self):
  #   return self._alias

  @property
  def in_quant_part(self) -> bool:
    return self._is_quantizable

  @in_quant_part.setter
  def in_quant_part(self, quant_state: bool) -> None:
    self._is_quantizable = quant_state

  @property
  def module(self):
    # NOTE(review): `_module` only exists after the setter ran (stored as a
    # weakref); reading before assignment raises AttributeError.
    return self._module()

  @module.setter
  def module(self, module):
    self._module = weakref.ref(module)

  @property
  def blocks(self):
    return self._blocks

  def add_block(self, block):
    self._blocks.append(block)

  def has_custom_op(self):
    return isinstance(self.op, CustomOp)

  def get_attr_val(self, attr_name):
    """Return the raw value of attr `attr_name`, unwrapping Tensor attrs."""
    attr = self.node_attr(attr_name)
    return attr.data if isinstance(attr, Tensor) else attr

  @property
  def merged(self):
    return self._is_merged

  @merged.setter
  def merged(self, flag):
    self._is_merged = flag

  @property
  def transpose_in_order(self):
    return self._transpose_in_order

  @transpose_in_order.setter
  def transpose_in_order(self, order):
    self._transpose_in_order = order

  @property
  def transpose_out_order(self):
    return self._transpose_out_order

  @transpose_out_order.setter
  def transpose_out_order(self, order):
    self._transpose_out_order = order

  def set_node_attr_tensor_value(self, old_tensor, new_tensor):
    """Replace `old_tensor` with `new_tensor` in every op attr holding it (by identity)."""
    for attr_name, attr_value in self.op.attrs.items():
      if attr_value.value is old_tensor:
        self.set_node_attr(attr_name, new_tensor)

  def destroy(self):
    """Detach this node from tensors, node list and graph. Control-flow nodes refuse."""
    if len(self.blocks) > 0:
      raise RuntimeError("Can't destroy if or loop node.")
    while len(self.out_tensors) > 0:
      self.remove_output(len(self.out_tensors) - 1)
    self.remove_all_inputs()
    if self.in_node_list():
      self.remove_from_list()
    self.owning_graph.free_node(self)

  def remove_output(self, i):
    assert i < len(self.out_tensors)
    assert len(self.out_tensors[i].uses) == 0
    output = self.out_tensors.pop(i)
    self.owning_graph.remove_tensor(output)
    # Shift the offsets of the outputs that slid down one slot.
    for output_offset in range(i, len(self.out_tensors)):
      self.out_tensors[output_offset].offset -= 1

  def replace_input_at(self, i, new_tensor):
    """Swap input slot `i` to `new_tensor`, moving this node's use records over."""
    old_tensor = self.in_tensors[i]
    if old_tensor is new_tensor:
      return
    self.in_tensors[i] = new_tensor
    uses = [u for u in old_tensor.uses]
    attr_uses = [attr_u for attr_u in old_tensor.attr_uses]
    for u in uses:
      if u.user is self:
        new_tensor.uses.append(u)
        old_tensor.uses.remove(u)
    for attr_u in attr_uses:
      if attr_u.user is self.op:
        old_tensor.replace_attr_with_new_tensor_v2(attr_u, new_tensor)

  def remove_input(self, i):
    self.drop_input(i)
    # Re-number the uses of the inputs after slot i before compacting the list.
    for j in range(i + 1, len(self._in_tensors)):
      it = self.find_use_for_input(j)
      it.offset -= 1
    self._in_tensors.pop(i)

  def remove_all_inputs(self):
    for i in range(len(self.in_tensors)):
      self.drop_input(i)
    self.in_tensors.clear()

  def drop_input(self, i):
    """Sever slot `i` (leaves a None placeholder) and return the old tensor."""
    assert i < len(self.in_tensors)
    input_value = self.in_tensors[i]
    use_it = self.find_use_for_input(i)
    input_value.uses.remove(use_it)
    self.in_tensors[i] = None
    return input_value

  def find_use_for_input(self, i):
    use_it = None
    for use in self.in_tensors[i].uses:
      if use.offset == i and use.user is self:
        use_it = use
    assert use_it is not None
    return use_it

  @property
  def owning_block(self):
    return self._block

  @owning_block.setter
  def owning_block(self, block):
    self._block = block

  @property
  def owning_graph(self):
    return self._graph

  @owning_graph.setter
  def owning_graph(self, graph):
    self._graph = graph
    if self._graph:
      self._graph.add_node(self)

  @property
  def topo_position(self):
    return self._topo_position

  @topo_position.setter
  def topo_position(self, pos):
    self._topo_position = pos

  def insert_before(self, node):
    assert node.in_node_list()
    self.insert_after(node.prev_node)

  def insert_after(self, node):
    assert not self.in_node_list() and node.in_node_list()
    assert node.owning_block is not None
    self._block = node.owning_block
    next_node = node.next_node
    node.next_node = self
    self.prev_node = node
    self.next_node = next_node
    next_node.prev_node = self
    self.update_topo_position()

  def update_topo_position(self):
    """Assign a fractional position between the neighbors; reindex the whole
    block when the interval is exhausted."""
    is_first_node = self.prev_node is self.owning_block.input_node
    is_last_node = self.next_node is self.owning_block.return_node
    prev_pos = self.prev_node.topo_position
    next_pos = self.next_node.topo_position
    if is_last_node:
      if is_first_node:
        self.topo_position = MID_POSITION
        return
      if prev_pos >= (POSITION_UPPER_BOUND - APPEND_INTERVAL):
        self.owning_block.reindex_topo()
        return
      self.topo_position = prev_pos + APPEND_INTERVAL
    elif is_first_node:
      if next_pos <= (POSITION_LOWER_BOUND + APPEND_INTERVAL):
        self.owning_block.reindex_topo()
        return
      self.topo_position = next_pos - APPEND_INTERVAL
    else:
      pos_between = prev_pos + (next_pos - prev_pos) / 2
      if pos_between == prev_pos:
        # No representable position left between the neighbors.
        self.owning_block.reindex_topo()
        return
      self.topo_position = pos_between

  @property
  def next_node(self):
    return self._neighbor_nodes[1]

  @next_node.setter
  def next_node(self, node):
    self._neighbor_nodes[1] = node

  @property
  def prev_node(self):
    return self._neighbor_nodes[0]

  @prev_node.setter
  def prev_node(self, node):
    self._neighbor_nodes[0] = node

  def in_node_list(self):
    if self.next_node is None:
      assert self.prev_node is None
    return self.next_node is not None

  def remove_from_list(self):
    assert self.in_node_list()
    if self.owning_block.input_node is self:
      self.owning_block.input_node = self.next_node
    self.owning_block = None
    next_node = self.next_node
    prev_node = self.prev_node
    prev_node.next_node = next_node
    next_node.prev_node = prev_node
    self.next_node = None
    self.prev_node = None

  def add_in_tensor(self, tensor):
    tensor.uses.append(Use(self, len(self.in_tensors)))
    self._in_tensors.append(tensor)
    self.owning_graph.add_tensor(tensor)

  def add_out_tensor(self, tensor):
    tensor.offset = len(self.out_tensors)
    self._out_tensors.append(tensor)
    tensor.node = self
    self.owning_graph.add_tensor(tensor)

  @property
  def target_device(self):
    return self._target_device

  @target_device.setter
  def target_device(self, device):
    self._target_device = device

  @property
  def source_range(self):
    return self._source_range

  @source_range.setter
  def source_range(self, source_range):
    self._source_range = source_range

  @property
  def normalized_name(self):
    return self._normalized_name

  @normalized_name.setter
  def normalized_name(self, name):
    self._normalized_name = name
# Registry mapping op types to "expanding" modifier functions; entries are
# looked up by `update_node_by_expanding` below.
op_modifier = registry.Registry("expanding Modifier Functions")
])
])
class StructuredExpanding(object):
  """Base record describing how one node's channel dimensions are expanded.

  Tracks the expanded input/output dims for the node named ``node_name``;
  subclasses must provide ``added_out_channel``/``added_in_channel``/
  ``out_inserts``.

  NOTE(review): decorator lines were stripped from the dump this file was
  recovered from, so the getter/setter pairs appeared as duplicate plain
  methods.  ``@property``/setter decorators are restored here; callers such as
  ``input_pruning.added_out_channel > 0`` (attribute access, no call) confirm
  the accessors were properties.
  """

  def __init__(self, node_name: str) -> None:
    self._node_name: str = node_name
    self._in_dim: int = 0
    self._out_dim: int = 0

  @property
  def node_name(self) -> str:
    """Name of the node this expanding spec belongs to."""
    return self._node_name

  @property
  def in_dim(self) -> int:
    return self._in_dim

  @in_dim.setter
  def in_dim(self, v: int) -> None:
    self._in_dim = v

  @property
  def out_dim(self) -> int:
    return self._out_dim

  @out_dim.setter
  def out_dim(self, v: int) -> None:
    self._out_dim = v

  @property
  def added_out_channel(self) -> int:
    raise NotImplementedError("method added_out_channel is not implemented")

  @property
  def added_in_channel(self) -> int:
    raise NotImplementedError("method added_in_channel is not implemented")

  @property
  def out_inserts(self) -> List[DataInsert]:
    raise NotImplementedError("method out_inserts is not implemented")
def update_node_by_expanding(graph: Graph, node: Node, node_expand_desc: Mapping[str, StructuredExpanding]):
  """Apply the expanding description to `node`.

  Ops with a registered handler in `op_modifier` are dispatched to it;
  everything else falls back to `propagate_node_expanding`.
  """
  handler = op_modifier.lookup(node.op.type) if node.op.type in op_modifier else None
  if handler is None:
    propagate_node_expanding(node, node_expand_desc)
  else:
    handler(graph, node, node_expand_desc)
from typing import Mapping
from nndct_shared.expanding.spec import BatchNormStructuredExpanding, InstanceNormStructuredExpanding, \
DataInsert, GenericStructuredExpanding, StructuredExpanding, WeightedNodeStructuredExpanding
from nndct_shared.nndct_graph.base_graph import Graph
from nndct_shared.nndct_graph.base_node import Node
from nndct_shared.nndct_graph.base_tensor import Tensor
from nndct_shared.utils import registry
from nndct_shared.base.key_names import NNDCT_OP as OpTypes
from nndct_shared.pruning.pruning_lib import is_depthwise_conv, _DISALLOW_PRUNED_INPUT_OPS, find_prunable_ancestor, CONV_OPS
import numpy as np
def _modify_depthwise(graph: Graph, node: Node, expanding_desc: Mapping[str, StructuredExpanding]):
  """NOTE(review): the implementation body of this function is missing from
  this dump (only the signature survived) — TODO recover it from the original
  source."""

def modify_depthwise_conv(graph, node, pruning_res):
  # Entry point for depthwise convs; delegates to _modify_depthwise.
  # NOTE(review): an `@op_modifier.register(...)` decorator was probably
  # stripped from this def in the dump — TODO confirm.
  _modify_depthwise(graph, node, pruning_res)
from typing import Mapping
from nndct_shared.expanding.spec import BatchNormStructuredExpanding, InstanceNormStructuredExpanding, \
DataInsert, GenericStructuredExpanding, StructuredExpanding, WeightedNodeStructuredExpanding
from nndct_shared.nndct_graph.base_graph import Graph
from nndct_shared.nndct_graph.base_node import Node
from nndct_shared.nndct_graph.base_tensor import Tensor
from nndct_shared.utils import registry
from nndct_shared.base.key_names import NNDCT_OP as OpTypes
from nndct_shared.pruning.pruning_lib import is_depthwise_conv, _DISALLOW_PRUNED_INPUT_OPS, find_prunable_ancestor, CONV_OPS
import numpy as np
class StructuredExpanding(object):
  """Base record describing how one node's channel dimensions are expanded.

  Tracks the expanded input/output dims for the node named ``node_name``;
  subclasses must provide ``added_out_channel``/``added_in_channel``/
  ``out_inserts``.

  NOTE(review): this definition duplicates one that appears earlier in the
  dump.  Decorator lines were stripped from the dump, so the getter/setter
  pairs appeared as duplicate plain methods; ``@property``/setter decorators
  are restored here — attribute-style callers such as
  ``input_pruning.added_out_channel > 0`` confirm they were properties.
  """

  def __init__(self, node_name: str) -> None:
    self._node_name: str = node_name
    self._in_dim: int = 0
    self._out_dim: int = 0

  @property
  def node_name(self) -> str:
    """Name of the node this expanding spec belongs to."""
    return self._node_name

  @property
  def in_dim(self) -> int:
    return self._in_dim

  @in_dim.setter
  def in_dim(self, v: int) -> None:
    self._in_dim = v

  @property
  def out_dim(self) -> int:
    return self._out_dim

  @out_dim.setter
  def out_dim(self, v: int) -> None:
    self._out_dim = v

  @property
  def added_out_channel(self) -> int:
    raise NotImplementedError("method added_out_channel is not implemented")

  @property
  def added_in_channel(self) -> int:
    raise NotImplementedError("method added_in_channel is not implemented")

  @property
  def out_inserts(self) -> List[DataInsert]:
    raise NotImplementedError("method out_inserts is not implemented")
class Graph(GraphBase):
  """Graph object of NNDCT, containing a list of NndctNodes.

  Used for topology queries and for export to XGraph.

  NOTE(review): decorator lines were stripped from the dump this file was
  recovered from, leaving getter/setter pairs as duplicate plain methods and
  ``cls``/bare-argument methods undecorated.  ``@property``, ``@classmethod``
  and ``@staticmethod`` are restored below wherever in-class usage proves them
  (e.g. ``for node in self.nodes``, ``src_graph.head_node``,
  ``Graph.top_sort_nodeset(...)``, ``cls(graph_name)``); ``reverse_nodes``,
  ``end_tensors``, ``inputs``, ``outputs`` and ``param_names`` lack in-class
  evidence and are restored as properties by analogy — TODO confirm against
  the original source.
  """

  def __init__(self, graph_name=None):
    super(Graph, self).__init__()
    self._name = graph_name or 'NndctGraph'
    self._nodes_by_name = {}
    self._nodes_by_id = {}
    self._end_tensors = []
    self._copy_tensor_map = {}
    self._tensors = {}
    self._blocks = []
    self._param_names = []
    self._top_block = None

  def __contains__(self, node_or_name: Union[str, Node]) -> bool:
    if isinstance(node_or_name, str):
      return node_or_name in self._nodes_by_name
    return node_or_name.name in self._nodes_by_name

  def __deepcopy__(self, memo):
    raise NotImplementedError("Deep copy is prohibited, use `clone_from` instead.")

  def clone(self):
    """Return a new graph that is a structural copy of this one."""
    graph = self.__class__(self.name)
    graph.clone_from(self)
    return graph

  def clone_from(self, src_graph):
    """Rebuild this graph as a copy of `src_graph`, remapping tensors via a local map."""
    local_map = {}
    converted_nodes = []
    head_node = self.create_node_from(src_graph.head_node, local_map, converted_nodes)
    return_node = self.create_node_from(src_graph.return_node, local_map, converted_nodes)
    top_block = Block(self, None, head_node, return_node)
    self.set_top_block(top_block)
    self._top_block.clone_from(src_graph.block, local_map, converted_nodes)

  def create_node_from(self, src_node, local_map, converted_nodes):
    """Clone `src_node` (including its tensors and nested blocks) into this graph."""
    node = Node(src_node.name, dtype=src_node.dtype, in_quant_part=src_node.in_quant_part)
    node.owning_graph = self
    node.idx = src_node.idx
    node.scope_name = src_node.scope_name
    node.source_range = src_node.source_range
    node.target_device = src_node.target_device
    node.normalized_name = src_node.normalized_name
    converted_nodes.append(src_node.name)
    for out in src_node.out_tensors:
      if out.name in local_map:
        node.add_out_tensor(local_map[out.name])
      else:
        tensor = Tensor(name=out.name)
        tensor.clone_from(out)
        local_map[out.name] = tensor
        node.add_out_tensor(tensor)
    for inp in src_node.in_tensors:
      if inp.name in local_map:
        node.add_in_tensor(local_map[inp.name])
      else:
        tensor = Tensor(name=inp.name)
        tensor.clone_from(inp)
        local_map[inp.name] = tensor
        node.add_in_tensor(tensor)
    node.clone_from(src_node, local_map)
    for src_block in src_node.blocks:
      head_node = self.create_node_from(src_block.input_node, local_map, converted_nodes)
      return_node = self.create_node_from(src_block.return_node, local_map, converted_nodes)
      block = Block(self, node, head_node, return_node)
      block.clone_from(src_block, local_map, converted_nodes)
      node.add_block(block)
    return node

  def node(self, name):
    """Return node with the specified name (None if absent)."""
    return self._nodes_by_name.get(name, None)

  def get_node_by_idx(self, idx):
    node = self._nodes_by_id.get(idx, None)
    assert node is not None
    return node

  def get_input_nodes(self):
    """Return parentless INPUT / TUPLE_INPUT nodes."""
    input_nodes = []
    for node in self.nodes:
      if (len(self.parents(node)) == 0) and \
         (node.op.type == NNDCT_OP.INPUT or node.op.type == NNDCT_OP.TUPLE_INPUT):
        input_nodes.append(node)
    return input_nodes

  def get_input_tensors(self, input_args):
    """Return the flat list of input tensor names matching `input_args`
    (tuple inputs are expanded element-wise as '<name>.<i>')."""
    input_tensors = []
    input_nodes = self.get_input_nodes()
    for idx in range(len(input_args)):
      input_node = input_nodes[idx]
      input_tensor = input_node.out_tensors[0]
      if input_node.op.type == NNDCT_OP.INPUT:
        input_tensors.append(input_tensor.name)
      elif input_node.op.type == NNDCT_OP.TUPLE_INPUT:
        for index in range(len(input_args[idx])):
          input_tensors.append(input_tensor.name + '.' + str(index))
    return input_tensors

  def get_return_tensors(self):
    return [tensor.name for tensor in self.return_node.in_tensors]

  def add_node(self, node: Node) -> None:
    if node.name in self._nodes_by_name:
      return
    if node.idx in self._nodes_by_id and node is not self._nodes_by_id[node.idx]:
      raise RuntimeError(f"The id `{node.idx}` of {node.name} has been added into graph")
    if node.idx == -1:
      # Provisional, collision-free id for nodes not yet indexed.
      node._idx = -sys.maxsize + len(list(self.all_nodes()))
    self._nodes_by_name[node.name] = node
    self._nodes_by_id[node.idx] = node

  def free_node(self, node):
    node.owning_graph = None
    self._nodes_by_name.pop(node.name)
    self._nodes_by_id.pop(node.idx)

  def remove_node(self, node):
    """Splice out a single-output node, rewiring its consumers to its first input."""
    assert node.in_tensors
    assert len(node.out_tensors) == 1
    out_tensor = node.out_tensors[0]
    inp_tensor = node.in_tensors[0]
    out_tensor.replace_uses_with(inp_tensor)
    node.destroy()

  def remove_node_by_types(self, node_types: List[str]) -> None:
    # NOTE(review): the original annotated the return as Dict[str, str], but
    # nothing is ever returned; the annotation is corrected to None.
    present_types = self.op_types
    if any(node_type in present_types for node_type in node_types):
      nodes_to_remove = [node for node in self.nodes if node.op.type in node_types]
      for node in nodes_to_remove:
        self.remove_node(node)

  def find_nodes_by_types(self, node_types: List[NNDCT_OP]):
    return [node for node in self.nodes if node.op.type in node_types]

  def reconnect_nodes(self):
    """Re-assign contiguous ids and rebuild the cached in/out-node name lists."""
    self._nodes_by_id.clear()
    for idx, node in enumerate(self.nodes):
      node.idx = idx
      self._nodes_by_id[idx] = node
      node.clean_connections()
    self.connect_nodes()

  def connect_nodes(self):
    # O(n^2) scan pairing producers and consumers through shared tensors.
    for nodeA in self.nodes:
      for input_tensor in nodeA.in_tensors:
        for nodeB in self.nodes:
          if nodeB is not nodeA and input_tensor in nodeB.out_tensors:
            nodeB.add_out_node(nodeA.name)
            nodeA.add_in_node(input_tensor.node.name)

  def parents(self, node: Union[Node, str]) -> List[Node]:
    if isinstance(node, str):
      node = self.node(node)
    return [self.node(node_name) for node_name in node.in_nodes]

  def children(self, node: Union[Node, str]) -> List[Node]:
    if isinstance(node, str):
      node = self.node(node)
    return [self.node(node_name) for node_name in node.out_nodes]

  def add_tensor(self, tensor):
    self._tensors[tensor.name] = tensor

  def tensor(self, name):
    return self._tensors.get(name, None)

  def param_tensor(self, name):
    """Linear search of every node's op params for a tensor named `name`."""
    for node in self.all_nodes():
      for _, tensor in node.op.params.items():
        if tensor.name == name:
          return tensor

  def add_end_tensor(self, tensor):
    self._end_tensors.append(tensor)

  def __repr__(self):
    return f"Graph(name={self.name})"

  def __str__(self):
    return json.dumps(self.description(), indent=2, separators=(',', ': '))

  def description(self):
    graph_des = {}
    graph_des['graph_name'] = f"{self.__class__.__name__}"
    graph_des['nodes'] = []
    for n in sorted(self.nodes, key=lambda n: n.idx):
      graph_des['nodes'].append(n.description())
    return graph_des

  def set_node_id(self, index, node):
    node.idx = index
    self._nodes_by_id[index] = node

  @classmethod
  def create_subgraph_from_nodeset(cls, origin_graph, nodeset, graph_name):
    """Create a subgraph from a nodeset belonging to `origin_graph`."""
    assert len(nodeset) >= 2
    sorted_nodeset = origin_graph.top_sort_nodeset(nodeset)
    for node in sorted_nodeset:
      node.remove_from_list()
    subgraph = cls(graph_name)
    sorted_nodeset[0].owning_graph = subgraph
    sorted_nodeset[-1].owning_graph = subgraph
    block = Block(subgraph, None, sorted_nodeset[0], sorted_nodeset[-1])
    subgraph.set_top_block(block)
    if len(sorted_nodeset) > 2:
      for node in sorted_nodeset[1:-1]:
        node.owning_graph = subgraph
        subgraph.append_node(node)
    return subgraph

  @staticmethod
  def top_sort_nodeset(nodeset: Sequence[Node]) -> List[Node]:
    return sorted(nodeset, key=lambda n: n.topo_position)

  def get_topological_graph_nodes_list(self):
    return Graph.top_sort_nodeset([node for node in self.nodes])

  @property
  def name(self):
    return self._name

  @name.setter
  def name(self, name):
    self._name = name

  @property
  def nodes(self):
    return self._top_block.nodes

  @property
  def reverse_nodes(self):
    return self._top_block.reverse_nodes

  @property
  def tensors(self):
    for tensor in self._tensors.values():
      yield tensor

  # TODO: Remove
  @property
  def end_tensors(self):
    return [tensor for tensor in self.return_node.in_tensors]

  @property
  def inputs(self):
    return [node for node in self.all_nodes() if not node.in_nodes]

  @property
  def outputs(self):
    return [node for node in self.all_nodes() if not node.out_nodes]

  @property
  def op_types(self):
    return {node.op.type for node in self.all_nodes()}

  def append_node(self, node):
    self._top_block.append_node(node)

  def add_param_name(self, param_name):
    if param_name not in self._param_names:
      self._param_names.append(param_name)

  @property
  def param_names(self):
    return list(self._param_names)

  @property
  def block(self):
    return self._top_block

  def is_tensor_in_graph(self, tensor_name):
    return tensor_name in self._tensors

  def update_node_idx(self, node, index):
    self._nodes_by_id[index] = node

  def clear_node_id_map(self):
    self._nodes_by_id.clear()

  def remove_tensor(self, tensor):
    self._tensors.pop(tensor.name)
    if tensor.name in self._param_names:
      self._param_names.remove(tensor.name)

  def insert_node_between_nodes(self, new_node, parent_node, child_node):
    """Insert `new_node` on the edge parent->child, rewiring tensors."""
    assert parent_node.in_node_list() and child_node.in_node_list()
    assert (parent_node.owning_graph == child_node.owning_graph
            and parent_node.owning_block == child_node.owning_block)
    new_node.owning_block = parent_node.owning_block
    new_node.owning_graph = parent_node.owning_graph
    tensor = Tensor(name=new_node.name, node=new_node)
    new_node.add_out_tensor(tensor)
    out_tensor = None
    offset = None
    for out in parent_node.out_tensors:
      for use in out.uses:
        if use.user is child_node:
          out_tensor = out
          offset = use.offset
          break
    child_node.replace_input_at(offset, new_node.out_tensors[0])
    new_node.add_in_tensor(out_tensor)
    new_node.insert_after(parent_node)

  def set_top_block(self, block):
    self._top_block = block

  def add_block(self, block):
    self._blocks.append(block)

  def all_blocks(self):
    return self._blocks

  def all_nodes(self):
    """Yield every node known to the graph (including nodes in nested blocks)."""
    yield from self._nodes_by_name.values()

  @property
  def head_node(self):
    return self._top_block.input_node

  @property
  def return_node(self):
    return self._top_block.return_node

  def clean_tensors_data(self):
    for tensor in self.tensors:
      tensor.clean_data()

  def assign_node_topological_name(self, prefix="", suffix=""):
    """Assign each leaf node a unique, identifier-safe name derived from its op type."""
    count = itertools.count(0)
    illegal_char_regex = re.compile('[^0-9a-zA-Z_]+')

    def _assign_nodes(nodes):  # NOTE: typo `_assgin_nodes` fixed (local helper)
      for n in self.top_sort_nodeset(list(nodes)):
        if n.blocks:
          for block in n.blocks:
            _assign_nodes(block.nodes)
        else:
          candidate = illegal_char_regex.sub("_", n.op.type)
          n.normalized_name = f"{prefix}{candidate}_{next(count)}{suffix}"

    _assign_nodes(self.nodes)

  def simple_description(self):
    """Only describe op type topological info (JSON string)."""

    def get_node_simple_info(node):
      node_des = {}
      node_des['op'] = node.op.type
      node_des["input_ops"] = [node.owning_graph.node(inode).op.type for inode in node.in_nodes]
      node_des["output_ops"] = [node.owning_graph.node(onode).op.type for onode in node.out_nodes]
      if node.blocks:
        for i, block in enumerate(node.blocks):
          node_des[f'block_{i}'] = []
          for n in self.top_sort_nodeset(list(block.nodes)):
            node_des[f'block_{i}'].append(get_node_simple_info(n))
      return node_des

    graph_des = {}
    graph_des['nodes'] = []
    for n in self.top_sort_nodeset(list(self.nodes)):
      graph_des['nodes'].append(get_node_simple_info(n))
    return json.dumps(graph_des, indent=2, separators=(',', ': '))

  def get_md5(self):
    """MD5 hex digest of `simple_description()` — fingerprint of the topology."""
    import hashlib
    md = hashlib.md5()
    md.update(self.simple_description().encode("utf-8"))
    return md.hexdigest()
class Node(NodeBase):
"""A node contains an op and its input and output tensor.
"""
def __init__(self, name: str,
op: Optional[str] = None,
dtype: Optional[str] = None,
in_quant_part: Optional[bool] = False):
super().__init__()
self._name = name
self._op = op
self._dtype = dtype
self._idx = -1
self._scope_name = ""
self._source_range = ""
self._normalized_name = ""
self._in_tensors = []
self._out_tensors = []
self._in_nodes = []
self._out_nodes = []
self._blocks = []
self._is_quantizable = in_quant_part
self._is_merged = False
self._transpose_in_order = None
self._transpose_out_order = None
self._topo_position = 0
self._block = None
self._graph = None
self._neighbor_nodes = [None, None]
self._target_device = None
def __repr__(self):
return f"Node(name={self.name}, id={self.idx}, op_type={self.op.type}, quant_state={self.in_quant_part})"
def __str__(self):
return json.dumps(self.description(), indent=2, separators=(',', ': '))
def __deepcopy__(self, memo):
raise NotImplementedError("Deep copy is prohibited, use `clone_from` instead.")
def clone_from(self, src_node, local_map):
tmp_attrs = src_node.op._attrs
tmp_params = src_node.op._params
tmp_configs = src_node.op._configs
src_node.op._params = copy.copy(tmp_params)
src_node.op._attrs = copy.copy(tmp_attrs)
src_node.op._configs = copy.copy(tmp_configs)
self.op = copy.copy(src_node.op)
self.op._export_attr_and_param()
src_node.op._attrs = tmp_attrs
src_node.op._params = tmp_params
src_node.op._configs = tmp_configs
self.op.clone_from(src_node.op, local_map)
def scope_name(self):
return self._scope_name
def scope_name(self, name):
self._scope_name = name
def description(self):
node_des = {}
node_des['name'] = self._name
node_des['scope_name'] = self._scope_name
node_des['idx'] = self._idx
node_des['dtype'] = self._dtype
node_des['enable_quant'] = self._is_quantizable
node_des['in_nodes'] = [i for i in self.in_nodes]
node_des['out_nodes'] = [o for o in self.out_nodes]
node_des['in_tensors'] = [it.description() for it in self.in_tensors]
node_des['out_tensors'] = [ot.description() for ot in self.out_tensors]
node_des['op'] = self._op.description()
if self._blocks:
for i, block in enumerate(self._blocks):
node_des[f'block_{i}'] = []
for n in sorted(block.nodes, key=lambda n: n.idx):
node_des[f'block_{i}'].append(n.description())
return node_des
def clean_connections(self):
self._in_nodes = []
self._out_nodes = []
def add_in_node(self, node_name: str):
if node_name not in self._in_nodes:
self._in_nodes.append(node_name)
def add_out_node(self, node_name: str):
if node_name not in self._out_nodes:
self._out_nodes.append(node_name)
def in_tensors(self):
return self._in_tensors
def out_tensors(self):
return self._out_tensors
def in_nodes(self):
nodes = []
for tensor in self.in_tensors:
if tensor.node is not None:
nodes.append(tensor.node.name)
return nodes
def out_nodes(self):
nodes = []
for out in self.out_tensors:
for use in out.uses:
nodes.append(use.user.name)
return nodes
def node_attr(self, key):
return self._op.get_attr(key)
def set_node_attr(self, key, value):
if all([val is None for val in self._op._attr_value_mem[key]]):
self._op.set_attr(key, value)
else:
self._op.update_attr(key, value)
def node_config(self, key):
return self._op.get_config(key)
def set_node_config(self, key, value):
self._op.set_config(key, value)
def has_bound_params(self):
return self._op.has_native_params()
def op_type(self):
return self.op.type
def name(self):
return self._name
def name(self, value):
self._name = value
def idx(self):
return self._idx
def idx(self, index):
self._idx = index
self.owning_graph.update_node_idx(self, index)
def op(self):
return self._op
def op(self, op):
self._op = op
def dtype(self):
return self._dtype
# @property
# def alias(self):
# return self._alias
def in_quant_part(self) -> bool:
return self._is_quantizable
def in_quant_part(self, quant_state: bool) -> None:
self._is_quantizable = quant_state
def module(self):
return self._module()
def module(self, module):
self._module = weakref.ref(module)
def blocks(self):
return self._blocks
def add_block(self, block):
self._blocks.append(block)
def has_custom_op(self):
return isinstance(self.op, CustomOp)
def get_attr_val(self, attr_name):
attr = self.node_attr(attr_name)
return attr.data if isinstance(attr, Tensor) else attr
def merged(self):
return self._is_merged
def merged(self, flag):
self._is_merged = flag
def transpose_in_order(self):
return self._transpose_in_order
def transpose_in_order(self, order):
self._transpose_in_order = order
def transpose_out_order(self):
return self._transpose_out_order
def transpose_out_order(self, order):
self._transpose_out_order = order
def set_node_attr_tensor_value(self, old_tensor, new_tensor):
for attr_name, attr_value in self.op.attrs.items():
if attr_value.value is old_tensor:
self.set_node_attr(attr_name, new_tensor)
def destroy(self):
if len(self.blocks) > 0:
raise RuntimeError("Can't destroy if or loop node.")
while len(self.out_tensors) > 0:
self.remove_output(len(self.out_tensors) - 1)
self.remove_all_inputs()
if self.in_node_list():
self.remove_from_list()
self.owning_graph.free_node(self)
def remove_output(self, i):
assert i < len(self.out_tensors)
assert len(self.out_tensors[i].uses) == 0
output = self.out_tensors.pop(i)
self.owning_graph.remove_tensor(output)
for output_offset in range(i, len(self.out_tensors)):
self.out_tensors[output_offset].offset -= 1
def replace_input_at(self, i, new_tensor):
old_tensor = self.in_tensors[i]
if old_tensor is new_tensor:
return
self.in_tensors[i] = new_tensor
uses = [u for u in old_tensor.uses]
attr_uses = [attr_u for attr_u in old_tensor.attr_uses]
for u in uses:
if u.user is self:
new_tensor.uses.append(u)
old_tensor.uses.remove(u)
for attr_u in attr_uses:
if attr_u.user is self.op:
old_tensor.replace_attr_with_new_tensor_v2(attr_u, new_tensor)
def remove_input(self, i):
self.drop_input(i)
for j in range(i + 1, len(self._in_tensors)):
it = self.find_use_for_input(j)
it.offset -= 1
self._in_tensors.pop(i)
def remove_all_inputs(self):
for i in range(len(self.in_tensors)):
self.drop_input(i)
self.in_tensors.clear()
def drop_input(self, i):
assert i < len(self.in_tensors)
input_value = self.in_tensors[i]
use_it = self.find_use_for_input(i)
input_value.uses.remove(use_it)
self.in_tensors[i] = None
return input_value
def find_use_for_input(self, i):
use_it = None
for use in self.in_tensors[i].uses:
if use.offset == i and use.user is self:
use_it = use
assert use_it is not None
return use_it
def owning_block(self):
return self._block
def owning_block(self, block):
self._block = block
def owning_graph(self):
return self._graph
def owning_graph(self, graph):
self._graph = graph
if self._graph:
self._graph.add_node(self)
def topo_position(self):
return self._topo_position
def topo_position(self, pos):
self._topo_position = pos
def insert_before(self, node):
assert node.in_node_list()
self.insert_after(node.prev_node)
def insert_after(self, node):
assert not self.in_node_list() and node.in_node_list()
assert node.owning_block is not None
self._block = node.owning_block
next_node = node.next_node
node.next_node = self
self.prev_node = node
self.next_node = next_node
next_node.prev_node = self
self.update_topo_position()
def update_topo_position(self):
is_first_node = self.prev_node is self.owning_block.input_node
is_last_node = self.next_node is self.owning_block.return_node
prev_pos = self.prev_node.topo_position
next_pos = self.next_node.topo_position
if is_last_node:
if is_first_node:
self.topo_position = MID_POSITION
return
if prev_pos >= (POSITION_UPPER_BOUND - APPEND_INTERVAL):
self.owning_block.reindex_topo()
return
self.topo_position = prev_pos + APPEND_INTERVAL
elif is_first_node:
if next_pos <= (POSITION_LOWER_BOUND + APPEND_INTERVAL):
self.owning_block.reindex_topo()
return
self.topo_position = next_pos - APPEND_INTERVAL
else:
pos_between = prev_pos + (next_pos - prev_pos) / 2
if pos_between == prev_pos:
self.owning_block.reindex_topo()
return
self.topo_position = pos_between
def next_node(self):
return self._neighbor_nodes[1]
def next_node(self, node):
self._neighbor_nodes[1] = node
def prev_node(self):
return self._neighbor_nodes[0]
def prev_node(self, node):
self._neighbor_nodes[0] = node
def in_node_list(self):
if self.next_node is None:
assert self.prev_node is None
return self.next_node is not None
def remove_from_list(self):
assert self.in_node_list()
if self.owning_block.input_node is self:
self.owning_block.input_node = self.next_node
self.owning_block = None
next_node = self.next_node
prev_node = self.prev_node
prev_node.next_node = next_node
next_node.prev_node = prev_node
self.next_node = None
self.prev_node = None
def add_in_tensor(self, tensor):
  # Register `tensor` as the next input: record a Use (this node, input
  # slot index) on the tensor and hand the tensor to the owning graph.
  tensor.uses.append(Use(self, len(self.in_tensors)))
  self._in_tensors.append(tensor)
  self.owning_graph.add_tensor(tensor)
def add_out_tensor(self, tensor):
  # Register `tensor` as the next output: the tensor records its output
  # slot offset and producing node, and the owning graph takes it.
  tensor.offset = len(self.out_tensors)
  self._out_tensors.append(tensor)
  tensor.node = self
  self.owning_graph.add_tensor(tensor)
def target_device(self):
  # Getter for the device this node is assigned to (likely @property).
  return self._target_device
def target_device(self, device):
  # Setter counterpart.
  self._target_device = device
def scope_name(self):
  # Getter for the node's scope name (likely @property).
  return self._scope_name
def scope_name(self, scope_name):
  # Setter counterpart.
  self._scope_name = scope_name
def source_range(self):
  # Getter for the node's originating source location (likely @property).
  return self._source_range
def source_range(self, source_range):
  # Setter counterpart.
  self._source_range = source_range
def normalized_name(self):
  # Getter for the node's normalized name (likely @property).
  return self._normalized_name
def normalized_name(self, name):
  # Setter counterpart.
  self._normalized_name = name
# Convolution-like op types treated as prunable producers by the pruning
# helpers below.
CONV_OPS = [
    OpTypes.CONV2D, OpTypes.CONVTRANSPOSE2D, OpTypes.CONV3D,
    OpTypes.CONVTRANSPOSE3D, OpTypes.SEPARABLECONV2D
]
def find_prunable_ancestor(graph, node, target_ops=CONV_OPS):
  """Find the nearest ancestor of `node` whose op type is in `target_ops`."""
  concat_types = [OpTypes.CONCAT]
  return find_ancestor(graph, node, target_ops, concat_types)
])
def raise_if_has_pruned_input(graph: Graph, node: Node, expanding_desc: Mapping[str, StructuredExpanding]):
  """Raise RuntimeError when any input of `node` was channel-expanded by pruning."""
  for input_name in node.in_nodes:
    desc = expanding_desc[input_name]
    if desc.added_out_channel <= 0:
      continue
    # Point the user at the conv that produced the expanded tensor so it
    # can be excluded from pruning.
    producer = graph.node(input_name)
    if producer.op.type in CONV_OPS:
      culprit = producer
    else:
      culprit = find_prunable_ancestor(graph, producer)
    raise RuntimeError(('Operation "{}" cannot take expanded tensor as input, '
                        'please exclude node "{}" from pruning.').format(
                            node.op.type, culprit.name))
23,314 | import numpy as np
import math
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP
from nndct_shared.utils import NndctOption
from nndct_shared.algorithms import breadth_first_search_handler
from .quant_ops import normal_quant_neuron
def normal_quant_neuron(data,
                        maxamps=[[32768], [2048]],
                        strides=[-1],
                        round_method=2,
                        keep_scale=True,
                        name='',
                        quantizer=None,
                        on_gpu=True,
                        as_int=False):
  """Quantize `data` via __amplify_data, per-tensor or per-stride-group.

  Args:
    data: numpy array to quantize.
    maxamps: pair of lists [[clamp bounds], [scale factors]]; one entry
      per stride group when several strides are given.
    strides: [-1] (single entry) quantizes the whole array with one
      (bound, scale) pair; otherwise each entry is the flat length of a
      slice quantized with its own pair.
    round_method: rounding mode forwarded to __amplify_data.
    keep_scale: divide back by the scale factor after rounding so the
      result stays in the original value range (forced off for as_int).
    name, quantizer, on_gpu: not used in this function body —
      NOTE(review): presumably kept for interface compatibility; confirm.
    as_int: return np.int8/np.int16 instead of the original dtype; only
      power-of-two bounds 2**3, 2**7 and 2**15 are accepted.

  Returns:
    The quantized array (int8/int16 when as_int, else data's dtype).

  Raises:
    TypeError: when as_int is set and the clamp bound is unsupported.

  NOTE(review): the list default arguments are shared across calls; safe
  only as long as they are never mutated in place.
  """
  # integer output never keeps scale as a precondition
  if as_int:
    keep_scale = False
  if len(strides) == 1:
    data = __amplify_data(
        data, maxamps[0][0], maxamps[1][0], method=round_method)
    if keep_scale:
      data = data / maxamps[1][0]
  else:
    # Per-group quantization over consecutive flat slices of `data`.
    org_shape = data.shape
    flatten_data = data.flatten()
    pos = 0
    for idx, s in enumerate(strides):
      flatten_data[pos:pos + s] = __amplify_data(
          flatten_data[pos:pos + s],
          maxamps[0][idx],
          maxamps[1][idx],
          method=round_method)
      if keep_scale:
        flatten_data[pos:pos + s] = flatten_data[pos:pos + s] / maxamps[1][idx]
      pos += s
    data = flatten_data.reshape(org_shape)
  # return integer or original dtype
  if as_int:
    assert all(m == maxamps[0][0]
               for m in maxamps[0]), "all max limitation should be the same"
    if maxamps[0][0] == 2**7 or maxamps[0][0] == 2**3:
      return data.astype(np.int8)
    elif maxamps[0][0] == 2**15:
      return data.astype(np.int16)
    else:
      raise TypeError("unexpected max found " + str(maxamps[0][0]))
  else:
    return data
def quantize_data2int(data, bn, fp, method=2):
  """Quantize `data` to integers with bit width `bn` and fraction length `fp`."""
  amp_limits = [[2**(bn - 1)], [2**fp]]
  return normal_quant_neuron(
      data, maxamps=amp_limits, round_method=method, as_int=True)
23,319 | import numpy as np
import math
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP
from nndct_shared.utils import NndctOption
from nndct_shared.algorithms import breadth_first_search_handler
from .quant_ops import normal_quant_neuron
def maybe_get_quantizer(quantizer=None):
def quant_channel_scale_params(node, channel_scale):
  """Quantize a node's channel-scale tensor as its 'output' when applicable.

  Returns `channel_scale` unchanged when the node is outside the
  quantizable part, no quantizer is configured, or the quantizer does
  not register this node's output for quantization.
  """
  quant_mode, quantizer = maybe_get_quantizer()
  # ignore parameters quantization if the node is not to be quantized
  if not node.in_quant_part or quantizer is None:
    return channel_scale
  if quantizer.need_quantize_tensor(node.name, 'output'):
    output_name = node.name
    # quant_mode == 2 means quantization (NOTE(review): 1 appears to be
    # calibration per the option help text elsewhere in this file).
    if quant_mode == 2:
      datatype = 'int'
      if NndctOption.nndct_only_int_quant.value is False:
        # Allow non-int datatypes when mixed-dtype quantization is on.
        datatype = quantizer.get_quant_dtype(node.name, tensor_type='output')
      channel_scale = quantizer.quantize(
          channel_scale, output_name, node, tensor_type='output', datatype=datatype)
  return channel_scale
23,328 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from nndct_shared.base.key_names import NNDCT_OP
from nndct_shared.base.key_names import FrameworkType
from nndct_shared.nndct_graph import base_tensor
class DataFormatMap(object):
  """A dict mapping of framework and op type to its data format.

  Maps (framework type, tensor rank) to the string layout of activation
  blobs and of parameters (e.g. a 4-D torch blob is "NCHW", a 4-D torch
  parameter is "OIHW").
  """

  _blob_format_map = {
      FrameworkType.NNDCT: {
          2: "NH",
          3: "NLC",
          4: "NHWC",
          5: "NHWDC"
      },
      FrameworkType.TORCH: {
          2: "NH",
          3: "NCL",
          4: "NCHW",
          5: "NCDHW"
      },
      # TF format generated in runtime.
  }

  _parameter_format_map = {
      FrameworkType.NNDCT: {
          2: "OI",
          3: "OLI",
          4: "OHWI",
          5: "OHWDI"
      },
      FrameworkType.TORCH: {
          2: "OI",
          3: "OIL",
          4: "OIHW",
          5: "OIDHW"
      },
      FrameworkType.TENSORFLOW: {
          2: "IO",
          3: "LIO",
          4: "HWIO",
          5: "DHWIO",
      }
  }

  # BUGFIX: both accessors take `cls` and are invoked on the class with two
  # positional args (see convert_parameter_tensor_format), so they must be
  # classmethods; the decorators were missing.
  @classmethod
  def blob_format(cls, framework_type, ndim):
    """Return the activation (blob) layout string for a framework/rank.

    Raises:
      KeyError: for unsupported framework types.
    """
    if framework_type not in cls._blob_format_map:
      raise KeyError(
          "Framework type '{}' not supported now.".format(framework_type))
    return cls._blob_format_map[framework_type][ndim]

  @classmethod
  def param_format(cls, framework_type, ndim):
    """Return the parameter layout string for a framework/rank.

    Raises:
      KeyError: for unsupported framework types.
    """
    if framework_type not in cls._parameter_format_map:
      raise KeyError(
          "Framework type '{}' not supported now.".format(framework_type))
    return cls._parameter_format_map[framework_type][ndim]
def layout_transformer(src_layout, dst_layout):
  """Return the permutation axes that transpose src_layout into dst_layout."""
  assert len(src_layout) == len(dst_layout)
  # For every axis label in the destination, find where it lives in the source.
  return tuple(src_layout.index(axis) for axis in dst_layout)
def convert_parameter_tensor_format(tensor: base_tensor.Tensor,
                                    src_framework: str,
                                    dst_framework: str) -> base_tensor.Tensor:
  """Transpose a parameter tensor from src to dst framework layout.

  The transpose happens in place on `tensor`, which is also returned.
  The tensor is returned unchanged when it is incomplete, the two
  frameworks match, its rank has no registered parameter layout, or the
  two layouts are already identical.

  Raises:
    TypeError: when `tensor` is not a base_tensor.Tensor.
  """
  if not isinstance(tensor, base_tensor.Tensor):
    raise TypeError("'tensor' must be Tensor, but given {}".format(
        type(tensor)))
  if not tensor.is_complete_tensor():
    return tensor
  if src_framework == dst_framework:
    return tensor
  # Ranks without a registered parameter layout pass through untouched.
  if tensor.ndim not in DataFormatMap._parameter_format_map[src_framework].keys():
    return tensor
  src_format = DataFormatMap.param_format(src_framework, tensor.ndim)
  dst_format = DataFormatMap.param_format(dst_framework, tensor.ndim)
  if src_format == dst_format:
    return tensor
  tensor.transpose(layout_transformer(src_format, dst_format))
  return tensor
23,333 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging as _logging
import os as _os
import sys as _sys
import time as _time
import traceback as _traceback
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import WARN
from logging import NOTSET
import threading
from .option_list import NndctOption
from nndct_shared.base import SingletonMeta
from .msg_code import QError, QWarning
def _log_prefix(level, timestamp=None, file_and_line=None):
def get_logger(name=None, level=None, file_name=None, only2file=False):
def debug(msg, *args, **kwargs):
  """Log `msg` at DEBUG severity with the nndct log prefix attached."""
  extra = {'nndct_prefix': _log_prefix(DEBUG)}
  # Consistency fix: pass *args before extra=, matching the sibling
  # info()/error() helpers (behavior is unchanged).
  get_logger().debug(msg, *args, extra=extra, **kwargs)
23,334 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging as _logging
import os as _os
import sys as _sys
import time as _time
import traceback as _traceback
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import WARN
from logging import NOTSET
import threading
from .option_list import NndctOption
from nndct_shared.base import SingletonMeta
from .msg_code import QError, QWarning
def _log_prefix(level, timestamp=None, file_and_line=None):
  """Generate a nndct logline prefix.

  Format: "<severity><MMDD> <HH:MM:SS.micros> <basename>:<line>]".

  Args:
    level: logging level used to select the one-letter severity code.
    timestamp: seconds since the epoch; defaults to the current time.
    file_and_line: (filename, line) of the call site; resolved via
      _get_file_and_line() when not supplied.
  """
  # pylint: disable=global-variable-not-assigned
  global _level_names
  # pylint: enable=global-variable-not-assigned
  # Record current time
  now = timestamp or _time.time()
  now_tuple = _time.localtime(now)
  now_microsecond = int(1e6 * (now % 1.0))
  (filename, line) = file_and_line or _get_file_and_line()
  basename = _os.path.basename(filename)
  # Severity string; unknown levels default to 'I'.
  severity = 'I'
  if level in _level_names:
    severity = _level_names[level][0]
  s = '%c%02d%02d %02d:%02d:%02d.%06d %s:%d]' % (
      severity,
      now_tuple[1],  # month
      now_tuple[2],  # day
      now_tuple[3],  # hour
      now_tuple[4],  # min
      now_tuple[5],  # sec
      now_microsecond,
      basename,
      line)
  return s
def get_logger(name=None, level=None, file_name=None, only2file=False):
  """Return logger instance.

  Args:
    name: logger name; None yields the root-named logger.
    level: explicit logging level; when falsy, level 1 (log everything)
      is used instead.
    file_name: optional path; when given a FileHandler is attached.
    only2file: when True, no stream handler is attached.
  """
  # Serialize configuration so concurrent calls cannot race on handlers.
  _logger_lock.acquire()
  try:
    # Scope the nndct logger to not conflict with users' loggers.
    logger = _logging.getLogger(name)
    if level:
      # BUGFIX: honor the requested level; the original hardcoded INFO
      # here and silently ignored the `level` argument's value.
      logger.setLevel(level)
    else:
      logger.setLevel(1)
    # Override findCaller on the logger to skip internal helper functions
    logger.findCaller = _logger_find_caller
    # Don't further configure the logger if the root logger is already
    # configured. This prevents double logging in those cases.
    if not _logging.getLogger().handlers:
      # Determine whether we are in an interactive environment
      _interactive = False
      try:
        # This is only defined in interactive shells.
        if _sys.ps1:
          _interactive = True
      except AttributeError:
        # Even now, we may be in an interactive shell with `python -i`.
        _interactive = _sys.flags.interactive
      # Interactive environments (like Jupyter) log to stdout.
      if _interactive:
        _logging_target = _sys.stdout
      else:
        _logging_target = _sys.stderr
      # Attach a stream handler only once.
      if not only2file and all([not isinstance(hdler, _logging.StreamHandler) for hdler in logger.handlers]):
        _handler = _logging.StreamHandler(_logging_target)
        logger.addHandler(_handler)
      if file_name is not None:
        _file_handler = _logging.FileHandler(file_name)
        logger.addHandler(_file_handler)
    return logger
  finally:
    _logger_lock.release()
def info(msg, *args, **kwargs):
  """Log `msg` at INFO severity with the nndct log prefix attached."""
  get_logger().info(msg, *args,
                    extra={'nndct_prefix': _log_prefix(INFO)}, **kwargs)
23,336 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging as _logging
import os as _os
import sys as _sys
import time as _time
import traceback as _traceback
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import WARN
from logging import NOTSET
import threading
from .option_list import NndctOption
from nndct_shared.base import SingletonMeta
from .msg_code import QError, QWarning
def _log_prefix(level, timestamp=None, file_and_line=None):
  """Generate a nndct logline prefix.

  Format: "<severity><MMDD> <HH:MM:SS.micros> <basename>:<line>]".

  Args:
    level: logging level used to select the one-letter severity code.
    timestamp: seconds since the epoch; defaults to the current time.
    file_and_line: (filename, line) of the call site; resolved via
      _get_file_and_line() when not supplied.
  """
  # pylint: disable=global-variable-not-assigned
  global _level_names
  # pylint: enable=global-variable-not-assigned
  # Record current time
  now = timestamp or _time.time()
  now_tuple = _time.localtime(now)
  now_microsecond = int(1e6 * (now % 1.0))
  (filename, line) = file_and_line or _get_file_and_line()
  basename = _os.path.basename(filename)
  # Severity string; unknown levels default to 'I'.
  severity = 'I'
  if level in _level_names:
    severity = _level_names[level][0]
  s = '%c%02d%02d %02d:%02d:%02d.%06d %s:%d]' % (
      severity,
      now_tuple[1],  # month
      now_tuple[2],  # day
      now_tuple[3],  # hour
      now_tuple[4],  # min
      now_tuple[5],  # sec
      now_microsecond,
      basename,
      line)
  return s
def get_logger(name=None, level=None, file_name=None, only2file=False):
  """Return logger instance.

  Args:
    name: logger name; None yields the root-named logger.
    level: explicit logging level; when falsy, level 1 (log everything)
      is used instead.
    file_name: optional path; when given a FileHandler is attached.
    only2file: when True, no stream handler is attached.
  """
  # Serialize configuration so concurrent calls cannot race on handlers.
  _logger_lock.acquire()
  try:
    # Scope the nndct logger to not conflict with users' loggers.
    logger = _logging.getLogger(name)
    if level:
      # BUGFIX: honor the requested level; the original hardcoded INFO
      # here and silently ignored the `level` argument's value.
      logger.setLevel(level)
    else:
      logger.setLevel(1)
    # Override findCaller on the logger to skip internal helper functions
    logger.findCaller = _logger_find_caller
    # Don't further configure the logger if the root logger is already
    # configured. This prevents double logging in those cases.
    if not _logging.getLogger().handlers:
      # Determine whether we are in an interactive environment
      _interactive = False
      try:
        # This is only defined in interactive shells.
        if _sys.ps1:
          _interactive = True
      except AttributeError:
        # Even now, we may be in an interactive shell with `python -i`.
        _interactive = _sys.flags.interactive
      # Interactive environments (like Jupyter) log to stdout.
      if _interactive:
        _logging_target = _sys.stdout
      else:
        _logging_target = _sys.stderr
      # Attach a stream handler only once.
      if not only2file and all([not isinstance(hdler, _logging.StreamHandler) for hdler in logger.handlers]):
        _handler = _logging.StreamHandler(_logging_target)
        logger.addHandler(_handler)
      if file_name is not None:
        _file_handler = _logging.FileHandler(file_name)
        logger.addHandler(_file_handler)
    return logger
  finally:
    _logger_lock.release()
def error(msg, *args, **kwargs):
  """Log `msg` at ERROR severity with the nndct log prefix attached."""
  get_logger().error(msg, *args,
                     extra={'nndct_prefix': _log_prefix(ERROR)}, **kwargs)
23,346 | import numpy as np
from nndct_shared.base import NNDCT_OP
def get_in_out_channel_idx(ndim, optype, data_formats):
def get_tensor_in_dim(tensor, optype, data_formats):
  """Return the size of the tensor's input-channel dimension."""
  in_channel_axis = get_in_out_channel_idx(tensor.ndim, optype, data_formats)[0]
  return tensor.shape[in_channel_axis]
23,350 | from typing import TypeVar, NoReturn, Optional, Iterator, List
from .option_list import NndctOption
from .option_def import Option, T
class NndctOption(object):
  """Registry of all nndct runtime options.

  Each class attribute is an Option descriptor (name, dtype, default,
  optional env-var binding and argparse action).  Values are read via
  `NndctOption.<attr>.value` and written via set_option_value().
  """

  nndct_help = Option(name="help", dtype=bool, default=False, action="store_true", help="list all api usage description")
  nndct_quant_off = Option(name="quant_off", dtype=bool, default=False, action="store_true", help="disable quantization flow")
  nndct_option_list = Option(name="option_list", dtype=bool, default=False, action="store_true", help="list all the options in nndct")
  nndct_parse_debug = Option(name="parse_debug", dtype=int, default=0, env="NNDCT_PARSE_DEBUG", help="logging graph, 1: torch raw graph, 2: nndct graph 3: nndct quant graph")
  nndct_logging_level = Option(name="logging_level", dtype=int, default=0, help="logging level")
  nndct_quant_mode = Option(name="quant_mode", dtype=int, default=0, help="quant mode, 1:calibration, 2:quantization")
  nndct_dump_float_format = Option(name="dump_float_format", dtype=int, default=0, help="deploy check data format, 0: bin, 1: txt")
  nndct_record_slow_mode = Option(name="record_slow_mode", dtype=bool, default=False, action="store_true", help="record outputs every iteration")
  nndct_quant_opt = Option(name="quant_opt", dtype=int, default=3, help="quant opt level")
  nndct_relu6_replace = Option(name="relu6_replace", dtype=str, default='relu', help="relu6 replace operator")
  nndct_equalization = Option(name="equalization", dtype=bool, default=True, action="store_true", help="enable weights equalization")
  # nndct_wes = Option(name="weights_equalizing_shift", dtype=bool, default=False, action="store_true",
  #                    help="enable weights equalizing shift")
  # nndct_wes_in_cle = Option(name="weights_equalizing_shift in cle", dtype=bool, default=False, action="store_true",
  #                    help="enable weights equalizing shift in cle")
  nndct_param_corr = Option(name="param_corr", dtype=bool, default=True, action="store_true", help="enable parameter correction")
  nndct_param_corr_rate = Option(name="param_corr_rate", dtype=float, default=0.05, help="parameter correction rate")
  nndct_cv_app = Option(name="cv_app", dtype=bool, default=True, action="store_true", help="cv application")
  nndct_finetune_lr_factor = Option(name="finetune_lr_factor", dtype=float, default=0.01, help="finetune learning rate factor")
  nndct_partition_mode = Option(name="partition_mode", dtype=int, default=0, help="0: quant stub controled. 1: custom op controled")
  nndct_stat = Option(name="stat", dtype=int, default=0, help="quantizer statistic level")
  nndct_jit_script_mode = Option(name="jit_script_mode", dtype=bool, default=False, action="store_true", help="enable torch script parser")
  nndct_diffs_mode = Option(name="diffs_mode", dtype=str, default='mse', help="diffs_mode: mse, maxmin")
  nndct_ft_mode = Option(name="ft_mode", dtype=int, default=1, help="1: mix mode 0: cache mode")
  nndct_visualize = Option(name="visualize", dtype=bool, default=False, action="store_true", help="visualize tensors")
  nndct_dump_no_quant_part = Option(name="dump_no_quant_part", dtype=bool, default=False, action="store_true", help="dump no quantized nodes")
  nndct_max_fix_position = Option(name="max_fix_position", dtype=int, default=12, help="maximum of fix position")
  nndct_use_torch_quantizer = Option(name="use_torch_quantizer", dtype=bool, default=False, action="store_true", help="enable torch quantizer")
  nndct_jit_trace = Option(name="jit_trace", dtype=bool, default=False, action="store_true", env="NNDCT_JIT_TRACE", help="parse graph from script tracing")
  nndct_jit_script = Option(name="jit_script", dtype=bool, default=False, action="store_true", help="parse graph from script")
  nndct_calib_histogram_bins = Option(name="calib_histogram_bins", dtype=int, default=2048, help="calibration histogram bins number")
  nndct_mse_start_bin = Option(name="mse_start_bin", dtype=int, default=1536, help="mse calibration method start bin")
  nndct_mse_stride = Option(name="mse_stride", dtype=int, default=16, help="mse calibration method stride")
  nndct_entropy_start_bin = Option(name="entropy_start_bin", dtype=int, default=1536, help="entropy calibration method start bin")
  nndct_entropy_stride = Option(name="entropy_stride", dtype=int, default=16, help="entropy calibration method stride")
  nndct_convert_relu6_to_relu = Option(name="convert_relu6_to_relu", dtype=bool, default=False, help="convert relu6 to relu")
  nndct_convert_sigmoid_to_hsigmoid = Option(name="convert_sigmoid_to_hsigmoid", dtype=bool, default=False, action="store_true", help="convert sigmoid to hsigmoid")
  nndct_convert_silu_to_hswish = Option(name="convert_silu_to_hswish", dtype=bool, default=False, action="store_true", help="convert silu to hswish")
  nndct_keep_first_last_layer_accuracy = Option(name="keep_first_last_layer_accuracy", dtype=bool, default=False, help="keep accuracy of first and last layer")
  nndct_keep_add_layer_accuracy = Option(name="keep_add_layer_accuracy", dtype=bool, default=False, help="keep accuracy of add layer")
  nndct_avg_pool_approximate = Option(name="avg_pool_approximate", dtype=bool, default=True, action="store_true", help="enable average pooling approximate for dpu")
  nndct_leaky_relu_approximate = Option(name="leaky_relu_approximate", dtype=bool, default=True, action="store_true", help="enable leaky relu approximate for dpu")
  nndct_conv_bn_merge = Option(name="conv_bn_merge", dtype=bool, default=True, action="store_true", help="enable conv and bn merge")
  nndct_input_quant_only = Option(name="input_quant_only", dtype=bool, default=False, action="store_false", help="only quantize the input")
  nndct_tensorrt_strategy = Option(name="tensorrt_strategy", dtype=bool, default=False, action="store_true", help="use quantization strategy as tensorrt")
  nndct_tensorrt_quant_algo = Option(name="tensorrt_quant_algo", dtype=bool, default=False, action="store_true", help="use tensorrt quantization algorithm")
  nndct_calibration_local = Option(name="calibration_local", dtype=bool, default=True, action="store_true", help="calibration in local batch data")
  nndct_change_concat_input_fix = Option(name="change_concat_input_fix", dtype=bool, default=False, action="store_true", help="change concat input nodes fix point to be the same as concat output node")
  nndct_change_pool_input_fix = Option(name="change_pool_input_fix", dtype=bool, default=False, action="store_true", help="change pooling input nodes fix point to be the same as their output node")
  nndct_change_add_input_fix = Option(name="change_add_input_fix", dtype=bool, default=False, action="store_true", help="change add input nodes fix point to be the identical")
  nndct_insert_concat_input_fix = Option(name="insert_concat_input_fix", dtype=bool, default=False, action="store_true", help="insert concat input nodes fix point to be the same as concat output node")
  nndct_export_jit = Option(name="export_jit", dtype=bool, default=False, action="store_true", env="NNDCT_EXPORT_JIT", help="export quant script by inserting fixneuron")
  nndct_deploy_check = Option(name="deploy_check", dtype=bool, default=False, action="store_true", help="dump deploy data in forward process")
  nndct_input_check = Option(name="input_check", dtype=bool, default=False, action="store_true", help="dump input float data in forward process")
  nndct_op_tanh_sigmoid_mode = Option(name="tanh_sigmoid_mode", dtype=str, default='quant_input_output', help="Tanh/sigmoid quantization mode: quant_input_output, table_look_up, simulation, aie2_lut_16bw")
  nndct_op_softmax_mode = Option(name="softmax_mode", dtype=str, default='quant_input_output', help="Softmax quantization mode: quant_input_output, hardware_pl, liyi, aie2_lut_16bw, bert_8bw, ipu_8bw")
  nndct_op_logsoftmax_mode = Option(name="logsoftmax_mode", dtype=str, default='quant_input_output', help="Logsoftmax quantization mode: quant_input_output, aie2_lut_16bw")
  nndct_op_gelu_mode = Option(name="gelu_mode", dtype=str, default='quant_input_output', help="GELU quantization mode: quant_input_output, dynamic_table")
  nndct_op_layernorm_mode = Option(name="layernorm_mode", dtype=str, default='quant_input_output', help="Layernorm quantization mode: quant_input_output, aie2_16bw, bert_8bw")
  nndct_ip_asr = Option(name="ip_asr", dtype=bool, default=False, action="store_true", help="asr quant method")
  nndct_ip_v70_bert = Option(name="ip_v70_bert", dtype=bool, default=False, action="store_true", help="bert v70 quant method")
  nndct_ip_v70_bert_qat = Option(name="ip_v70_bert_qat", dtype=bool, default=False, action="store_true", help="bert v70 quant method")
  nndct_use_old_inspector = Option(name="use_old_inspector", dtype=bool, default=False, action="store_true", env="NNDCT_USE_OLD_INSPECTOR", help="switch to old inspector")
  nndct_calib_before_finetune = Option(name="calib_before_finetune", dtype=bool, default=False, action="store_true", help="calibration before fast finetune")
  nndct_inspect_debug = Option(name="inspect_debug", dtype=bool, default=False, action="store_true", env="NNDCT_INSPECT_DEBUG", help="turn on inspector")
  nndct_op_instancenorm_mode = Option(name="instancenorm_mode", dtype=str, default='quant_input_output', help="Instancenorm quantization mode: quant_input_output, ipu_8bw")
  nndct_op_groupnorm_mode = Option(name="groupnorm_mode", dtype=str, default='quant_input_output', help="Groupnorm quantization mode: quant_input_output, ipu_8bw")
  nndct_native_onnx = Option(name="native_onnx", dtype=bool, default=False, action="store_true", help="export native quant-dequant onnx models")
  nndct_inspect_test = Option(name="inspect_test", dtype=bool, default=False, action="store_true", env="NNDCT_INSPECT_TEST", help="embed target related test in torch quantizer")
  nndct_target = Option(name="target", dtype=str, default="", env="NNDCT_TARGET", help="target name")
  nndct_traversal_graph_mode = Option(name="nndct_traversal_graph_mode", dtype=int, default=0, env="NNDCT_GRAPH_SEARCH", help="0: auto, 1:recursion, 2: iteration")
  nndct_op_sqrt_mode = Option(name="sqrt_mode", dtype=str, default='quant_input_output', help="sqrt quantization mode: quant_input_output, ipu_8bw")
  nndct_onnx_opset_version = Option(name="onnx_opset_version", dtype=int, default=-1, help="opset_version of dumped onnx graph")
  nndct_only_int_quant = Option(name="only_int_quant", dtype=bool, default=True, help="only int datatype quantization included")
  nndct_gemm88 = Option(name="gemm88", dtype=bool, default=False, action="store_true", help="only quant gemm88 and matmul88")
  nndct_pooling_split_mode = Option(name="nndct_pooling_split_mode", dtype=bool, default=False, help="default big pooling will split to small pooling")
  nndct_fx_mode = Option(name="fx_mode", dtype=bool, default=False, action="store_true", env="NNDCT_FX_MODE", help="turn on fx mode")
T = TypeVar('T')
def set_option_value(option_name: str, option_value: T) -> None:
  """Set the value of the named option declared on NndctOption.

  Typing fix: annotated -> None; `NoReturn` is reserved for functions
  that never return normally, which this one does.

  Raises:
    KeyError: if `option_name` is not an attribute of NndctOption.
  """
  NndctOption.__dict__[option_name].value = option_value
23,360 | from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP, NNDCT_DEBUG_LVL
from .log import nndct_debug_print
def remove_trans_scp_prefix(name, scp=None):
  """Strip scope prefix `scp` and any leading TRANS_SCOPE path segment."""
  stripped = remove_prefix(name, scp)
  if stripped.startswith(NNDCT_KEYS.TRANS_SCOPE):
    # Drop the first '/'-separated segment (the TRANS_SCOPE marker).
    stripped = '/'.join(stripped.split('/')[1:])
  return stripped
def scoped_untrans_name(name, scp):
  """Re-scope `name` after removing any TRANS_SCOPE segment from it."""
  return scp + remove_trans_scp_prefix(name, scp)
23,361 | from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP, NNDCT_DEBUG_LVL
from .log import nndct_debug_print
def remove_prefix(obj, prefix):
  """Recursively strip `prefix` from strings inside `obj`.

  Args:
    obj: a string, dict (values processed), list, None, or anything else
      (returned unchanged).
    prefix: a string, a list of strings (applied in order), None or ''.

  Raises:
    Exception: when `prefix` is neither a string nor a list.
  """
  if obj is None:
    return obj
  if prefix is None or prefix == '':
    return obj
  if isinstance(prefix, list):
    # Apply each prefix in sequence over the whole structure.
    for single_prefix in prefix:
      obj = remove_prefix(obj, single_prefix)
    return obj
  if not isinstance(prefix, str):
    raise Exception('prefix {} is not string or list!'.format(prefix))
  if isinstance(obj, str) and len(prefix) > 0 and obj.startswith(prefix):
    return obj[len(prefix):]
  if isinstance(obj, dict):
    return {key: remove_prefix(value, prefix) for key, value in obj.items()}
  if isinstance(obj, list):
    return [remove_prefix(item, prefix) for item in obj]
  return obj
def scoped_trans_name(name, scp):
  """Prefix `name` with scope `scp`, inserting TRANS_SCOPE when absent."""
  bare = remove_prefix(name, scp)
  if bare.startswith(NNDCT_KEYS.TRANS_SCOPE):
    return scp + bare
  return scp + NNDCT_KEYS.TRANS_SCOPE + '/' + bare
23,362 | from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP, NNDCT_DEBUG_LVL
from .log import nndct_debug_print
def remove_trans_scp_prefix(name, scp=None):
  """Strip scope prefix `scp`; drop a leading TRANS_SCOPE segment if present."""
  name = remove_prefix(name, scp)
  if not name.startswith(NNDCT_KEYS.TRANS_SCOPE):
    return name
  return '/'.join(name.split('/')[1:])
GLOBAL_MAP = GlobalMap()
def nndct_debug_print(string, title='', level=1):
  """Print `string` when the global debug flag is on and `level` is allowed.

  title='Start'/'End' wraps the message in a banner line.
  """
  # Short-circuit keeps VERBOSE_LEVEL unread when debugging is off,
  # matching the original evaluation order.
  if not (True == GLOBAL_MAP.get_ele(NNDCT_KEYS.DEBUG_FLAG) and
          level <= GLOBAL_MAP.get_ele(NNDCT_KEYS.VERBOSE_LEVEL)):
    return
  logger = GLOBAL_MAP.get_ele(NNDCT_KEYS.LOGGER)
  if title == 'Start':
    string = "\n********************* <{} : {}> *********************".format(
        title, string)
  elif title == 'End':
    string = "\n********************* <{} : {}> *********************\n".format(
        title, string)
  message = "[NNDCT_DEBUG_Lv_{}] {}".format(level, string)
  if logger:
    logger.debug(message)
  else:
    print(message)
def map_output_and_node(output, node_or_name, model_type):
  """Record the output-name <-> node-name mapping in the global maps.

  Args:
    output: output name (only the str case is handled in this code path).
    node_or_name: node object or node name string.
    model_type: not used in this body — NOTE(review): possibly consumed
      by code outside this view; confirm before removing.
  """
  if node_or_name is None:
    return
  if isinstance(node_or_name, str):
    node_name = node_or_name
  else:
    node_name = node_or_name.name
  # Names may carry a translation-scope prefix; strip it first.
  node_name = remove_trans_scp_prefix(node_name)

  def _do_map(output_name, node_name):
    # Identity mappings are not recorded.
    if not output_name == node_name:
      # Lazily create both global maps on first use.
      if not GLOBAL_MAP.get_ele(NNDCT_KEYS.OUTPUT_TO_NODE_MAP):
        GLOBAL_MAP.set_map(NNDCT_KEYS.OUTPUT_TO_NODE_MAP, {})
      if not GLOBAL_MAP.get_ele(NNDCT_KEYS.NODE_TO_OUTPUT_MAP):
        GLOBAL_MAP.set_map(NNDCT_KEYS.NODE_TO_OUTPUT_MAP, {})
      # map output to node (one node per output; re-mapping must agree)
      output_to_node_map = GLOBAL_MAP.get_ele(NNDCT_KEYS.OUTPUT_TO_NODE_MAP)
      if not output_name in output_to_node_map:
        nndct_debug_print(
            "<map_output_and_node> map out {} and node{}".format(
                output_name, node_name),
            level=NNDCT_DEBUG_LVL.BUILD_GRAPH)
        output_to_node_map[output_name] = node_name
      else:
        assert output_to_node_map[
            output_name] == node_name, "restored node name for output_name {} is {}, meet new node name {}".format(
                output_name, output_to_node_map[output_name], node_name)
      # add output to list keyed by node_name (a node may own many outputs)
      node_to_output_map = GLOBAL_MAP.get_ele(NNDCT_KEYS.NODE_TO_OUTPUT_MAP)
      if not node_name in node_to_output_map:
        node_to_output_map[node_name] = [output_name]
      else:
        node_to_output_map[node_name].append(output_name)

  if isinstance(output, str):
    _do_map(output, node_name)
23,366 | from enum import Enum
def readable_num(number):
  """Format `number` human-readably with K/M/G/T suffixes (2 decimals)."""
  sign = ''
  if number < 0:
    sign = '-'
    number = -number
  if number < 1000:
    return sign + '%d' % number
  if number > 1e15:
    # Beyond the suffix table; fall back to scientific-ish formatting.
    return sign + '%0.3G' % number
  units = 'KMGT'
  unit_index = 0
  # Reduce until the value fits within one suffix step of the final /1000.
  while number > 1000000:
    number /= 1000
    unit_index += 1
  return sign + '%.2f%s' % (number / 1000.0, units[unit_index])
23,367 | from enum import Enum
def print_table(header, rows):
  """Pretty-print `rows` under `header` as a fixed-width text table.

  Args:
    header: sequence of column titles.
    rows: sequence of rows; each row must have len(header) fields.

  Raises:
    ValueError: when any row length differs from the header length.
  """
  if any(len(row) != len(header) for row in rows):
    raise ValueError('Column length must be equal to headers')
  # Column width = widest cell (header included) in that column.
  column_widths = [len(field) for field in header]
  for row in rows:
    for i, field in enumerate(row):
      column_widths[i] = max(len(str(field)), column_widths[i])
  spaces_between_columns = 1
  # Absolute end position of each column within a printed line.
  current_pos = 0
  column_positions = []
  for i in range(len(column_widths)):
    column_positions.append(current_pos + column_widths[i] +
                            spaces_between_columns)
    current_pos = column_positions[-1]
  line_length = column_positions[-1]

  def print_row(fields, positions):
    # Render one row: each field is appended, truncated at its column end
    # and padded with spaces up to that end.
    line = ''
    for i in range(len(fields)):
      if i > 0:
        # Force a separating space even when the previous field filled
        # its column completely.
        line = line[:-1] + ' '
      line += str(fields[i])
      line = line[:positions[i]]
      line += ' ' * (positions[i] - len(line))
    print(line)

  print('=' * line_length)
  print_row(header, column_positions)
  print('=' * line_length)
  for row in rows:
    print_row(row, column_positions)
  print('-' * line_length)
23,373 | import os
import shutil
import json
import sys
import numpy as np
from .log import log_or_print
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP
def copy_folder_files(new_dir, old_dir):
def force_create_dir(dir_name, copy_from_dir=None):
  """Delete `dir_name` if it exists, recreate it empty, then optionally
  copy files into it from `copy_from_dir`.

  WARNING: destroys any existing contents of `dir_name`.
  """
  if os.path.exists(dir_name):
    shutil.rmtree(dir_name)
  os.makedirs(dir_name)
  if copy_from_dir:
    copy_folder_files(dir_name, copy_from_dir)
23,374 | import os
import shutil
import json
import sys
import numpy as np
from .log import log_or_print
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP
def create_work_dir(dir_name):
  """Create `dir_name` (and missing parents) if it does not exist.

  Uses exist_ok=True instead of an exists() pre-check to avoid the
  check-then-create race when several processes start concurrently.
  """
  os.makedirs(dir_name, exist_ok=True)
23,383 | import sys
import os
import logging
import io
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP, NNDCT_DEBUG_LVL
def obj_to_str(obj):
def nndct_debug_print(string, title='', level=1):
def nndct_details_debug(obj, title, level=NNDCT_DEBUG_LVL.DETAILS):
  """Dump a detailed representation of `obj` between Start/End banners."""
  header = "\n********************* <Start : {}> *********************\n{}".format(
      title, obj_to_str(obj))
  nndct_debug_print(header, level=level)
  nndct_debug_print(title, title='End', level=level)
23,391 | import math
from nndct_shared.base import NNDCT_OP
def get_avgpool_dpu_coeff(kernel):
  """Return the DPU scale coefficient approximating average pooling.

  Well-known kernel sizes use precomputed multiplier/shift constants;
  any other size searches for the (multiplier, shift) pair minimizing
  |multiplier / 2**shift - 1 / (kh * kw)|.
  """
  if kernel == [3, 3]:
    return 9.0 * 7.0 / 64.0
  if kernel == [5, 5]:
    return 25.0 * 10.0 / 256.0
  if kernel in [[6, 6], [3, 6], [6, 3]]:
    return 36.0 * 7.0 / 256.0
  if kernel == [7, 7]:
    return 49.0 * 21.0 / 1024.0
  if kernel == [14, 14]:
    return 196.0 * 21.0 / 4096.0
  # Generic case: exhaustive search over shift amounts.
  area = kernel[0] * kernel[1]
  best_diff = 1.0
  best_multiplier = 0.0
  best_shift = 0.0
  for shift in range(math.ceil(math.log(area * 128, 2))):
    multiplier = round((2 ** shift) / area)
    diff = abs(multiplier / (2 ** shift) - 1 / area)
    if diff < best_diff:
      best_multiplier = multiplier
      best_diff = diff
      best_shift = shift
  return area * best_multiplier / (2 ** best_shift)
23,392 | import h5py
import json
from nndct_shared.nndct_graph.base_tensor import Tensor
class GraphHDF5Saver():
def __init__(self, nndct_graph):
def get_node_config(self, node):
def get_model_config(self):
def save(self, hdf5_path):
def save_graph(nndct_graph, hdf5_path='graph.hdf5'):
GraphHDF5Saver(nndct_graph).save(hdf5_path) | null |
23,393 | import copy
import gc
import inspect
import json
import numpy as np
import os
import random
import tensorflow as tf
import types
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from nndct_shared.pruning import errors
from nndct_shared.pruning import pruner as pruner_lib
from nndct_shared.pruning import pruning_lib
from nndct_shared.pruning import search
from nndct_shared.pruning import sensitivity as sens
from nndct_shared.utils import common
from tf_nndct.graph import parser
from tf_nndct.graph.ops import OpTypes
from tf_nndct.pruning import pruning_impl
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils as ku
from tf_nndct.utils import logging
from tf_nndct.utils import tensor_utils
from nndct_shared.pruning.utils import generate_indices_group
from nndct_shared.pruning.pruning_lib import is_grouped_conv, is_depthwise_conv
import os
# Create the working directory eagerly. exist_ok=True closes the TOCTOU race
# the previous exists()-then-makedirs() pair had when two processes started
# concurrently, and is a no-op when the directory is already present.
os.makedirs(BASE_DIR, exist_ok=True)
def _is_debug():
return os.environ.get('VAI_OPTIMIZER_DEBUG', None) == '1' | null |
23,394 | import copy
import gc
import inspect
import json
import numpy as np
import os
import random
import tensorflow as tf
import types
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from nndct_shared.pruning import errors
from nndct_shared.pruning import pruner as pruner_lib
from nndct_shared.pruning import pruning_lib
from nndct_shared.pruning import search
from nndct_shared.pruning import sensitivity as sens
from nndct_shared.utils import common
from tf_nndct.graph import parser
from tf_nndct.graph.ops import OpTypes
from tf_nndct.pruning import pruning_impl
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils as ku
from tf_nndct.utils import logging
from tf_nndct.utils import tensor_utils
from nndct_shared.pruning.utils import generate_indices_group
from nndct_shared.pruning.pruning_lib import is_grouped_conv, is_depthwise_conv
keras = tf.keras
def add_pruning_mask(instance):
  """Monkey-patch `build` and `call` on a layer instance to add pruning masks.

  After patching, `build` creates one non-trainable all-ones mask variable per
  weight and stores a `pruning_impl.Pruning` object on the instance; `call`
  applies weights = weights * mask before delegating to the original call.
  Returns the same (mutated) instance.
  """

  def build(self, input_shape):
    # Run the class's original build first so self.weights is populated.
    type(self).build(self, input_shape)

    weight_vars, mask_vars, = [], []
    # For each of the weights, add mask variables.
    for weight in self.weights:
      # res2a_branch2a/kernel:0 -> kernel
      weight_name = weight.name.split('/')[-1].split(':')[0]
      mask = self.add_weight(
          weight_name + '_mask',
          shape=weight.shape,
          initializer=keras.initializers.get('ones'),
          dtype=weight.dtype,
          trainable=False,
          aggregation=tf.VariableAggregation.MEAN)

      weight_vars.append(weight)
      mask_vars.append(mask)
    pruning_vars = list(zip(weight_vars, mask_vars))

    # Create a pruning object
    self.pruning_obj = pruning_impl.Pruning(pruning_vars)

  def call(self, inputs, training=None, **kwargs):
    if training is None:
      training = keras.backend.learning_phase()

    # Always execute the op that performs weights = weights * mask
    # Relies on UpdatePruningStep callback to ensure the weights
    # are sparse after the final backpropagation.
    #
    # self.add_update does nothing during eager execution.
    self.add_update(self.pruning_obj.weight_mask_op())

    # TODO(evcu) remove this check after dropping py2 support. In py3 getargspec
    # is deprecated.
    layer_call = type(self).call
    if hasattr(inspect, 'getfullargspec'):
      args = inspect.getfullargspec(layer_call).args
    else:
      args = inspect.getargspec(layer_call).args

    # Propagate the training bool to the underlying layer if it accepts
    # training as an arg.
    if 'training' in args:
      return layer_call(self, inputs, training=training, **kwargs)

    return layer_call(self, inputs, **kwargs)

  # Bind the patched functions to this instance only (not the class).
  instance.build = types.MethodType(build, instance)
  instance.call = types.MethodType(call, instance)
  return instance
23,395 | import copy
import gc
import inspect
import json
import numpy as np
import os
import random
import tensorflow as tf
import types
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from nndct_shared.pruning import errors
from nndct_shared.pruning import pruner as pruner_lib
from nndct_shared.pruning import pruning_lib
from nndct_shared.pruning import search
from nndct_shared.pruning import sensitivity as sens
from nndct_shared.utils import common
from tf_nndct.graph import parser
from tf_nndct.graph.ops import OpTypes
from tf_nndct.pruning import pruning_impl
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils as ku
from tf_nndct.utils import logging
from tf_nndct.utils import tensor_utils
from nndct_shared.pruning.utils import generate_indices_group
from nndct_shared.pruning.pruning_lib import is_grouped_conv, is_depthwise_conv
keras = tf.keras
def save_weights(model,
                 filepath,
                 overwrite=True,
                 save_format=None,
                 options=None):
  """Save `model` weights, rejecting the HDF5 format.

  HDF5 cannot hold the pruning mask variables a sparse model carries, so only
  the TensorFlow checkpoint format is accepted; everything else is forwarded
  to `keras.Model.save_weights` unchanged.
  """
  filepath = generic_utils.path_to_string(filepath)
  if save_format is None:
    # Infer the format from the file extension when none was requested.
    looks_like_h5 = filepath.endswith(('.h5', '.keras', '.hdf5'))
    resolved_format = 'h5' if looks_like_h5 else 'tf'
  else:
    normalized = save_format.lower().strip()
    if normalized in ('tensorflow', 'tf'):
      resolved_format = 'tf'
    elif normalized in ('hdf5', 'h5', 'keras'):
      resolved_format = 'h5'
    else:
      raise errors.OptimizerDataFormatError(
          'Unknown format "%s". Was expecting one of {"tf", "h5"}.' %
          (save_format,))

  if resolved_format == 'h5':
    raise errors.OptimizerDataFormatError((
        'HDF5 format is not allowed for sparse model, please '
        'use "tf" format. See '
        'https://www.tensorflow.org/api_docs/python/tf/keras/Model#save_weights'
    ))

  keras.Model.save_weights(model, filepath, overwrite, resolved_format, options)
23,396 | import copy
import gc
import inspect
import json
import numpy as np
import os
import random
import tensorflow as tf
import types
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from nndct_shared.pruning import errors
from nndct_shared.pruning import pruner as pruner_lib
from nndct_shared.pruning import pruning_lib
from nndct_shared.pruning import search
from nndct_shared.pruning import sensitivity as sens
from nndct_shared.utils import common
from tf_nndct.graph import parser
from tf_nndct.graph.ops import OpTypes
from tf_nndct.pruning import pruning_impl
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils as ku
from tf_nndct.utils import logging
from tf_nndct.utils import tensor_utils
from nndct_shared.pruning.utils import generate_indices_group
from nndct_shared.pruning.pruning_lib import is_grouped_conv, is_depthwise_conv
def generate_indices_group(indices: List[int], dim_size: int,
                           groups: int) -> List[List[int]]:
  """Split flat channel indices into per-group, group-relative indices.

  The [0, dim_size) range is cut into `groups` equal slices; each index in
  `indices` is placed in its slice and rebased to the slice start. Returns
  one (possibly empty) list per slice.
  """
  chosen = set(indices)
  span = dim_size // groups
  result: List[List[int]] = []
  base = 0
  while base < dim_size:
    result.append(
        [i - base for i in range(base, base + span) if i in chosen])
    base += span
  return result
The provided code snippet includes necessary dependencies for implementing the `_sparsify_tensor` function. Write a Python function `def _sparsify_tensor(t: np.ndarray, out_channels, in_channels, groups: int = 1)` to solve the following problem:
Fill 0 in removed channels.
Here is the function:
def _sparsify_tensor(t: np.ndarray, out_channels, in_channels, groups: int = 1):
"""Fill 0 in removed channels."""
tensor = copy.deepcopy(t)
dim_size = len(tensor.shape)
if dim_size == 0:
# e.g layers.Normalization the param 'count' is a num so the shape = 0
return tensor
assert dim_size in [1, 2, 4, 5] # 5: 3D conv
# weight format in tensorflow: HWIO/IO/DHWIO
if groups == 1:
if out_channels:
tensor[..., out_channels] = 0.0
if in_channels and dim_size > 1:
if dim_size == 2:
tensor[in_channels, :] = 0.0
else:
tensor[..., in_channels, :] = 0.0
else:
out_dims_group = generate_indices_group(out_channels, tensor.shape[-1],
groups)
in_dims_group = generate_indices_group(
in_channels, tensor.shape[-2] *
groups, groups) if dim_size > 1 else [[]] * groups
parts = np.split(tensor, groups, axis=-1)
sparse_parts: List[torch.Tensor] = []
for part, o, i in zip(parts, out_dims_group, in_dims_group):
sparse_parts.append(_sparsify_tensor(part, o, i))
tensor = np.concatenate(sparse_parts, axis=-1)
return tensor | Fill 0 in removed channels. |
23,397 | import copy
import gc
import inspect
import json
import numpy as np
import os
import random
import tensorflow as tf
import types
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from nndct_shared.pruning import errors
from nndct_shared.pruning import pruner as pruner_lib
from nndct_shared.pruning import pruning_lib
from nndct_shared.pruning import search
from nndct_shared.pruning import sensitivity as sens
from nndct_shared.utils import common
from tf_nndct.graph import parser
from tf_nndct.graph.ops import OpTypes
from tf_nndct.pruning import pruning_impl
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils as ku
from tf_nndct.utils import logging
from tf_nndct.utils import tensor_utils
from nndct_shared.pruning.utils import generate_indices_group
from nndct_shared.pruning.pruning_lib import is_grouped_conv, is_depthwise_conv
The provided code snippet includes necessary dependencies for implementing the `_prune_tensor` function. Write a Python function `def _prune_tensor(t: np.ndarray, out_channels, in_channels)` to solve the following problem:
Remove dimensions by giving channels.
Here is the function:
def _prune_tensor(t: np.ndarray, out_channels, in_channels):
"""Remove dimensions by giving channels."""
tensor = copy.deepcopy(t)
dim_size = len(tensor.shape)
# weight format in tensorflow: HWIO/IO
assert dim_size in [1, 2, 4]
if dim_size == 1:
out_axis, in_axis = 0, None
elif dim_size == 2:
out_axis, in_axis = 1, 0
else:
out_axis, in_axis = 3, 2
if out_channels:
tensor = np.delete(tensor, out_channels, axis=out_axis)
if in_channels and in_axis is not None:
tensor = np.delete(tensor, in_channels, axis=in_axis)
return tensor | Remove dimensions by giving channels. |
23,398 | import copy
import gc
import inspect
import json
import numpy as np
import os
import random
import tensorflow as tf
import types
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from nndct_shared.pruning import errors
from nndct_shared.pruning import pruner as pruner_lib
from nndct_shared.pruning import pruning_lib
from nndct_shared.pruning import search
from nndct_shared.pruning import sensitivity as sens
from nndct_shared.utils import common
from tf_nndct.graph import parser
from tf_nndct.graph.ops import OpTypes
from tf_nndct.pruning import pruning_impl
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils as ku
from tf_nndct.utils import logging
from tf_nndct.utils import tensor_utils
from nndct_shared.pruning.utils import generate_indices_group
from nndct_shared.pruning.pruning_lib import is_grouped_conv, is_depthwise_conv
class IterativePruningRunner(PruningRunner):
  """Iterative channel-pruning driver.

  Typical flow: call `ana()` once with an evaluation function, then `prune()`
  to obtain a sparse model for finetuning, and `get_slim_model()` to shrink
  the finetuned sparse model. The sensitivity analysis result and the most
  recent pruning specification are cached on disk and reused across runs.
  """

  def __init__(self, model: keras.Model, input_signature: tf.TensorSpec):
    super(IterativePruningRunner, self).__init__(model, input_signature)
    # Cached artifacts: per-graph sensitivity file and a pointer to the
    # most recently generated pruning specification.
    self._sens_path = os.path.join(_VAI_DIR, self._graph.name + '.sens')
    self._latest_spec = os.path.join(_VAI_DIR, 'latest_spec')

  def ana(self,
          eval_fn,
          excludes=None,
          forced=False,
          with_group_conv: bool = False):
    """Performs model analysis. The analysis result will be saved in '.vai'
    directory and this cached result will be used directly in subsequent
    calls unless 'forced' is set to True.

    Arguments:
      eval_fn: Callable object that takes a keras.Model object as its first
        argument and returns the evaluation score.
      excludes: A list of layer names or layer instances to be excluded
        from pruning.
      forced: When set to True, forced to run model analysis instead of using
        cached analysis result.
      with_group_conv: Whether grouped convolutions participate in grouping.
    """
    if not forced:
      net_sens = self._load_analysis_result()
      if net_sens is not None:
        logging.info(
            'Using cached analysis result. If you want to re-analyze the model, set forced=True'
        )
        return
    excluded_nodes = self._get_exclude_nodes(excludes) if excludes else []
    # Dry-run the full pruning pipeline first so structural failures surface
    # before the (expensive) per-step sensitivity sweep.
    self._ana_pre_check(eval_fn, excluded_nodes, with_group_conv)
    self._ana(eval_fn, excluded_nodes, with_group_conv)

  def prune(self,
            ratio=None,
            threshold=None,
            spec_path=None,
            excludes=None,
            mode='sparse',
            channel_divisible=2):
    """Prune the baseline model and returns a sparse model. The degree of model
    reduction can be specified in three ways: ratio, threshold or pruning
    specification. The first method should be used in preference, the latter
    two are more suitable for experiments with manual tuning.

    Arguments:
      ratio: The expected percentage of MACs reduction of baseline model.
        This is just a hint value and the actual MACs reduction not
        strictly equals to this value.
      threshold: Relative proportion of model performance loss between
        baseline model and the pruned model.
      spec_path: Pruning specfication path used to prune the model.
      excludes: A list of layer name or layer instance to be excluded from pruning.
      mode: In which mode the pruned model is generated. Should be either
        'sparse' or 'slim'. Must be 'sparse' in iterative pruning loop.
      channel_divisible: The number of remaining channels in the pruned layer
        can be divided by channel_divisble.

    Returns:
      A sparse or a slim model according to given 'mode'.
    """
    if ratio or threshold:
      # Both ratio- and threshold-driven pruning require a prior ana() run.
      net_sens = self._load_analysis_result()
      if net_sens is None:
        raise errors.OptimizerNoAnaResultsError(
            "Must call ana() before model pruning.")
      excluded_nodes = self._get_exclude_nodes(excludes) if excludes else []

    if ratio:
      if not isinstance(ratio, float):
        raise errors.OptimizerInvalidArgumentError(
            'Expected "ratio" to be float, but got {}({})'.format(
                ratio, type(ratio)))
      logging.info('Pruning ratio = {}'.format(ratio))
      spec = self._spec_from_ratio(net_sens, ratio, excluded_nodes)
      spec.channel_divisible = channel_divisible
      target = ('ratio', ratio)
    elif threshold:
      if not isinstance(threshold, float):
        raise errors.OptimizerInvalidArgumentError(
            'Expected "threshold" to be float, but got {}({})'.format(
                threshold, type(threshold)))
      logging.info('Pruning threshold = {}'.format(threshold))
      spec = self._spec_from_threshold(net_sens, threshold, excluded_nodes)
      spec.channel_divisible = channel_divisible
      target = ('threshold', threshold)
    elif spec_path:
      logging.info('Pruning specification = {}'.format(spec_path))
      spec = self._spec_from_path(spec_path)
    else:
      raise errors.OptimizerInvalidArgumentError(
          'One of [ratio, threshold, spec_path] must be given.')

    if ratio or threshold:
      # Persist the generated spec and remember it as the "latest" one so
      # get_slim_model() can pick it up by default.
      filename = '{}_{}_{}.spec'.format(self._graph.name, *target)
      spec_path = os.path.join(_VAI_DIR, filename)
      json.dump(spec.serialize(), open(spec_path, 'w'), indent=2)
      logging.info('Pruning specification saves in {}'.format(spec_path))

    with open(self._latest_spec, 'w') as f:
      json.dump(spec_path, f)

    if mode == 'slim':
      logging.warn(
          ('UserWarning: slim model can not be used for the next iteration. '
           'Set "mode=sparse" for iterative purpose.'))

    model = self._prune(spec, mode)
    if mode == 'sparse':
      # Sparse models must refuse HDF5 when saving (masks need 'tf' format).
      model.save_weights = types.MethodType(save_weights, model)

    logging.info('Pruning summary:')
    self._summary(spec)
    return model

  def get_slim_model(self, spec_path=None):
    """Get a slim model from a sparse model. Use the latest pruning
    specification to do this transformation by default. If the sparse model
    was not generated from the latest specification, a specification path
    can be provided explicitly.

    Arguments:
      spec_path: Path of pruning specification used to transform a sparse
        model to a slim model.

    Returns:
      A shrinked slim model.
    """
    spec_path = spec_path or json.load(open(self._latest_spec))
    logging.info('Get slim model from specification {}'.format(spec_path))
    return self._prune(self._spec_from_path(spec_path), 'slim')

  def _load_analysis_result(self):
    # Returns None when no cached sensitivity file exists yet.
    return sens.load_sens(self._sens_path) if os.path.exists(
        self._sens_path) else None

  def _ana_pre_check(self, eval_fn, excludes, with_group_conv: bool = False):
    """Prune model but not test it to check if all pruning steps can pass."""
    logging.info('Pre-checking for analysis...')
    groups = pruning_lib.group_nodes(self._graph, excludes, with_group_conv)
    # An aggressive 0.9 sparsity stresses the pipeline end-to-end.
    spec = pruning_lib.PruningSpec.from_node_groups(groups, 0.9)
    pruner = pruner_lib.ChannelPruner(self._graph)
    pruned_graph, net_pruning = pruner.prune(spec)
    model = self._get_slim_model(pruned_graph, net_pruning)
    eval_fn(model)

  def _ana(self, eval_fn, excludes, with_group_conv: bool = False):
    # One evaluation per analysis step; each step prunes a different group.
    analyser = sens.ModelAnalyser(self._graph, excludes, with_group_conv)
    steps = analyser.steps()

    for step in range(steps):
      model = self._prune(analyser.spec(step))
      eval_res = eval_fn(model)
      if not isinstance(eval_res, (int, float)):
        raise errors.OptimizerInvalidArgumentError(
            'int or float expected, but got {}'.format(type(eval_res)))
      analyser.record(step, eval_res)
      logging.info('Analysis complete %d/%d' % (step + 1, steps))

      # Free the per-step model aggressively to keep memory bounded.
      del model
      keras.backend.clear_session()
      tf.compat.v1.reset_default_graph()
    analyser.save(self._sens_path)

  def _spec_from_ratio(self, net_sens, ratio, excludes):
    # Bisection on the sensitivity threshold until the slim model's FLOPs
    # land within tolerance of the requested reduction.
    logging.info('Searching for appropriate ratio for each layer...')
    flops = ku.try_count_flops(self._model)
    target_flops = (1 - ratio) * flops
    flops_tolerance = 1e-2

    min_th = 1e-5
    max_th = 1 - min_th
    num_attempts, max_attempts = 0, 100
    # NOTE(review): prev_spec is assigned but never read — dead state?
    prev_spec = None
    cur_spec = None
    while num_attempts < max_attempts:
      prev_spec = cur_spec
      num_attempts += 1
      threshold = (min_th + max_th) / 2
      cur_spec = self._spec_from_threshold(net_sens, threshold, excludes)
      slim_model = self._prune(cur_spec, mode='slim')
      current_flops = ku.try_count_flops(slim_model)
      error = abs(target_flops - current_flops) / target_flops
      if error < flops_tolerance:
        break
      if current_flops < target_flops:
        max_th = threshold
      else:
        min_th = threshold
    return cur_spec

  def _spec_from_threshold(self, net_sens, threshold, excludes):
    groups = net_sens.prunable_groups_by_threshold(threshold, excludes)
    return pruning_lib.PruningSpec(groups)

  def _spec_from_path(self, path):
    return pruning_lib.PruningSpec.deserialize(json.load(open(path, 'r')))

  def _summary(self, spec):
    # Print a baseline-vs-pruned FLOPs/params comparison table.
    orig_flops = common.readable_num(ku.try_count_flops(self._model))
    orig_params = common.readable_num(ku.try_count_params(self._model))

    slim_model = self._prune(spec, 'slim')
    current_flops = common.readable_num(ku.try_count_flops(slim_model))
    current_params = common.readable_num(ku.try_count_params(slim_model))

    header_fields = ['Metric', 'Baseline', 'Pruned']
    flops_fields = ['FLOPs', orig_flops, current_flops]
    params_fields = ['Params', orig_params, current_params]
    common.print_table(header_fields, [flops_fields, params_fields])
class OneStepPruningRunner(PruningRunner):
  """Implements channel pruning at the model level."""

  def __init__(self, model: keras.Model, input_signature: tf.TensorSpec):
    """Concrete example:

    ```python
    model = MyModel()
    pruner = OneStepPruningRunner(model, tf.TensorSpec(input_shape, tf.float32))
    model = pruner.search_subnets(0.2, train_fn, eval_fn, 1000)
    ```

    Arguments:
      model (keras.Model): Model to prune.
      input_signature(tuple or list): The input specifications of model.
    """
    super(OneStepPruningRunner, self).__init__(model, input_signature)

  def _searcher_saved_path(self, ratio):
    # One persisted searcher state per target ratio.
    return os.path.join(_VAI_DIR, self._graph.name + '_search_{}'.format(ratio))

  def _random_ratios(self, count):
    # One uniformly random per-group pruning ratio per node group.
    return [random.random() for _ in range(count)]

  def search_subnets(self,
                     ratio,
                     train_fn,
                     eval_fn,
                     num_iterations,
                     excludes=None,
                     config=None,
                     with_group_conv: bool = False):
    """Randomly sample per-group ratios, keep candidates whose FLOPs
    reduction lands near `ratio` (+/- 5%), train and score them, and persist
    the searcher state after every accepted subnet.
    """
    if not isinstance(ratio, float):
      raise errors.OptimizerInvalidArgumentError(
          'Expect "ratio" to be float, but got {}({})'.format(
              ratio, type(ratio)))

    excluded_nodes = self._get_exclude_nodes(excludes) if excludes else []
    groups = pruning_lib.group_nodes(self._graph, excluded_nodes,
                                     with_group_conv)
    searcher = search.SubnetSearcher(groups)
    # Record the unpruned model as the supernet baseline.
    score = eval_fn(self._model)
    base_flops = ku.try_count_flops(self._model)
    searcher.set_supernet(score, base_flops)

    searcher_saved_path = self._searcher_saved_path(ratio)
    for i in range(num_iterations):
      ratios = self._random_ratios(len(groups))
      model = self._prune(searcher.spec(ratios))
      current_flops = ku.try_count_flops(model)
      flops_ratio = 1 - current_flops / base_flops
      print('Iter {}: ratios={}, flops_ratio = {}'.format(
          i, ratios, flops_ratio))
      # Only evaluate candidates whose reduction is within +/- eps of target.
      eps = 0.05
      if flops_ratio > ratio - eps and flops_ratio < ratio + eps:
        train_fn(model)
        score = eval_fn(model)
        searcher.add_subnet(ratios, score, current_flops)
        search.save_searcher(searcher, searcher_saved_path)
        logging.info('Found subnet: ratios={}, score={}'.format(ratios, score))

      # Free each candidate model before sampling the next one.
      del model
      #gc.collect()
      keras.backend.clear_session()
      tf.compat.v1.reset_default_graph()
    logging.info('Search results saved in {}'.format(searcher_saved_path))

  def get_subnet(self, ratio, index=None):
    """Load the searcher state for `ratio` and materialize the best subnet
    (or the subnet at `index` when given) as a pruned model.
    """
    searcher = search.load_searcher(self._searcher_saved_path(ratio))
    subnet = searcher.subnet(index) if index else searcher.best_subnet()
    print('best_subnet:', subnet)
    spec = searcher.spec(subnet.ratios)
    logging.vlog(1, 'Get subnet from spec:\n{}'.format(spec))
    return self._prune(spec)
def get_pruning_runner(model, input_signature, method='one_step'):
  """Create a pruning runner of the requested flavor.

  Args:
    model: The keras model to prune.
    input_signature: Input tf.TensorSpec(s) of the model.
    method: Either 'iterative' or 'one_step' (default).

  Returns:
    An IterativePruningRunner or OneStepPruningRunner instance.
  """
  assert method in ['iterative', 'one_step']
  if method == 'iterative':
    runner_cls = IterativePruningRunner
  else:
    runner_cls = OneStepPruningRunner
  return runner_cls(model, input_signature)
23,399 | import inspect
import numpy as np
from tensorflow import keras
from nndct_shared.pruning import errors
from tf_nndct.pruning import pruning_impl
class PruneMaskedWeight(keras.layers.Wrapper):
  """This wrapper augments a keras layer so the weight tensor may be pruned.

  This wrapper implements magnitude-based pruning of the weight tensors.
  Magnitude-based pruning achieves a target sparsity (s% of zeros) for a given
  weight tensor by monitoring the distribution of the absolute values of the
  weight tensor and determining the weight value (referred to as threshold)
  below which s% of elements lie. For every weight tensor being pruned, the
  wrapper maintains an identically shaped tensor (referred to as mask) which
  stores 0 if the weight value lies below the threshold.

  The mask and thresholds are computed during the training based on the
  evolution of the weight values.

  Block sparse patterns:
  For certain SIMD hardware architectures, it may be beneficial to induce
  spatially correlated sparsity. To train models in which the weight tensors
  have block sparse structure, the pruning wrapper can be configured with
  the block_height and block_width configuration parameters set to the desired
  block configuration (2x2, 4x4, 4x1, 1x8, etc). This is applicable to
  rank-2 weight tensor only and the tensor partitioned into non-overlapping
  blocks of size [block_height, block_dim]. Either the average or max absolute
  value in this block is taken as a proxy for the entire block
  (set by block_pooling_function configuration parameter)
  while computing the distribution of the weight values and
  the threshold for pruning.

  Custom keras layers:
  The pruning wrapper can also be applied to a user-defined keras layer.
  Such a layer may contain one or more weight tensors that may be pruned.
  To apply pruning wrapper to such layers, the layer should be a `PrunableLayer`
  instance or, more directly, user should define a `get_prunable_weights` method
  for the layer (Check the pruning_wrapper_test.CustomLayerPrunable for more
  details about how to define a user-defined prunable layer).

  Sparsity function:
  The target sparsity for the weight tensors are set through the
  pruning_schedule parameter of the pruning wrapper. The user must create a
  python callable that returns a scalar tensorflow tensor and pass this
  callable to the sparsity_function parameter. This scalar tensor contains the
  target sparsity value for the weight tensors in the layer.
  The wrapper provides the following pre-built sparsity functions:
  """

  def __init__(self, layer, **kwargs):
    """Create a pruning wrapper for a keras layer.

    Args:
      layer: The keras layer to be pruned.
      **kwargs: Additional keyword arguments to be passed to the keras layer.
    """
    # An instance of the Pruning class. This class contains the logic to prune
    # the weights of this layer.
    self.pruning_obj = None

    # A list of all (weight,mask) tuples for this layer
    self.pruning_vars = []

    # NOTE(review): `tf` is referenced here but this module only imports
    # `from tensorflow import keras` in the visible header — confirm
    # `import tensorflow as tf` exists at file top.
    if not isinstance(layer, tf.keras.layers.Layer):
      raise errors.OptimizerKerasLayerError(
          'Please initialize `Prune` layer with a '
          '`Layer` instance. You passed: {input}'.format(input=layer))

    # TODO(pulkitb): This should be pushed up to the wrappers.py
    # Name the layer using the wrapper and underlying layer name.
    # Prune(Dense) becomes prune_dense_1
    kwargs.update({'name': 'pruned_{}'.format(layer.name)})

    super(PruneMaskedWeight, self).__init__(layer, **kwargs)

    self._track_trackable(layer, name='layer')

    # TODO(yunluli): Work-around to handle the first layer of Sequential model
    # properly. Can remove this when it is implemented in the Wrapper base
    # class.
    #
    # Enables end-user to prune the first layer in Sequential models, while
    # passing the input shape to the original layer.
    #
    # tf.keras.Sequential(
    #   prune_masked_weight(tf.keras.layers.Dense(2, input_shape=(3,)))
    # )
    #
    # as opposed to
    #
    # tf.keras.Sequential(
    #   prune_masked_weight(tf.keras.layers.Dense(2), input_shape=(3,))
    # )
    #
    # Without this code, the pruning wrapper doesn't have an input
    # shape and being the first layer, this causes the model to not be
    # built. Being not built is confusing since the end-user has passed an
    # input shape.
    if not hasattr(self, '_batch_input_shape') and hasattr(
        layer, '_batch_input_shape'):
      self._batch_input_shape = self.layer._batch_input_shape
    #metrics.MonitorBoolGauge('prune_masked_weight_wrapper_usage').set(
    #    layer.__class__.__name__)

  def build(self, input_shape):
    # Build the wrapped layer first, then pair every weight with a
    # non-trainable all-ones mask variable of the same shape/dtype.
    super(PruneMaskedWeight, self).build(input_shape)

    weight_vars, mask_vars, = [], []

    # For each of the weights, add mask variables.
    for weight in self.layer.weights:
      # res2a_branch2a/kernel:0 -> kernel
      weight_name = weight.name.split('/')[-1].split(':')[0]
      mask = self.add_weight(
          weight_name + '_mask',
          shape=weight.shape,
          initializer=tf.keras.initializers.get('ones'),
          dtype=weight.dtype,
          trainable=False,
          aggregation=tf.VariableAggregation.MEAN)

      weight_vars.append(weight)
      mask_vars.append(mask)
    pruning_vars = list(zip(weight_vars, mask_vars))

    # Create a pruning object
    self.pruning_obj = pruning_impl.Pruning(pruning_vars)

  def call(self, inputs, training=None, **kwargs):
    if training is None:
      # NOTE(review): `K` is not among the visible imports — presumably
      # `from tensorflow.keras import backend as K` at file top; confirm.
      training = K.learning_phase()

    # Always execute the op that performs weights = weights * mask
    # Relies on UpdatePruningStep callback to ensure the weights
    # are sparse after the final backpropagation.
    #
    # self.add_update does nothing during eager execution.
    self.add_update(self.pruning_obj.weight_mask_op())

    # TODO(evcu) remove this check after dropping py2 support. In py3 getargspec
    # is deprecated.
    if hasattr(inspect, 'getfullargspec'):
      args = inspect.getfullargspec(self.layer.call).args
    else:
      args = inspect.getargspec(self.layer.call).args

    # Propagate the training bool to the underlying layer if it accepts
    # training as an arg.
    if 'training' in args:
      return self.layer.call(inputs, training=training, **kwargs)

    return self.layer.call(inputs, **kwargs)

  def compute_output_shape(self, input_shape):
    # Masking does not change shapes; delegate to the wrapped layer.
    return self.layer.compute_output_shape(input_shape)

  def get_config(self):
    return super(PruneMaskedWeight, self).get_config()

  # NOTE(review): this looks like it should carry @classmethod — as written
  # `cls` receives the instance. Confirm against the upstream source.
  def from_config(cls, config):
    config = config.copy()

    deserialize_keras_object = keras.utils.deserialize_keras_object  # pylint: disable=g-import-not-at-top
    layer = keras.layers.deserialize(config.pop('layer'))
    config['layer'] = layer

    return cls(**config)

  # NOTE(review): the following two `trainable` defs look like a
  # @property getter and @trainable.setter pair whose decorators were lost;
  # as written the second definition shadows the first. Confirm upstream.
  def trainable(self):
    return self.layer.trainable

  def trainable(self, value):
    self.layer.trainable = value

  # NOTE(review): likewise presumably @property in the original.
  def trainable_weights(self):
    return self.layer.trainable_weights

  def non_trainable_weights(self):
    return self.layer.non_trainable_weights + self._non_trainable_weights

  #@property
  #def updates(self):
  #  return self.layer.updates + self._updates

  #@property
  #def losses(self):
  #  return self.layer.losses + self._losses

  #def get_weights(self):
  #  return self.layer.get_weights()

  #def set_weights(self, weights):
  #  self.layer.set_weights(weights)
The provided code snippet includes necessary dependencies for implementing the `collect_prunable_layers` function. Write a Python function `def collect_prunable_layers(model)` to solve the following problem:
Recursively collect the prunable layers in the model.
Here is the function:
def collect_prunable_layers(model):
  """Recursively collect the prunable layers in the model.

  Walks `model.submodules` and returns every PruneMaskedWeight wrapper found.
  """
  prunable = []
  for submodule in model.submodules:
    if isinstance(submodule, PruneMaskedWeight):
      prunable.append(submodule)
  return prunable
23,400 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def assign(ref, value, name=None):
  """Assign `value` to variable `ref`, dispatching between TF1 and TF2 APIs."""
  if hasattr(tf, 'assign'):
    # TF 1.x exposes a module-level assign op.
    return tf.assign(ref, value, name=name)
  # TF 2.x variables carry their own assign method.
  return ref.assign(value, name=name)
23,401 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
The provided code snippet includes necessary dependencies for implementing the `initialize_variables` function. Write a Python function `def initialize_variables(testcase)` to solve the following problem:
Handle global variable initialization in TF 1.X. Arguments: testcase: instance of tf.test.TestCase
Here is the function:
def initialize_variables(testcase):
  """Handle global variable initialization in TF 1.X.

  Arguments:
    testcase: instance of tf.test.TestCase
  """
  # TF 2.x eager mode initializes variables on creation, so only run the
  # explicit initializer when the TF1 API is present and graph mode is active.
  needs_init = (
      hasattr(tf, 'global_variables_initializer') and not tf.executing_eagerly())
  if needs_init:
    testcase.evaluate(tf.global_variables_initializer())
23,402 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def is_v1_apis():
  """Return True when the TF 1.x API surface (module-level `assign`) is present."""
  return getattr(tf, 'assign', None) is not None
23,403 | import sys
from tensorflow.python.eager import context
from tensorflow.python.util import nest
def make_quantized(base):
class Inspectable(base):
class Attr:
SavingOutputs = '_saving_outputs'
SavedOutputs = '_saved_outputs'
def __call__(self, *args, **kwargs):
outputs = super(Inspectable, self).__call__(*args, **kwargs)
if getattr(self, self.Attr.SavingOutputs,
False) and context.executing_eagerly():
self._save(outputs)
return outputs
def enable_saving_outputs(self):
setattr(self, self.Attr.SavingOutputs, True)
def disable_saving_outputs(self):
setattr(self, self.Attr.SavingOutputs, False)
def _save(self, tensors):
if not hasattr(self, self.Attr.SavedOutputs):
setattr(self, self.Attr.SavedOutputs, [])
batch_outputs = []
for tensor in nest.flatten(tensors):
batch_outputs.append(tensor.numpy())
saved_outputs = getattr(self, self.Attr.SavedOutputs)
saved_outputs.append(batch_outputs)
def saved_outputs(self):
return getattr(self, self.Attr.SavedOutputs, [])
cls_name = base.__name__
quantized_cls = type(cls_name, (Inspectable,), {})
# Use base class __init__ directly so that the `KerasWriter` can inspect init args
# automatically and write them out correctly.
quantized_cls.__init__ = base.__init__
setattr(sys.modules[__name__], cls_name, quantized_cls)
return quantized_cls | null |
23,404 | from tensorflow.python.ops.signal import fft_ops
def rfft(input_tensor, fft_length=None, name=None):
  """Real-input FFT; thin alias over tf.signal's fft_ops implementation."""
  return fft_ops.rfft(input_tensor, fft_length=fft_length, name=name)
23,405 | from tensorflow.python.ops.signal import fft_ops
def irfft(input_tensor, fft_length=None, name=None):
  """Inverse real-input FFT; thin alias over tf.signal's fft_ops implementation."""
  return fft_ops.irfft(input_tensor, fft_length=fft_length, name=name)
23,406 | from tensorflow.python.ops.signal import fft_ops
def ifft(input, name=None):
  """Inverse FFT; thin alias over tf.signal's fft_ops implementation."""
  return fft_ops.ifft(input, name=name)
23,407 | from tensorflow.python.ops import array_ops
def gather(params, indices, axis=None, batch_dims=0, name=None):
  """Gather slices from `params` at `indices`; thin alias of array_ops.gather_v2."""
  return array_ops.gather_v2(
      params, indices, axis=axis, batch_dims=batch_dims, name=name)
23,408 | import numpy as np
import os
import tensorflow as tf
from collections import OrderedDict
from tensorflow.keras import activations
from tensorflow.keras import layers as keras_layers
from tensorflow.python.framework import dtypes as tf_dtypes
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tf_nndct import layers as nndct_layers
from tf_nndct import ops as nndct_ops
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.ops.signal import fft_ops
from tf_nndct.quantization import utils as quant_utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tf_utils
def _get_module(object):
  """Return `(module, package_name, module_name)` for the given object.

  For a top-level module (no dot in its name) the package name and the
  module name are both the full module name.
  """
  module = tf_inspect.getmodule(object)
  full_name = module.__name__
  pkg, _, mod_name = full_name.rpartition('.')
  if not pkg:
    # No package prefix: the module is its own "package".
    pkg = mod_name = full_name
  return (module, pkg, mod_name)
23,409 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tf_nndct.graph import ops
from tf_nndct.utils import generic_utils
from tf_nndct.utils import viz
def write_proto(path, message, as_text=False):
  """Serialize a protobuf `message` to `path`.

  Args:
    path: Destination file path; missing parent directories are created.
    message: A protobuf message instance.
    as_text: If True, write the human-readable text format; otherwise
      write the binary wire format.
  """
  # Fix: the original called generic_utils.mkdir_if_not_exist(dir_name)
  # unconditionally (failing semantics for a bare filename where
  # dir_name == '') and then *also* called os.makedirs for the same
  # directory. One guarded stdlib call is sufficient.
  dir_name = os.path.dirname(path)
  if dir_name:
    os.makedirs(dir_name, exist_ok=True)

  if as_text:
    with open(path, "w") as f:
      f.write(text_format.MessageToString(message))
  else:
    with open(path, "wb") as f:
      f.write(message.SerializeToString())
def write_text_proto(path, message):
  """Write `message` to `path` in human-readable text proto format."""
  write_proto(path, message, as_text=True)
23,410 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tf_nndct.graph import ops
from tf_nndct.utils import generic_utils
from tf_nndct.utils import viz
def op_param_by_name(op, name):
  """Return the ParamName enum member of `op` whose value equals `name`.

  Returns None when no member matches.
  """
  return next((member for member in op.ParamName if member.value == name),
              None)
23,411 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tf_nndct.graph import ops
from tf_nndct.utils import generic_utils
from tf_nndct.utils import viz
def stringfy_to_write(x):
  """Render a value as source text: strings are single-quoted, everything
  else is formatted with its default str() representation."""
  return f"'{x}'" if isinstance(x, str) else f"{x}"
23,412 | import json
import os
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.core.protobuf import config_pb2
from tensorflow.lite.python.util import run_graph_optimizations as _run_graph_optimizations
from tf_nndct.graph import OpTypes
from tf_nndct.graph import converter
from tf_nndct.graph import ops
from tf_nndct.graph import refiner
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import logging
from tf_nndct.utils import tf_utils
keras = tf.keras
def _parent_scope(scope):
# Given 'model/dense/MatMul', return 'model/dense'.
return scope.rsplit('/', 1)[0]
The provided code snippet includes necessary dependencies for implementing the `belongs_to_keras_layer` function. Write a Python function `def belongs_to_keras_layer(op, scope_to_layer)` to solve the following problem:
Get the keras layer the given op is generated from. Returns None if op does not belong to any layer. Trace back from current scope to parent scope recursively until it reaches the outermost scope.
Here is the function:
def belongs_to_keras_layer(op, scope_to_layer):
  """Get the keras layer the given op is generated from.

  Walks from the op's own scope up through parent scopes until a scope
  that maps to a layer is found, or the outermost scope is reached.
  Returns None if the op does not belong to any layer.
  """
  if not scope_to_layer:
    return None

  layer = None
  scope = op.name
  while scope not in scope_to_layer:
    parent = _parent_scope(scope)
    if parent == scope:
      # Reached the outermost scope without a match.
      break
    scope = parent
  else:
    layer = scope_to_layer[scope][0]

  # Lambda is only a wrapper and Sequential is a container; ops inside
  # them must be parsed individually, so report "no layer" for both.
  if type(layer) == keras.layers.Lambda or isinstance(layer, keras.Sequential):
    layer = None
  return layer
23,413 | import json
import os
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.core.protobuf import config_pb2
from tensorflow.lite.python.util import run_graph_optimizations as _run_graph_optimizations
from tf_nndct.graph import OpTypes
from tf_nndct.graph import converter
from tf_nndct.graph import ops
from tf_nndct.graph import refiner
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import logging
from tf_nndct.utils import tf_utils
import logging as _logging
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import WARN
from logging import NOTSET
The provided code snippet includes necessary dependencies for implementing the `get_layer_inbound_nodes` function. Write a Python function `def get_layer_inbound_nodes(layer_parent_pairs)` to solve the following problem:
Get layer's inbound nodes. The config of a layer does not include connectivity information, nor the layer class name. These are handled by keras.Model. So we extract them from model's config and associate them to the corresponding layer.
Here is the function:
def get_layer_inbound_nodes(layer_parent_pairs):
  """Get layer's inbound nodes.

  The config of a layer does not include connectivity information,
  nor the layer class name; those are handled by keras.Model. Extract
  them from the model's config and key them by layer name.
  """
  # The model is the one top-level "layer" that has no parent.
  model = next(
      (layer for layer, parent in layer_parent_pairs if parent is None), None)

  layer_inbound_nodes = {}
  # Only a graph network (functional/sequential model) has get_config.
  if getattr(model, '_is_graph_network', None):
    model_config = model.get_config()
    logging.vlog(4, 'model_config: {}'.format(model_config))
    for config in model_config.get('layers', []):
      if 'inbound_nodes' in config:
        layer_inbound_nodes[config['name']] = config['inbound_nodes']
  return layer_inbound_nodes
23,414 | import imp
from tensorflow import keras
from tensorflow.python.ops import array_ops
from tensorflow.python.util import nest
from nndct_shared.pruning import pruning_lib
from tf_nndct.graph import OpTypes
from tf_nndct.graph import parser
from tf_nndct.graph import utils
from tf_nndct.graph import writer as writer_lib
from tf_nndct.layers import base_layer
from tf_nndct.utils import keras_utils
from tf_nndct.utils import logging
from tf_nndct.utils import tensor_utils
class KerasBuilder(object):
  """Generates python code for an nndct graph, imports the generated
  module, instantiates the model and restores the parameters saved in
  the graph into the rebuilt keras layers."""

  def __init__(self, graph):
    """Args:
      graph: The nndct graph to generate code from.
    """
    self._graph = graph

  @staticmethod
  def _load_generated_module(filepath):
    """Import the generated python file as a module.

    Uses importlib instead of `imp.load_source`: the `imp` module has
    been deprecated since Python 3.4 and was removed in Python 3.12.
    """
    import importlib.util
    import sys
    module_name = 'nndct_rebuilt_model'
    spec = importlib.util.spec_from_file_location(module_name, filepath)
    module = importlib.util.module_from_spec(spec)
    # Register before executing so the module is importable during its
    # own top-level execution (mirrors imp.load_source behavior).
    sys.modules[module_name] = module
    spec.loader.exec_module(module)
    return module

  def build(self, filepath, quantized=False, as_layer=False):
    """Write the graph's code to `filepath`, build it and reload weights.

    Args:
      filepath: Path of the generated python file.
      quantized: Whether to generate the quantized implementation.
      as_layer: Emit a keras Layer subclass instead of a keras Model.

    Returns:
      A `(rebuilt_model, layer_nodes)` tuple, where `layer_nodes` is a
      list of `(layer, node)` pairs mapping rebuilt layers back to graph
      nodes.
    """
    class_name = 'Model'
    base_class = keras.Model
    call_fn_name = 'call'
    if as_layer:
      if quantized:
        base_class = base_layer.Layer
        call_fn_name = '_internal_call'
      else:
        base_class = keras.layers.Layer
    class_spec = ClassSpec(class_name, base_class, call_fn_name, quantized)
    writer = writer_lib.GraphCodeGenerator(self._graph, class_spec)
    layer_to_node = writer.write(filepath)

    loaded_module = self._load_generated_module(filepath)
    rebuilt_model = getattr(loaded_module, class_name)()

    # Build dummy inputs matching the graph's input signature; calling
    # the subclassed model once builds it (mainly to create the weights).
    dummy_inputs = []
    for spec in nest.flatten(self._graph.input_signature):
      logging.vlog(1, spec)
      dummy_inputs.append(array_ops.ones(spec.shape, dtype=spec.dtype))
    dummy_inputs = nest.pack_sequence_as(self._graph.input_signature,
                                         dummy_inputs)
    rebuilt_model(*dummy_inputs)

    layer_nodes = []
    # Reload weights from the graph into the freshly created layers.
    for layer_name, node in layer_to_node.items():
      layer = getattr(rebuilt_model, layer_name)
      # If the op declares ParamName, map each keras weight name to its
      # ParamName member; otherwise export params in the order they are
      # stored on the op.
      weights = []
      if hasattr(node.op, 'ParamName'):
        named_weights = keras_utils.get_named_weights(layer)
        for name in named_weights:
          param = utils.op_param_by_name(node.op, name)
          if not param:
            raise ValueError('Can not get value of "{}" in node({})'.format(
                name, node.name))
          ndarray = tensor_utils.param_to_tf_numpy(node.op.get_param(param))
          if pruning_lib.is_transpose_conv(node.op):
            ndarray = tensor_utils.transposeconv_weight_dim_trans(ndarray)
          weights.append(ndarray)
          logging.vlog(
              2, 'Reload weights of {}: name={}, shape={}'.format(
                  layer.name, name, ndarray.shape))
      else:
        for name, tensor in node.op.params.items():
          ndarray = tensor_utils.param_to_tf_numpy(tensor)
          if pruning_lib.is_transpose_conv(node.op):
            ndarray = tensor_utils.transposeconv_weight_dim_trans(ndarray)
          weights.append(ndarray)
          logging.vlog(
              2, "Reload weights of {}: name={}, shape={}".format(
                  layer.name, name, ndarray.shape))
      layer_nodes.append((layer, node))
      if weights:
        layer.set_weights(weights)
    return rebuilt_model, layer_nodes
def rebuild_model(model, input_signature, path=None):
  """Parse a keras model into an nndct graph and rebuild it from
  generated code.

  If `path` is not given, the generated file defaults to
  '<model.name>_rebuilt.py'.
  """
  graph = parser.from_keras_model(model, input_signature)
  if not path:
    path = '{}_rebuilt.py'.format(model.name)
  rebuilt_model, _ = KerasBuilder(graph).build(path)
  return rebuilt_model
23,415 | import numpy as np
from enum import Enum
from tensorflow.core.framework import types_pb2
def from_numpy(dtype):
  """Map a NumPy dtype to the corresponding nndct DType (KeyError if unmapped)."""
  return _NP_TO_NNDCT[dtype]
23,416 | import numpy as np
from enum import Enum
from tensorflow.core.framework import types_pb2
# Mapping of nndct DType -> NumPy dtype; keep in sync with the DType enum.
_NNDCT_TO_NP = {
    DType.FLOAT: np.float32,
    DType.FLOAT16: np.float16,
    DType.DOUBLE: np.float64,
    DType.INT32: np.int32,
    DType.INT16: np.int16,
    DType.INT8: np.int8,
    DType.UINT8: np.uint8,
    DType.UINT16: np.uint16,
    DType.INT64: np.int64,
    DType.UINT64: np.uint64,
    DType.BOOL: bool
}
def to_numpy(dtype):
  """Map an nndct DType to the corresponding NumPy dtype (KeyError if unmapped)."""
  return _NNDCT_TO_NP[dtype]
23,417 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
_tf_type_to_nndct = {
'Add': op_def.TFAdd,
'AddV2': op_def.TFAdd,
'BiasAdd': op_def.TFBiasAdd,
'Identity': op_def.TFIdentity,
'NoOp': op_def.TFNoOp,
'Reshape': op_def.TFReshape,
'Sigmoid': op_def.TFSigmoid,
'Tanh': op_def.TFTanh,
'GatherV2': op_def.TFGather,
'RFFT': op_def.TFRFFT,
'ComplexAbs': op_def.TFComplexAbs,
'Angle': op_def.TFAngle,
'Exp': op_def.TFExp,
'IRFFT': op_def.TFIRFFT,
'Pad': op_def.TFPad,
'Transpose': op_def.TFTranspose,
'Sum': op_def.TFSum,
'reshape': op_def.TFReshape,
'concat': op_def.TFConcat,
'ConcatV2': op_def.TFConcat,
'__operators__.add': op_def.TFAdd,
}
def create_node(name, op, input_names, output_names):
def convert_simple_tf_op(node):
  """Convert a TF node with no extra attrs/params via the _tf_type_to_nndct table."""
  op = _tf_type_to_nndct[node.type]()
  return create_node(node.name, op, node.input_names, node.output_names)
23,418 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
if tf_utils.is_tf_version_greater_equal('2.9.0'):
from keras.layers.rnn import lstm as recurrent_v2
from keras.layers.rnn import lstm_v1 as recurrent
elif tf_utils.is_tf_version_greater_equal('2.6'):
# Keras is seperate from tensorflow since tf 2.6
from keras.layers import recurrent
from keras.layers import recurrent_v2
else:
from tensorflow.python.keras.layers import recurrent
from tensorflow.python.keras.layers import recurrent_v2
class OpBuilder(object):
  """Fluent builder that instantiates an nndct op and fills in its
  configs, attributes and parameter tensors."""
  # Global per-op-type build counter; used to derive unique param tensor
  # names such as "dense_0:weight".
  _OP_COUNT = {}
  # TODO(yuwang): Maybe use op object instead of op class ?
  def __init__(self, op_cls, config=None, params=None, *args, **kwargs):
    """Instantiate ``op_cls(*args, **kwargs)`` and apply the given
    config entries and params immediately."""
    self._op = op_cls(*args, **kwargs)
    self._config = config if config else {}
    self._params = params if params else {}
    # AttrName/ParamName are optional enum classes declared on the op class.
    self._AttrName = op_cls.AttrName if hasattr(op_cls, 'AttrName') else None
    self._ParamName = op_cls.ParamName if hasattr(op_cls, 'ParamName') else None
    op_type = self._op.type
    if op_type not in self._OP_COUNT:
      self._OP_COUNT[op_type] = 0
    self._OP_COUNT[op_type] = self._OP_COUNT[op_type] + 1
    for name, value in self._config.items():
      self._op.set_config(name, value)
    for name, value in self._params.items():
      self.param(name, value)
  def config(self, name, value):
    """Set a config entry on the op; returns self for chaining."""
    self._op.set_config(name, value)
    return self
  def attr(self, name, value):
    """Set the attribute whose AttrName value matches `name`.
    NOTE(review): if `name` matches no AttrName member, the loop falls
    through and the *last* member is set silently -- confirm intended.
    """
    if not self._AttrName:
      raise ValueError('Op {} does not has any attributes'.format(
          type(self._op)))
    for attr in self._AttrName:
      if name == attr.value:
        break
    self._op.set_attr(attr, value)
    return self
  def param(self, name, value):
    """Attach `value` as a named parameter tensor; returns self."""
    if not self._ParamName:
      param = name
    else:
      param = utils.op_param_by_name(self._op, name)
      if not param:
        raise ValueError('{} does not has a param named "{}"'.format(
            type(self._op), name))
    op_type = self._op.type
    # Zero-based index: the counter was already bumped in __init__.
    index = self._OP_COUNT[op_type] - 1
    # Naming tensor like dense_0:weight
    name = '{}_{}:{}'.format(op_type, index, name)
    tensor = tensor_utils.param_from_tf_numpy(name, value)
    self._op.set_param(param, tensor)
    return self
  def build(self):
    """Return the fully configured op."""
    return self._op
# Keras promoted the preprocessing layers out of `experimental` in TF 2.6;
# bind whichever symbol the installed version provides.
if tf_utils.is_tf_version_greater_equal('2.6'):
  normalization_layer = layers.Normalization
else:
  normalization_layer = layers.experimental.preprocessing.Normalization
if tf_utils.is_tf_version_greater_equal('2.6'):
  rescaling_layer = layers.Rescaling
else:
  rescaling_layer = layers.experimental.preprocessing.Rescaling
def create_node(name, op, input_names, output_names):
  """Wrap `op` in an ops.Node and wire up its input/output tensor names."""
  node = ops.Node(name, op)
  node.input_names[:] = input_names
  node.output_names[:] = output_names
  return node
def convert_op_placeholder(node):
  """Convert a TF Placeholder node into a TFInput op."""
  config = node.get_config()
  builder = OpBuilder(op_def.TFInput)
  builder.config('shape', tf_utils.tf_shape_to_list(config['shape']))
  builder.config('dtype', dtypes.from_tf(config['dtype']))
  return create_node(node.name, builder.build(), node.input_names,
                     node.output_names)
23,419 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
class OpBuilder(object):
def __init__(self, op_cls, config=None, params=None, *args, **kwargs):
def config(self, name, value):
def attr(self, name, value):
def param(self, name, value):
def build(self):
def create_node(name, op, input_names, output_names):
def convert_op_cast(node):
  """Convert a TF Cast node; the target dtype comes from its 'DstT' attr."""
  dst_dtype = node.get_config()['DstT']
  op = OpBuilder(op_def.TFCast).config('dtype', dst_dtype).build()
  return create_node(node.name, op, node.input_names, node.output_names)
23,420 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
if tf_utils.is_tf_version_greater_equal('2.9.0'):
from keras.layers.rnn import lstm as recurrent_v2
from keras.layers.rnn import lstm_v1 as recurrent
elif tf_utils.is_tf_version_greater_equal('2.6'):
# Keras is seperate from tensorflow since tf 2.6
from keras.layers import recurrent
from keras.layers import recurrent_v2
else:
from tensorflow.python.keras.layers import recurrent
from tensorflow.python.keras.layers import recurrent_v2
class OpBuilder(object):
  """Fluent builder that instantiates an nndct op and fills in its
  configs, attributes and parameter tensors."""
  # Global per-op-type build counter; used to derive unique param tensor
  # names such as "dense_0:weight".
  _OP_COUNT = {}
  # TODO(yuwang): Maybe use op object instead of op class ?
  def __init__(self, op_cls, config=None, params=None, *args, **kwargs):
    """Instantiate ``op_cls(*args, **kwargs)`` and apply the given
    config entries and params immediately."""
    self._op = op_cls(*args, **kwargs)
    self._config = config if config else {}
    self._params = params if params else {}
    # AttrName/ParamName are optional enum classes declared on the op class.
    self._AttrName = op_cls.AttrName if hasattr(op_cls, 'AttrName') else None
    self._ParamName = op_cls.ParamName if hasattr(op_cls, 'ParamName') else None
    op_type = self._op.type
    if op_type not in self._OP_COUNT:
      self._OP_COUNT[op_type] = 0
    self._OP_COUNT[op_type] = self._OP_COUNT[op_type] + 1
    for name, value in self._config.items():
      self._op.set_config(name, value)
    for name, value in self._params.items():
      self.param(name, value)
  def config(self, name, value):
    """Set a config entry on the op; returns self for chaining."""
    self._op.set_config(name, value)
    return self
  def attr(self, name, value):
    """Set the attribute whose AttrName value matches `name`.
    NOTE(review): if `name` matches no AttrName member, the loop falls
    through and the *last* member is set silently -- confirm intended.
    """
    if not self._AttrName:
      raise ValueError('Op {} does not has any attributes'.format(
          type(self._op)))
    for attr in self._AttrName:
      if name == attr.value:
        break
    self._op.set_attr(attr, value)
    return self
  def param(self, name, value):
    """Attach `value` as a named parameter tensor; returns self."""
    if not self._ParamName:
      param = name
    else:
      param = utils.op_param_by_name(self._op, name)
      if not param:
        raise ValueError('{} does not has a param named "{}"'.format(
            type(self._op), name))
    op_type = self._op.type
    # Zero-based index: the counter was already bumped in __init__.
    index = self._OP_COUNT[op_type] - 1
    # Naming tensor like dense_0:weight
    name = '{}_{}:{}'.format(op_type, index, name)
    tensor = tensor_utils.param_from_tf_numpy(name, value)
    self._op.set_param(param, tensor)
    return self
  def build(self):
    """Return the fully configured op."""
    return self._op
# Keras promoted the preprocessing layers out of `experimental` in TF 2.6;
# bind whichever symbol the installed version provides.
if tf_utils.is_tf_version_greater_equal('2.6'):
  normalization_layer = layers.Normalization
else:
  normalization_layer = layers.experimental.preprocessing.Normalization
if tf_utils.is_tf_version_greater_equal('2.6'):
  rescaling_layer = layers.Rescaling
else:
  rescaling_layer = layers.experimental.preprocessing.Rescaling
def create_node(name, op, input_names, output_names):
  """Wrap `op` in an ops.Node and wire up its input/output tensor names."""
  node = ops.Node(name, op)
  node.input_names[:] = input_names
  node.output_names[:] = output_names
  return node
def convert_op_const(node):
  """Convert a TF Const node, materializing its value as a numpy array."""
  config = node.get_config()
  ndarray = tf_utils.values_from_tf_const(node.op.node_def)
  # Save ndarray or raw tf.Tensor?
  config['value'] = ndarray
  builder = OpBuilder(op_def.TFConst, config, None)
  op = builder.param(node.name, ndarray).build()
  return create_node(node.name, op, node.input_names, node.output_names)
23,421 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
class OpBuilder(object):
  """Fluent builder that instantiates an nndct op and fills in its
  configs, attributes and parameter tensors."""
  # Global per-op-type build counter; used to derive unique param tensor
  # names such as "dense_0:weight".
  _OP_COUNT = {}
  # TODO(yuwang): Maybe use op object instead of op class ?
  def __init__(self, op_cls, config=None, params=None, *args, **kwargs):
    """Instantiate ``op_cls(*args, **kwargs)`` and apply the given
    config entries and params immediately."""
    self._op = op_cls(*args, **kwargs)
    self._config = config if config else {}
    self._params = params if params else {}
    # AttrName/ParamName are optional enum classes declared on the op class.
    self._AttrName = op_cls.AttrName if hasattr(op_cls, 'AttrName') else None
    self._ParamName = op_cls.ParamName if hasattr(op_cls, 'ParamName') else None
    op_type = self._op.type
    if op_type not in self._OP_COUNT:
      self._OP_COUNT[op_type] = 0
    self._OP_COUNT[op_type] = self._OP_COUNT[op_type] + 1
    for name, value in self._config.items():
      self._op.set_config(name, value)
    for name, value in self._params.items():
      self.param(name, value)
  def config(self, name, value):
    """Set a config entry on the op; returns self for chaining."""
    self._op.set_config(name, value)
    return self
  def attr(self, name, value):
    """Set the attribute whose AttrName value matches `name`.
    NOTE(review): if `name` matches no AttrName member, the loop falls
    through and the *last* member is set silently -- confirm intended.
    """
    if not self._AttrName:
      raise ValueError('Op {} does not has any attributes'.format(
          type(self._op)))
    for attr in self._AttrName:
      if name == attr.value:
        break
    self._op.set_attr(attr, value)
    return self
  def param(self, name, value):
    """Attach `value` as a named parameter tensor; returns self."""
    if not self._ParamName:
      param = name
    else:
      param = utils.op_param_by_name(self._op, name)
      if not param:
        raise ValueError('{} does not has a param named "{}"'.format(
            type(self._op), name))
    op_type = self._op.type
    # Zero-based index: the counter was already bumped in __init__.
    index = self._OP_COUNT[op_type] - 1
    # Naming tensor like dense_0:weight
    name = '{}_{}:{}'.format(op_type, index, name)
    tensor = tensor_utils.param_from_tf_numpy(name, value)
    self._op.set_param(param, tensor)
    return self
  def build(self):
    """Return the fully configured op."""
    return self._op
def create_node(name, op, input_names, output_names):
  """Wrap `op` in an ops.Node and wire up its input/output tensor names."""
  node = ops.Node(name, op)
  node.input_names[:] = input_names
  node.output_names[:] = output_names
  return node
def convert_op_mul(node):
  """Convert a TF Mul node into a TFMultiply op."""
  multiply = OpBuilder(op_def.TFMultiply).build()
  return create_node(node.name, multiply, node.input_names,
                     node.output_names)
23,422 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
class OpBuilder(object):
def __init__(self, op_cls, config=None, params=None, *args, **kwargs):
def config(self, name, value):
def attr(self, name, value):
def param(self, name, value):
def build(self):
def create_node(name, op, input_names, output_names):
def convert_op_strided_slice(node):
  """Convert a TF StridedSlice node, carrying over its full config."""
  builder = OpBuilder(op_def.TFStridedSlice, node.get_config())
  return create_node(node.name, builder.build(), node.input_names,
                     node.output_names)
23,423 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
class OpBuilder(object):
  """Fluent builder that instantiates an nndct op and fills in its
  configs, attributes and parameter tensors."""
  # Global per-op-type build counter; used to derive unique param tensor
  # names such as "dense_0:weight".
  _OP_COUNT = {}
  # TODO(yuwang): Maybe use op object instead of op class ?
  def __init__(self, op_cls, config=None, params=None, *args, **kwargs):
    """Instantiate ``op_cls(*args, **kwargs)`` and apply the given
    config entries and params immediately."""
    self._op = op_cls(*args, **kwargs)
    self._config = config if config else {}
    self._params = params if params else {}
    # AttrName/ParamName are optional enum classes declared on the op class.
    self._AttrName = op_cls.AttrName if hasattr(op_cls, 'AttrName') else None
    self._ParamName = op_cls.ParamName if hasattr(op_cls, 'ParamName') else None
    op_type = self._op.type
    if op_type not in self._OP_COUNT:
      self._OP_COUNT[op_type] = 0
    self._OP_COUNT[op_type] = self._OP_COUNT[op_type] + 1
    for name, value in self._config.items():
      self._op.set_config(name, value)
    for name, value in self._params.items():
      self.param(name, value)
  def config(self, name, value):
    """Set a config entry on the op; returns self for chaining."""
    self._op.set_config(name, value)
    return self
  def attr(self, name, value):
    """Set the attribute whose AttrName value matches `name`.
    NOTE(review): if `name` matches no AttrName member, the loop falls
    through and the *last* member is set silently -- confirm intended.
    """
    if not self._AttrName:
      raise ValueError('Op {} does not has any attributes'.format(
          type(self._op)))
    for attr in self._AttrName:
      if name == attr.value:
        break
    self._op.set_attr(attr, value)
    return self
  def param(self, name, value):
    """Attach `value` as a named parameter tensor; returns self."""
    if not self._ParamName:
      param = name
    else:
      param = utils.op_param_by_name(self._op, name)
      if not param:
        raise ValueError('{} does not has a param named "{}"'.format(
            type(self._op), name))
    op_type = self._op.type
    # Zero-based index: the counter was already bumped in __init__.
    index = self._OP_COUNT[op_type] - 1
    # Naming tensor like dense_0:weight
    name = '{}_{}:{}'.format(op_type, index, name)
    tensor = tensor_utils.param_from_tf_numpy(name, value)
    self._op.set_param(param, tensor)
    return self
  def build(self):
    """Return the fully configured op."""
    return self._op
def create_node(name, op, input_names, output_names):
  """Wrap `op` in an ops.Node and wire up its input/output tensor names."""
  node = ops.Node(name, op)
  node.input_names[:] = input_names
  node.output_names[:] = output_names
  return node
def convert_op_matmul(node):
  """Parse a TF MatMul node as a Dense layer with no bias."""
  dense = OpBuilder(op_def.TFDense).config('use_bias', False).build()
  return create_node(node.name, dense, node.input_names, node.output_names)
23,424 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
class OpBuilder(object):
  """Fluent builder that instantiates an nndct op and fills in its
  configs, attributes and parameter tensors."""
  # Global per-op-type build counter; used to derive unique param tensor
  # names such as "dense_0:weight".
  _OP_COUNT = {}
  # TODO(yuwang): Maybe use op object instead of op class ?
  def __init__(self, op_cls, config=None, params=None, *args, **kwargs):
    """Instantiate ``op_cls(*args, **kwargs)`` and apply the given
    config entries and params immediately."""
    self._op = op_cls(*args, **kwargs)
    self._config = config if config else {}
    self._params = params if params else {}
    # AttrName/ParamName are optional enum classes declared on the op class.
    self._AttrName = op_cls.AttrName if hasattr(op_cls, 'AttrName') else None
    self._ParamName = op_cls.ParamName if hasattr(op_cls, 'ParamName') else None
    op_type = self._op.type
    if op_type not in self._OP_COUNT:
      self._OP_COUNT[op_type] = 0
    self._OP_COUNT[op_type] = self._OP_COUNT[op_type] + 1
    for name, value in self._config.items():
      self._op.set_config(name, value)
    for name, value in self._params.items():
      self.param(name, value)
  def config(self, name, value):
    """Set a config entry on the op; returns self for chaining."""
    self._op.set_config(name, value)
    return self
  def attr(self, name, value):
    """Set the attribute whose AttrName value matches `name`.
    NOTE(review): if `name` matches no AttrName member, the loop falls
    through and the *last* member is set silently -- confirm intended.
    """
    if not self._AttrName:
      raise ValueError('Op {} does not has any attributes'.format(
          type(self._op)))
    for attr in self._AttrName:
      if name == attr.value:
        break
    self._op.set_attr(attr, value)
    return self
  def param(self, name, value):
    """Attach `value` as a named parameter tensor; returns self."""
    if not self._ParamName:
      param = name
    else:
      param = utils.op_param_by_name(self._op, name)
      if not param:
        raise ValueError('{} does not has a param named "{}"'.format(
            type(self._op), name))
    op_type = self._op.type
    # Zero-based index: the counter was already bumped in __init__.
    index = self._OP_COUNT[op_type] - 1
    # Naming tensor like dense_0:weight
    name = '{}_{}:{}'.format(op_type, index, name)
    tensor = tensor_utils.param_from_tf_numpy(name, value)
    self._op.set_param(param, tensor)
    return self
  def build(self):
    """Return the fully configured op."""
    return self._op
def create_node(name, op, input_names, output_names):
  """Wrap `op` in an ops.Node and wire up its input/output tensor names."""
  node = ops.Node(name, op)
  node.input_names[:] = input_names
  node.output_names[:] = output_names
  return node
The provided code snippet includes necessary dependencies for implementing the `convert_layer_normalization` function. Write a Python function `def convert_layer_normalization(node)` to solve the following problem:
Convert a keras layers.Normalization layer to a TFNormalization op
Here is the function:
def convert_layer_normalization(node):
  """Convert a keras Normalization layer into a TFNormalization op."""
  params = node.get_params()
  # `count` is stored as a plain scalar; wrap it as an ndarray so it can
  # be attached as a param tensor like the other values.
  if 'count' in params:
    params['count'] = np.array(params['count'])
  builder = OpBuilder(op_def.TFNormalization, node.get_config(), params)
  return create_node(node.name, builder.build(), node.input_names,
                     node.output_names)
23,425 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
class OpBuilder(object):
  """Fluent builder that instantiates an nndct op and fills in its
  configs, attributes and parameter tensors."""
  # Global per-op-type build counter; used to derive unique param tensor
  # names such as "dense_0:weight".
  _OP_COUNT = {}
  # TODO(yuwang): Maybe use op object instead of op class ?
  def __init__(self, op_cls, config=None, params=None, *args, **kwargs):
    """Instantiate ``op_cls(*args, **kwargs)`` and apply the given
    config entries and params immediately."""
    self._op = op_cls(*args, **kwargs)
    self._config = config if config else {}
    self._params = params if params else {}
    # AttrName/ParamName are optional enum classes declared on the op class.
    self._AttrName = op_cls.AttrName if hasattr(op_cls, 'AttrName') else None
    self._ParamName = op_cls.ParamName if hasattr(op_cls, 'ParamName') else None
    op_type = self._op.type
    if op_type not in self._OP_COUNT:
      self._OP_COUNT[op_type] = 0
    self._OP_COUNT[op_type] = self._OP_COUNT[op_type] + 1
    for name, value in self._config.items():
      self._op.set_config(name, value)
    for name, value in self._params.items():
      self.param(name, value)
  def config(self, name, value):
    """Set a config entry on the op; returns self for chaining."""
    self._op.set_config(name, value)
    return self
  def attr(self, name, value):
    """Set the attribute whose AttrName value matches `name`.
    NOTE(review): if `name` matches no AttrName member, the loop falls
    through and the *last* member is set silently -- confirm intended.
    """
    if not self._AttrName:
      raise ValueError('Op {} does not has any attributes'.format(
          type(self._op)))
    for attr in self._AttrName:
      if name == attr.value:
        break
    self._op.set_attr(attr, value)
    return self
  def param(self, name, value):
    """Attach `value` as a named parameter tensor; returns self."""
    if not self._ParamName:
      param = name
    else:
      param = utils.op_param_by_name(self._op, name)
      if not param:
        raise ValueError('{} does not has a param named "{}"'.format(
            type(self._op), name))
    op_type = self._op.type
    # Zero-based index: the counter was already bumped in __init__.
    index = self._OP_COUNT[op_type] - 1
    # Naming tensor like dense_0:weight
    name = '{}_{}:{}'.format(op_type, index, name)
    tensor = tensor_utils.param_from_tf_numpy(name, value)
    self._op.set_param(param, tensor)
    return self
  def build(self):
    """Return the fully configured op."""
    return self._op
def create_node(name, op, input_names, output_names):
  """Wrap `op` in an ops.Node and wire up its input/output tensor names."""
  node = ops.Node(name, op)
  node.input_names[:] = input_names
  node.output_names[:] = output_names
  return node
The provided code snippet includes necessary dependencies for implementing the `convert_layer_rescaling` function. Write a Python function `def convert_layer_rescaling(node)` to solve the following problem:
Convert layers.Rescaling to TFRescaling
Here is the function:
def convert_layer_rescaling(node):
  """Convert a keras Rescaling layer into a TFRescaling op."""
  builder = OpBuilder(op_def.TFRescaling, node.get_config(),
                      node.get_params())
  return create_node(node.name, builder.build(), node.input_names,
                     node.output_names)
23,426 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
class OpBuilder(object):
def __init__(self, op_cls, config=None, params=None, *args, **kwargs):
def config(self, name, value):
def attr(self, name, value):
def param(self, name, value):
def build(self):
def create_node(name, op, input_names, output_names):
def convert_layer_batchnorm(node):
  """Convert a keras BatchNormalization node to a TFBatchNorm op node.

  The output dimension is inferred from whichever parameter tensor is
  present, in the priority order gamma > beta > moving_mean (mirroring
  keras' own selection, see the referenced TF source line).

  Args:
    node: Graph node wrapping a keras BatchNormalization layer.

  Returns:
    A new node carrying a TFBatchNorm op with 'out_dim' and 'axis' attrs.
  """
  config = node.get_config()
  params = node.get_params()
  # See https://github.com/tensorflow/tensorflow/blob/r2.3/tensorflow/python/keras/layers/normalization.py#L358
  if config['scale']:
    param_shape = params['gamma'].shape
  elif config['center']:
    param_shape = params['beta'].shape
  else:
    param_shape = params['moving_mean'].shape
  # Robustness fix: initialize out_dim so it is always defined. The
  # original code left it unbound (NameError) when every dim equals 1.
  out_dim = param_shape[-1]
  for dim in param_shape:
    if dim != 1:
      out_dim = dim
      break
  config_axis = config['axis']
  # keras may store axis as a single-element list; normalize to a scalar.
  axis = config_axis[0] if isinstance(config_axis, list) else config_axis
  op = (
      OpBuilder(op_def.TFBatchNorm, config,
                params).attr('out_dim', out_dim).attr('axis', axis).build())
  return create_node(node.name, op, node.input_names, node.output_names)
23,427 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
class OpBuilder(object):
def __init__(self, op_cls, config=None, params=None, *args, **kwargs):
def config(self, name, value):
def attr(self, name, value):
def param(self, name, value):
def build(self):
def create_activation_node(kernel_node, activation, output_names):
def convert_layer_dense(node):
  """Convert a keras Dense node to a TFDense node.

  A fused activation is detached into its own node; in that case a list of
  [dense_node, activation_node] is returned instead of a single node.
  """
  config = node.get_config()
  params = node.get_params()
  activation = config['activation']
  builder = OpBuilder(op_def.TFDense, config, params)
  builder.config('activation', None)
  builder.attr('activation', activation)
  builder.attr('in_dim', params['kernel'].shape[0])
  dense_node = ops.Node(node.name, builder.build())
  dense_node.input_names = copy.deepcopy(node.input_names)
  if not activation:
    dense_node.output_names = copy.deepcopy(node.output_names)
    return dense_node
  actv_node = create_activation_node(dense_node, activation,
                                     node.output_names)
  return [dense_node, actv_node]
23,428 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
class OpBuilder(object):
def __init__(self, op_cls, config=None, params=None, *args, **kwargs):
def config(self, name, value):
def attr(self, name, value):
def param(self, name, value):
def build(self):
def _convert_node_to_generic(node):
# Dispatch table from raw TF op names (and op-lambda function names) to the
# corresponding nndct op definition classes. Converters fall back to a
# generic node when an op type is missing from this map.
_tf_type_to_nndct = {
    'Add': op_def.TFAdd,
    'AddV2': op_def.TFAdd,
    'BiasAdd': op_def.TFBiasAdd,
    'Identity': op_def.TFIdentity,
    'NoOp': op_def.TFNoOp,
    'Reshape': op_def.TFReshape,
    'Sigmoid': op_def.TFSigmoid,
    'Tanh': op_def.TFTanh,
    'GatherV2': op_def.TFGather,
    'RFFT': op_def.TFRFFT,
    'ComplexAbs': op_def.TFComplexAbs,
    'Angle': op_def.TFAngle,
    'Exp': op_def.TFExp,
    'IRFFT': op_def.TFIRFFT,
    'Pad': op_def.TFPad,
    'Transpose': op_def.TFTranspose,
    'Sum': op_def.TFSum,
    'reshape': op_def.TFReshape,
    'concat': op_def.TFConcat,
    'ConcatV2': op_def.TFConcat,
    '__operators__.add': op_def.TFAdd,
}
def create_node(name, op, input_names, output_names):
def convert_tf_op_lambda(node):
  """Convert a TFOpLambda node via the dispatch table, falling back to a
  generic node when the wrapped function is not registered."""
  fn_name = node.get_config()["function"]
  op_cls = _tf_type_to_nndct.get(fn_name)
  if op_cls is None:
    return _convert_node_to_generic(node)
  op = OpBuilder(op_cls, node.get_config(), None).build()
  return create_node(node.name, op, node.input_names, node.output_names)
23,429 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
class OpBuilder(object):
  """Fluent builder that instantiates an nndct op and populates its
  config, attributes and parameter tensors.

  A class-level counter tracks how many ops of each type have been built
  so parameter tensors get unique names like ``dense_0:kernel``.
  """

  _OP_COUNT = {}

  # TODO(yuwang): Maybe use op object instead of op class ?
  def __init__(self, op_cls, config=None, params=None, *args, **kwargs):
    self._op = op_cls(*args, **kwargs)
    self._config = config if config else {}
    self._params = params if params else {}
    # Op classes may optionally declare enum-like AttrName/ParamName.
    self._AttrName = op_cls.AttrName if hasattr(op_cls, 'AttrName') else None
    self._ParamName = op_cls.ParamName if hasattr(op_cls, 'ParamName') else None
    op_type = self._op.type
    if op_type not in self._OP_COUNT:
      self._OP_COUNT[op_type] = 0
    self._OP_COUNT[op_type] = self._OP_COUNT[op_type] + 1
    for name, value in self._config.items():
      self._op.set_config(name, value)
    for name, value in self._params.items():
      self.param(name, value)

  def config(self, name, value):
    """Set a single config entry on the op; returns self for chaining."""
    self._op.set_config(name, value)
    return self

  def attr(self, name, value):
    """Set the attribute whose enum value equals `name`.

    Raises:
      ValueError: if the op declares no attributes, or `name` matches no
        declared attribute.
    """
    if not self._AttrName:
      raise ValueError('Op {} does not has any attributes'.format(
          type(self._op)))
    for attr in self._AttrName:
      if name == attr.value:
        break
    else:
      # Bug fix: previously an unknown name fell through the loop and the
      # value was silently assigned to the LAST attribute in the enum.
      raise ValueError('Op {} has no attribute named "{}"'.format(
          type(self._op), name))
    self._op.set_attr(attr, value)
    return self

  def param(self, name, value):
    """Register a parameter tensor, named '<op_type>_<index>:<name>'."""
    if not self._ParamName:
      param = name
    else:
      param = utils.op_param_by_name(self._op, name)
    if not param:
      raise ValueError('{} does not has a param named "{}"'.format(
          type(self._op), name))
    op_type = self._op.type
    index = self._OP_COUNT[op_type] - 1
    # Naming tensor like dense_0:weight
    name = '{}_{}:{}'.format(op_type, index, name)
    tensor = tensor_utils.param_from_tf_numpy(name, value)
    self._op.set_param(param, tensor)
    return self

  def build(self):
    """Return the fully-populated op object."""
    return self._op
def _convert_node_to_generic(node):
  """Fallback conversion: wrap an unregistered layer as a TFGeneric node,
  carrying its config and params through unchanged.
  """
  builder = OpBuilder(op_def.TFGeneric, node.get_config(), node.get_params())
  builder.attr('layer_class', type(node.op))
  return create_node(node.name, builder.build(), node.input_names,
                     node.output_names)
_tf_type_to_nndct = {
'Add': op_def.TFAdd,
'AddV2': op_def.TFAdd,
'BiasAdd': op_def.TFBiasAdd,
'Identity': op_def.TFIdentity,
'NoOp': op_def.TFNoOp,
'Reshape': op_def.TFReshape,
'Sigmoid': op_def.TFSigmoid,
'Tanh': op_def.TFTanh,
'GatherV2': op_def.TFGather,
'RFFT': op_def.TFRFFT,
'ComplexAbs': op_def.TFComplexAbs,
'Angle': op_def.TFAngle,
'Exp': op_def.TFExp,
'IRFFT': op_def.TFIRFFT,
'Pad': op_def.TFPad,
'Transpose': op_def.TFTranspose,
'Sum': op_def.TFSum,
'reshape': op_def.TFReshape,
'concat': op_def.TFConcat,
'ConcatV2': op_def.TFConcat,
'__operators__.add': op_def.TFAdd,
}
def create_node(name, op, input_names, output_names):
  """Build a graph node for `op`, wired to the given tensor names."""
  node = ops.Node(name, op)
  node.input_names[:] = list(input_names)
  node.output_names[:] = list(output_names)
  return node
def convert_tensorflow_op_layer(node):
  """Convert a TensorFlowOpLayer node using the op dispatch table, falling
  back to a generic node for unregistered op types."""
  tf_op = node.get_config()['node_def']['op']
  op_cls = _tf_type_to_nndct.get(tf_op)
  if op_cls is None:
    return _convert_node_to_generic(node)
  op = OpBuilder(op_cls, node.get_config()).build()
  return create_node(node.name, op, node.input_names, node.output_names)
23,430 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
class OpBuilder(object):
_OP_COUNT = {}
# TODO(yuwang): Maybe use op object instead of op class ?
def __init__(self, op_cls, config=None, params=None, *args, **kwargs):
self._op = op_cls(*args, **kwargs)
self._config = config if config else {}
self._params = params if params else {}
self._AttrName = op_cls.AttrName if hasattr(op_cls, 'AttrName') else None
self._ParamName = op_cls.ParamName if hasattr(op_cls, 'ParamName') else None
op_type = self._op.type
if op_type not in self._OP_COUNT:
self._OP_COUNT[op_type] = 0
self._OP_COUNT[op_type] = self._OP_COUNT[op_type] + 1
for name, value in self._config.items():
self._op.set_config(name, value)
for name, value in self._params.items():
self.param(name, value)
def config(self, name, value):
self._op.set_config(name, value)
return self
def attr(self, name, value):
if not self._AttrName:
raise ValueError('Op {} does not has any attributes'.format(
type(self._op)))
for attr in self._AttrName:
if name == attr.value:
break
self._op.set_attr(attr, value)
return self
def param(self, name, value):
if not self._ParamName:
param = name
else:
param = utils.op_param_by_name(self._op, name)
if not param:
raise ValueError('{} does not has a param named "{}"'.format(
type(self._op), name))
op_type = self._op.type
index = self._OP_COUNT[op_type] - 1
# Naming tensor like dense_0:weight
name = '{}_{}:{}'.format(op_type, index, name)
tensor = tensor_utils.param_from_tf_numpy(name, value)
self._op.set_param(param, tensor)
return self
def build(self):
return self._op
def create_node(name, op, input_names, output_names):
node = ops.Node(name, op)
node.input_names[:] = input_names
node.output_names[:] = output_names
return node
def convert_layer_add(node):
  """Convert a keras Add layer node to a TFAdd op node."""
  add_op = OpBuilder(op_def.TFAdd, node.get_config(), None).build()
  return create_node(node.name, add_op, node.input_names, node.output_names)
23,431 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
class OpBuilder(object):
def __init__(self, op_cls, config=None, params=None, *args, **kwargs):
def config(self, name, value):
def attr(self, name, value):
def param(self, name, value):
def build(self):
def create_node(name, op, input_names, output_names):
def convert_layer_sub(node):
  """Convert a keras Subtract layer node to a TFSubtract op node."""
  sub_op = OpBuilder(op_def.TFSubtract, node.get_config(), None).build()
  return create_node(node.name, sub_op, node.input_names, node.output_names)
23,432 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
class OpBuilder(object):
_OP_COUNT = {}
# TODO(yuwang): Maybe use op object instead of op class ?
def __init__(self, op_cls, config=None, params=None, *args, **kwargs):
self._op = op_cls(*args, **kwargs)
self._config = config if config else {}
self._params = params if params else {}
self._AttrName = op_cls.AttrName if hasattr(op_cls, 'AttrName') else None
self._ParamName = op_cls.ParamName if hasattr(op_cls, 'ParamName') else None
op_type = self._op.type
if op_type not in self._OP_COUNT:
self._OP_COUNT[op_type] = 0
self._OP_COUNT[op_type] = self._OP_COUNT[op_type] + 1
for name, value in self._config.items():
self._op.set_config(name, value)
for name, value in self._params.items():
self.param(name, value)
def config(self, name, value):
self._op.set_config(name, value)
return self
def attr(self, name, value):
if not self._AttrName:
raise ValueError('Op {} does not has any attributes'.format(
type(self._op)))
for attr in self._AttrName:
if name == attr.value:
break
self._op.set_attr(attr, value)
return self
def param(self, name, value):
if not self._ParamName:
param = name
else:
param = utils.op_param_by_name(self._op, name)
if not param:
raise ValueError('{} does not has a param named "{}"'.format(
type(self._op), name))
op_type = self._op.type
index = self._OP_COUNT[op_type] - 1
# Naming tensor like dense_0:weight
name = '{}_{}:{}'.format(op_type, index, name)
tensor = tensor_utils.param_from_tf_numpy(name, value)
self._op.set_param(param, tensor)
return self
def build(self):
return self._op
def create_node(name, op, input_names, output_names):
node = ops.Node(name, op)
node.input_names[:] = input_names
node.output_names[:] = output_names
return node
def convert_layer_multiply(node):
  """Convert a keras Multiply layer node to a TFMultiplyLayer op node."""
  mul_op = OpBuilder(op_def.TFMultiplyLayer, node.get_config(), None).build()
  return create_node(node.name, mul_op, node.input_names, node.output_names)
23,433 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
class OpBuilder(object):
_OP_COUNT = {}
# TODO(yuwang): Maybe use op object instead of op class ?
def __init__(self, op_cls, config=None, params=None, *args, **kwargs):
self._op = op_cls(*args, **kwargs)
self._config = config if config else {}
self._params = params if params else {}
self._AttrName = op_cls.AttrName if hasattr(op_cls, 'AttrName') else None
self._ParamName = op_cls.ParamName if hasattr(op_cls, 'ParamName') else None
op_type = self._op.type
if op_type not in self._OP_COUNT:
self._OP_COUNT[op_type] = 0
self._OP_COUNT[op_type] = self._OP_COUNT[op_type] + 1
for name, value in self._config.items():
self._op.set_config(name, value)
for name, value in self._params.items():
self.param(name, value)
def config(self, name, value):
self._op.set_config(name, value)
return self
def attr(self, name, value):
if not self._AttrName:
raise ValueError('Op {} does not has any attributes'.format(
type(self._op)))
for attr in self._AttrName:
if name == attr.value:
break
self._op.set_attr(attr, value)
return self
def param(self, name, value):
if not self._ParamName:
param = name
else:
param = utils.op_param_by_name(self._op, name)
if not param:
raise ValueError('{} does not has a param named "{}"'.format(
type(self._op), name))
op_type = self._op.type
index = self._OP_COUNT[op_type] - 1
# Naming tensor like dense_0:weight
name = '{}_{}:{}'.format(op_type, index, name)
tensor = tensor_utils.param_from_tf_numpy(name, value)
self._op.set_param(param, tensor)
return self
def build(self):
return self._op
def create_node(name, op, input_names, output_names):
node = ops.Node(name, op)
node.input_names[:] = input_names
node.output_names[:] = output_names
return node
def convert_layer_embedding(node):
  """Convert a keras Embedding layer node to a TFEmbedding op node."""
  emb_op = OpBuilder(op_def.TFEmbedding, node.get_config(),
                     node.get_params()).build()
  return create_node(node.name, emb_op, node.input_names, node.output_names)
23,434 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
class OpBuilder(object):
_OP_COUNT = {}
# TODO(yuwang): Maybe use op object instead of op class ?
def __init__(self, op_cls, config=None, params=None, *args, **kwargs):
self._op = op_cls(*args, **kwargs)
self._config = config if config else {}
self._params = params if params else {}
self._AttrName = op_cls.AttrName if hasattr(op_cls, 'AttrName') else None
self._ParamName = op_cls.ParamName if hasattr(op_cls, 'ParamName') else None
op_type = self._op.type
if op_type not in self._OP_COUNT:
self._OP_COUNT[op_type] = 0
self._OP_COUNT[op_type] = self._OP_COUNT[op_type] + 1
for name, value in self._config.items():
self._op.set_config(name, value)
for name, value in self._params.items():
self.param(name, value)
def config(self, name, value):
self._op.set_config(name, value)
return self
def attr(self, name, value):
if not self._AttrName:
raise ValueError('Op {} does not has any attributes'.format(
type(self._op)))
for attr in self._AttrName:
if name == attr.value:
break
self._op.set_attr(attr, value)
return self
def param(self, name, value):
if not self._ParamName:
param = name
else:
param = utils.op_param_by_name(self._op, name)
if not param:
raise ValueError('{} does not has a param named "{}"'.format(
type(self._op), name))
op_type = self._op.type
index = self._OP_COUNT[op_type] - 1
# Naming tensor like dense_0:weight
name = '{}_{}:{}'.format(op_type, index, name)
tensor = tensor_utils.param_from_tf_numpy(name, value)
self._op.set_param(param, tensor)
return self
def build(self):
return self._op
def _parse_base_rnn(layer):
  """Parse a keras RNN layer into a TFRNN op.

  Handles both a plain cell and StackedRNNCells; the resulting TFRNN op
  inherits every parameter tensor from the parsed cell op.
  """
  if keras_utils.is_stacked_rnn_cells(layer.cell):
    cell_op = _parse_stacked_rnn_cells(layer.cell)
  else:
    cell_op = _parse_rnn_layer(layer.cell)
  op = (
      OpBuilder(op_def.TFRNN, layer.get_config(), None).config('cell',
                                                               cell_op).build())
  # Inherit params from cell op.
  for name, tensor in cell_op.params.items():
    op.set_param(name, tensor)
  return op
def create_node(name, op, input_names, output_names):
node = ops.Node(name, op)
node.input_names[:] = input_names
node.output_names[:] = output_names
return node
def convert_wrapper_bidirectional(node):
  """Convert a keras Bidirectional wrapper node.

  Both directions are parsed as TFRNN ops; the backward op is forced to
  go_backwards=True. Parameters of both direction ops are merged onto the
  wrapper op under 'forward_rnn/' and 'backward_rnn/' name prefixes.
  """
  layer = node.op
  forward_op = _parse_base_rnn(layer.forward_layer)
  backward_op = _parse_base_rnn(layer.backward_layer)
  backward_op.set_config('go_backwards', True)
  op = (
      OpBuilder(op_def.TFBidirectional, node.get_config(),
                None).config('layer',
                             forward_op).config('backward_layer',
                                                backward_op).build())
  # Re-register each direction's params on the wrapper op, namespaced so
  # forward and backward tensors cannot collide.
  combined_params = {}
  for name, tensor in forward_op.params.items():
    combined_params['forward_rnn/' + name] = tensor
  for name, tensor in backward_op.params.items():
    combined_params['backward_rnn/' + name] = tensor
  for name, tensor in combined_params.items():
    op.set_param(name, tensor)
  return create_node(node.name, op, node.input_names, node.output_names)
23,435 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
def _parse_base_rnn(layer):
def create_node(name, op, input_names, output_names):
def convert_layer_rnn(node):
  """Convert a keras RNN wrapper node into a TFRNN op node."""
  rnn_op = _parse_base_rnn(node.op)
  return create_node(node.name, rnn_op, node.input_names, node.output_names)
23,436 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
def _parse_rnn_layer(layer):
  """Parse a (non-stacked) RNN cell/layer into an nndct Operation.

  LSTM kernels may be stored concatenated along the last axis; when the
  trailing dim equals 4 * units the weights are split into per-gate
  tensors suffixed _i/_f/_c/_o (keras gate order i, f, c, o —
  TODO confirm against the keras LSTM kernel layout).
  """
  config = layer.get_config()
  params = keras_utils.get_named_weights(layer)
  # Naive method to determine if kernel weights are concated together.
  splited_params = {}
  units = config['units']
  if list(params.values())[0].shape[-1] == units * 4:
    suffix = ['_i', '_f', '_c', '_o']
    for name, value in params.items():
      for i in range(len(suffix)):
        # np.copy so each gate owns its own array rather than a view.
        splited_params[name + suffix[i]] = np.copy(value[..., i *
                                                         units:(i + 1) * units])
  else:
    splited_params = params
  op = (
      OpBuilder(ops.Operation, config, splited_params,
                _rnn_layer_to_op[type(layer)]).build())
  return op
recurrent.LSTM, recurrent_v2.LSTM, recurrent.LSTMCell, recurrent_v2.LSTMCell
def create_node(name, op, input_names, output_names):
node = ops.Node(name, op)
node.input_names[:] = input_names
node.output_names[:] = output_names
return node
def convert_layer_lstm(node):
  """Convert a keras LSTM node, validating cell type and activation first.

  Raises:
    NotImplementedError: if the LSTM wraps a custom (non-builtin) cell.
    ValueError: if recurrent_activation is hard_sigmoid (unsupported).
  """
  layer = node.op
  if type(layer) in [recurrent.LSTM, recurrent_v2.LSTM]:
    cell = layer.cell
    cell_cls = [recurrent.LSTMCell, recurrent_v2.LSTMCell]
    if type(cell) not in cell_cls:
      raise NotImplementedError(
          'Custom LSTM cell is not supported. Expected {}, but got {}'.format(
              cell_cls, type(cell)))
    if cell.recurrent_activation == activations.hard_sigmoid:
      raise ValueError(
          'recurrent_activation="hard_sigmoid" is not allowd, use "sigmoid" instead.'
      )
  op = _parse_rnn_layer(node.op)
  return create_node(node.name, op, node.input_names, node.output_names)
23,437 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
class OpBuilder(object):
_OP_COUNT = {}
# TODO(yuwang): Maybe use op object instead of op class ?
def __init__(self, op_cls, config=None, params=None, *args, **kwargs):
self._op = op_cls(*args, **kwargs)
self._config = config if config else {}
self._params = params if params else {}
self._AttrName = op_cls.AttrName if hasattr(op_cls, 'AttrName') else None
self._ParamName = op_cls.ParamName if hasattr(op_cls, 'ParamName') else None
op_type = self._op.type
if op_type not in self._OP_COUNT:
self._OP_COUNT[op_type] = 0
self._OP_COUNT[op_type] = self._OP_COUNT[op_type] + 1
for name, value in self._config.items():
self._op.set_config(name, value)
for name, value in self._params.items():
self.param(name, value)
def config(self, name, value):
self._op.set_config(name, value)
return self
def attr(self, name, value):
if not self._AttrName:
raise ValueError('Op {} does not has any attributes'.format(
type(self._op)))
for attr in self._AttrName:
if name == attr.value:
break
self._op.set_attr(attr, value)
return self
def param(self, name, value):
if not self._ParamName:
param = name
else:
param = utils.op_param_by_name(self._op, name)
if not param:
raise ValueError('{} does not has a param named "{}"'.format(
type(self._op), name))
op_type = self._op.type
index = self._OP_COUNT[op_type] - 1
# Naming tensor like dense_0:weight
name = '{}_{}:{}'.format(op_type, index, name)
tensor = tensor_utils.param_from_tf_numpy(name, value)
self._op.set_param(param, tensor)
return self
def build(self):
return self._op
def create_node(name, op, input_names, output_names):
node = ops.Node(name, op)
node.input_names[:] = input_names
node.output_names[:] = output_names
return node
def convert_layer_simplernn(node):
  """Convert a keras SimpleRNN layer node to a TFSimpleRNN op node."""
  rnn_op = OpBuilder(op_def.TFSimpleRNN, node.get_config(),
                     node.get_params()).build()
  return create_node(node.name, rnn_op, node.input_names, node.output_names)
23,438 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
class OpBuilder(object):
_OP_COUNT = {}
# TODO(yuwang): Maybe use op object instead of op class ?
def __init__(self, op_cls, config=None, params=None, *args, **kwargs):
self._op = op_cls(*args, **kwargs)
self._config = config if config else {}
self._params = params if params else {}
self._AttrName = op_cls.AttrName if hasattr(op_cls, 'AttrName') else None
self._ParamName = op_cls.ParamName if hasattr(op_cls, 'ParamName') else None
op_type = self._op.type
if op_type not in self._OP_COUNT:
self._OP_COUNT[op_type] = 0
self._OP_COUNT[op_type] = self._OP_COUNT[op_type] + 1
for name, value in self._config.items():
self._op.set_config(name, value)
for name, value in self._params.items():
self.param(name, value)
def config(self, name, value):
self._op.set_config(name, value)
return self
def attr(self, name, value):
if not self._AttrName:
raise ValueError('Op {} does not has any attributes'.format(
type(self._op)))
for attr in self._AttrName:
if name == attr.value:
break
self._op.set_attr(attr, value)
return self
def param(self, name, value):
if not self._ParamName:
param = name
else:
param = utils.op_param_by_name(self._op, name)
if not param:
raise ValueError('{} does not has a param named "{}"'.format(
type(self._op), name))
op_type = self._op.type
index = self._OP_COUNT[op_type] - 1
# Naming tensor like dense_0:weight
name = '{}_{}:{}'.format(op_type, index, name)
tensor = tensor_utils.param_from_tf_numpy(name, value)
self._op.set_param(param, tensor)
return self
def build(self):
return self._op
def create_node(name, op, input_names, output_names):
node = ops.Node(name, op)
node.input_names[:] = input_names
node.output_names[:] = output_names
return node
def convert_layer_gru(node):
  """Convert a keras GRU layer node to a TFGRU op node."""
  gru_op = OpBuilder(op_def.TFGRU, node.get_config(),
                     node.get_params()).build()
  return create_node(node.name, gru_op, node.input_names, node.output_names)
23,439 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import layers
from tensorflow.python.util import nest
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import op_def
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import keras_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tensor_utils
from tf_nndct.utils import tf_utils
class OpBuilder(object):
_OP_COUNT = {}
# TODO(yuwang): Maybe use op object instead of op class ?
def __init__(self, op_cls, config=None, params=None, *args, **kwargs):
self._op = op_cls(*args, **kwargs)
self._config = config if config else {}
self._params = params if params else {}
self._AttrName = op_cls.AttrName if hasattr(op_cls, 'AttrName') else None
self._ParamName = op_cls.ParamName if hasattr(op_cls, 'ParamName') else None
op_type = self._op.type
if op_type not in self._OP_COUNT:
self._OP_COUNT[op_type] = 0
self._OP_COUNT[op_type] = self._OP_COUNT[op_type] + 1
for name, value in self._config.items():
self._op.set_config(name, value)
for name, value in self._params.items():
self.param(name, value)
def config(self, name, value):
self._op.set_config(name, value)
return self
def attr(self, name, value):
if not self._AttrName:
raise ValueError('Op {} does not has any attributes'.format(
type(self._op)))
for attr in self._AttrName:
if name == attr.value:
break
self._op.set_attr(attr, value)
return self
def param(self, name, value):
if not self._ParamName:
param = name
else:
param = utils.op_param_by_name(self._op, name)
if not param:
raise ValueError('{} does not has a param named "{}"'.format(
type(self._op), name))
op_type = self._op.type
index = self._OP_COUNT[op_type] - 1
# Naming tensor like dense_0:weight
name = '{}_{}:{}'.format(op_type, index, name)
tensor = tensor_utils.param_from_tf_numpy(name, value)
self._op.set_param(param, tensor)
return self
def build(self):
return self._op
def create_activation_node(kernel_node, activation, output_names):
  """Create a standalone activation node chained after `kernel_node`.

  The kernel node's output is rewired to a single tensor name which feeds
  the new activation node; the activation node then takes over the
  original output names. Note: mutates `kernel_node.output_names`.
  """
  actv = activations.get(activation)
  #op = ops.Operation(_activation_cvt_map[actv])
  op = _activation_cvt_map[actv]()
  actv_name = activations.serialize(actv)
  actv_node = ops.Node('/'.join([kernel_node.name, actv_name]), op)
  # Connect the activation to the kernel
  kernel_out_tensor = kernel_node.name + ':0'
  kernel_node.output_names = [kernel_out_tensor]
  actv_node.input_names = [kernel_out_tensor]
  actv_node.output_names = copy.deepcopy(output_names)
  # Dettach activation node from its kernel node.
  # The layer name of activation node should not be set to kernel's layer name.
  actv_node.layer_name = _NO_LAYER_NAME
  return actv_node
def convert_layer_conv2d(node):
  """Convert a keras Conv2D node to a TFConv2D node.

  A fused activation is detached into its own node; in that case a list of
  [conv_node, activation_node] is returned instead of a single node.
  """
  config = node.get_config()
  params = node.get_params()
  activation = config['activation']
  # Kernel layout: [*kernel_size, input_channels / groups, filters]
  builder = OpBuilder(op_def.TFConv2D, config, params)
  builder.config('activation', None)
  builder.attr('activation', activation)
  builder.attr('in_dim', params['kernel'].shape[-2])
  conv_node = ops.Node(node.name, builder.build())
  conv_node.input_names = copy.deepcopy(node.input_names)
  if not activation:
    conv_node.output_names = copy.deepcopy(node.output_names)
    return conv_node
  actv_node = create_activation_node(conv_node, activation,
                                     node.output_names)
  return [conv_node, actv_node]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.