diff --git a/lib/python3.10/site-packages/numba/typed/__init__.py b/lib/python3.10/site-packages/numba/typed/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..62004deb0a7c8868993b48ac4597a24433ce449c --- /dev/null +++ b/lib/python3.10/site-packages/numba/typed/__init__.py @@ -0,0 +1,20 @@ +import importlib + + +_delayed_symbols = { + "Dict": ".typeddict", + "List": ".typedlist", +} + + +def __getattr__(name): + # Uses PEP-562 but requires python>3.6 + if name in _delayed_symbols: + modpath = _delayed_symbols[name] + mod = importlib.import_module(modpath, __name__) + return getattr(mod, name) + else: + try: + return importlib.import_module(f".{name}", __name__) + except ModuleNotFoundError: + raise AttributeError diff --git a/lib/python3.10/site-packages/numba/typed/py.typed b/lib/python3.10/site-packages/numba/typed/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lib/python3.10/site-packages/numba/typed/typedlist.py b/lib/python3.10/site-packages/numba/typed/typedlist.py new file mode 100644 index 0000000000000000000000000000000000000000..2c90dabeac43c5728ab2242e5ac75b7ad0e266c2 --- /dev/null +++ b/lib/python3.10/site-packages/numba/typed/typedlist.py @@ -0,0 +1,688 @@ +""" +Python wrapper that connects CPython interpreter to the Numba typed-list. + +This is the code that is used when creating typed lists outside of a `@jit` +context and when returning a typed-list from a `@jit` decorated function. It +basically a Python class that has a Numba allocated typed-list under the hood +and uses `@jit` functions to access it. Since it inherits from MutableSequence +it should really quack like the CPython `list`. 
+ +""" +from collections.abc import MutableSequence + +from numba.core.types import ListType +from numba.core.imputils import numba_typeref_ctor +from numba.core.dispatcher import Dispatcher +from numba.core import types, config, cgutils +from numba import njit, typeof +from numba.core.extending import ( + overload, + box, + unbox, + NativeValue, + type_callable, + overload_classmethod, +) +from numba.typed import listobject +from numba.core.errors import TypingError, LoweringError +from numba.core.typing.templates import Signature +import typing as pt + + +Int_or_Slice = pt.Union["pt.SupportsIndex", slice] + + +T_co = pt.TypeVar('T_co', covariant=True) + + +class _Sequence(pt.Protocol[T_co]): + def __getitem__(self, i: int) -> T_co: + ... + + def __len__(self) -> int: + ... + + +DEFAULT_ALLOCATED = listobject.DEFAULT_ALLOCATED + + +@njit +def _make_list(itemty, allocated=DEFAULT_ALLOCATED): + return listobject._as_meminfo(listobject.new_list(itemty, + allocated=allocated)) + + +@njit +def _length(l): + return len(l) + + +@njit +def _allocated(l): + return l._allocated() + + +@njit +def _is_mutable(l): + return l._is_mutable() + + +@njit +def _make_mutable(l): + return l._make_mutable() + + +@njit +def _make_immutable(l): + return l._make_immutable() + + +@njit +def _append(l, item): + l.append(item) + + +@njit +def _setitem(l, i, item): + l[i] = item + + +@njit +def _getitem(l, i): + return l[i] + + +@njit +def _contains(l, item): + return item in l + + +@njit +def _count(l, item): + return l.count(item) + + +@njit +def _pop(l, i): + return l.pop(i) + + +@njit +def _delitem(l, i): + del l[i] + + +@njit +def _extend(l, iterable): + return l.extend(iterable) + + +@njit +def _insert(l, i, item): + l.insert(i, item) + + +@njit +def _remove(l, item): + l.remove(item) + + +@njit +def _clear(l): + l.clear() + + +@njit +def _reverse(l): + l.reverse() + + +@njit +def _copy(l): + return l.copy() + + +@njit +def _eq(t, o): + return t == o + + +@njit +def _ne(t, o): + return 
t != o + + +@njit +def _lt(t, o): + return t < o + + +@njit +def _le(t, o): + return t <= o + + +@njit +def _gt(t, o): + return t > o + + +@njit +def _ge(t, o): + return t >= o + + +@njit +def _index(l, item, start, end): + return l.index(item, start, end) + + +@njit +def _sort(l, key, reverse): + return l.sort(key, reverse) + + +def _from_meminfo_ptr(ptr, listtype): + return List(meminfo=ptr, lsttype=listtype) + + +T = pt.TypeVar('T') +T_or_ListT = pt.Union[T, 'List[T]'] + + +class List(MutableSequence, pt.Generic[T]): + """A typed-list usable in Numba compiled functions. + + Implements the MutableSequence interface. + """ + + _legal_kwargs = ["lsttype", "meminfo", "allocated"] + + def __new__(cls, + *args, + lsttype=None, + meminfo=None, + allocated=DEFAULT_ALLOCATED, + **kwargs): + if config.DISABLE_JIT: + return list(*args, **kwargs) + else: + return object.__new__(cls) + + @classmethod + def empty_list(cls, item_type, allocated=DEFAULT_ALLOCATED): + """Create a new empty List. + + Parameters + ---------- + item_type: Numba type + type of the list item. + allocated: int + number of items to pre-allocate + """ + if config.DISABLE_JIT: + return list() + else: + return cls(lsttype=ListType(item_type), allocated=allocated) + + def __init__(self, *args, **kwargs): + """ + For users, the constructor does not take any parameters. + The keyword arguments are for internal use only. + + Parameters + ---------- + args: iterable + The iterable to initialize the list from + lsttype : numba.core.types.ListType; keyword-only + Used internally for the list type. + meminfo : MemInfo; keyword-only + Used internally to pass the MemInfo object when boxing. 
+ allocated: int; keyword-only + Used internally to pre-allocate space for items + """ + illegal_kwargs = any((kw not in self._legal_kwargs for kw in kwargs)) + if illegal_kwargs or args and kwargs: + raise TypeError("List() takes no keyword arguments") + if kwargs: + self._list_type, self._opaque = self._parse_arg(**kwargs) + else: + self._list_type = None + if args: + if not 0 <= len(args) <= 1: + raise TypeError( + "List() expected at most 1 argument, got {}" + .format(len(args)) + ) + iterable = args[0] + # Special case Numpy scalars or anything that quacks like a + # NumPy Array. + if hasattr(iterable, "ndim") and iterable.ndim == 0: + self.append(iterable.item()) + else: + try: + iter(iterable) + except TypeError: + raise TypeError("List() argument must be iterable") + for i in args[0]: + self.append(i) + + def _parse_arg(self, lsttype, meminfo=None, allocated=DEFAULT_ALLOCATED): + if not isinstance(lsttype, ListType): + raise TypeError('*lsttype* must be a ListType') + + if meminfo is not None: + opaque = meminfo + else: + opaque = _make_list(lsttype.item_type, allocated=allocated) + return lsttype, opaque + + @property + def _numba_type_(self): + if self._list_type is None: + raise TypeError("invalid operation on untyped list") + return self._list_type + + @property + def _typed(self): + """Returns True if the list is typed. 
+ """ + return self._list_type is not None + + @property + def _dtype(self): + if not self._typed: + raise RuntimeError("invalid operation on untyped list") + return self._list_type.dtype + + def _initialise_list(self, item): + lsttype = types.ListType(typeof(item)) + self._list_type, self._opaque = self._parse_arg(lsttype) + + def __len__(self) -> int: + if not self._typed: + return 0 + else: + return _length(self) + + def _allocated(self): + if not self._typed: + return DEFAULT_ALLOCATED + else: + return _allocated(self) + + def _is_mutable(self): + return _is_mutable(self) + + def _make_mutable(self): + return _make_mutable(self) + + def _make_immutable(self): + return _make_immutable(self) + + def __eq__(self, other): + return _eq(self, other) + + def __ne__(self, other): + return _ne(self, other) + + def __lt__(self, other): + return _lt(self, other) + + def __le__(self, other): + return _le(self, other) + + def __gt__(self, other): + return _gt(self, other) + + def __ge__(self, other): + return _ge(self, other) + + def append(self, item: T) -> None: + if not self._typed: + self._initialise_list(item) + _append(self, item) + + # noqa F811 comments required due to github.com/PyCQA/pyflakes/issues/592 + # noqa E704 required to follow overload style of using ... in the same line + @pt.overload # type: ignore[override] + def __setitem__(self, i: int, o: T) -> None: ... # noqa: F811, E704 + @pt.overload + def __setitem__(self, s: slice, o: 'List[T]') -> None: ... # noqa: F811, E704, E501 + + def __setitem__(self, i: Int_or_Slice, item: T_or_ListT) -> None: # noqa: F811, E501 + if not self._typed: + self._initialise_list(item) + _setitem(self, i, item) + + # noqa F811 comments required due to github.com/PyCQA/pyflakes/issues/592 + # noqa E704 required to follow overload style of using ... in the same line + @pt.overload + def __getitem__(self, i: int) -> T: ... # noqa: F811, E704 + @pt.overload + def __getitem__(self, i: slice) -> 'List[T]': ... 
# noqa: F811, E704 + + def __getitem__(self, i: Int_or_Slice) -> T_or_ListT: # noqa: F811 + if not self._typed: + raise IndexError + else: + return _getitem(self, i) + + def __iter__(self) -> pt.Iterator[T]: + for i in range(len(self)): + yield self[i] + + def __contains__(self, item: T) -> bool: # type: ignore[override] + return _contains(self, item) + + def __delitem__(self, i: Int_or_Slice) -> None: + _delitem(self, i) + + def insert(self, i: int, item: T) -> None: + if not self._typed: + self._initialise_list(item) + _insert(self, i, item) + + def count(self, item: T) -> int: + return _count(self, item) + + def pop(self, i: "pt.SupportsIndex" = -1) -> T: + return _pop(self, i) + + def extend(self, iterable: "_Sequence[T]") -> None: #type: ignore[override] + # Empty iterable, do nothing + if len(iterable) == 0: + return None + if not self._typed: + # Need to get the first element of the iterable to initialise the + # type of the list. FIXME: this may be a problem if the iterable + # can not be sliced. + self._initialise_list(iterable[0]) + return _extend(self, iterable) + + def remove(self, item: T) -> None: + return _remove(self, item) + + def clear(self): + return _clear(self) + + def reverse(self): + return _reverse(self) + + def copy(self): + return _copy(self) + + def index(self, item: T, start: pt.Optional[int] = None, + stop: pt.Optional[int] = None) -> int: + return _index(self, item, start, stop) + + def sort(self, key=None, reverse=False): + """Sort the list inplace. + + See also ``list.sort()`` + """ + # If key is not already a dispatcher object, make it so + if callable(key) and not isinstance(key, Dispatcher): + key = njit(key) + return _sort(self, key, reverse) + + def __str__(self): + buf = [] + for x in self: + buf.append("{}".format(x)) + # Check whether the code was invoked from IPython shell + try: + get_ipython + preview = ', '.join(buf[:1000]) + suffix = ', ...' 
if len(buf) > 1000 else '' + return '[{0}{1}]'.format(preview, suffix) + except (NameError, IndexError): + return '[{0}]'.format(', '.join(buf)) + + def __repr__(self): + body = str(self) + prefix = str(self._list_type) if self._typed else "ListType[Undefined]" + return "{prefix}({body})".format(prefix=prefix, body=body) + + +@overload_classmethod(ListType, 'empty_list') +def typedlist_empty(cls, item_type, allocated=DEFAULT_ALLOCATED): + if cls.instance_type is not ListType: + return + + def impl(cls, item_type, allocated=DEFAULT_ALLOCATED): + return listobject.new_list(item_type, allocated=allocated) + + return impl + + +@box(types.ListType) +def box_lsttype(typ, val, c): + context = c.context + builder = c.builder + + # XXX deduplicate + ctor = cgutils.create_struct_proxy(typ) + lstruct = ctor(context, builder, value=val) + # Returns the plain MemInfo + boxed_meminfo = c.box( + types.MemInfoPointer(types.voidptr), + lstruct.meminfo, + ) + + modname = c.context.insert_const_string( + c.builder.module, 'numba.typed.typedlist', + ) + typedlist_mod = c.pyapi.import_module(modname) + fmp_fn = c.pyapi.object_getattr_string(typedlist_mod, '_from_meminfo_ptr') + + lsttype_obj = c.pyapi.unserialize(c.pyapi.serialize_object(typ)) + + result_var = builder.alloca(c.pyapi.pyobj) + builder.store(cgutils.get_null_value(c.pyapi.pyobj), result_var) + + with builder.if_then(cgutils.is_not_null(builder, lsttype_obj)): + res = c.pyapi.call_function_objargs( + fmp_fn, (boxed_meminfo, lsttype_obj), + ) + c.pyapi.decref(fmp_fn) + c.pyapi.decref(typedlist_mod) + c.pyapi.decref(boxed_meminfo) + builder.store(res, result_var) + return builder.load(result_var) + + +@unbox(types.ListType) +def unbox_listtype(typ, val, c): + context = c.context + builder = c.builder + + # Check that `type(val) is Dict` + list_type = c.pyapi.unserialize(c.pyapi.serialize_object(List)) + valtype = c.pyapi.object_type(val) + same_type = builder.icmp_unsigned("==", valtype, list_type) + + with 
c.builder.if_else(same_type) as (then, orelse): + with then: + miptr = c.pyapi.object_getattr_string(val, '_opaque') + + native = c.unbox(types.MemInfoPointer(types.voidptr), miptr) + + mi = native.value + ctor = cgutils.create_struct_proxy(typ) + lstruct = ctor(context, builder) + + data_pointer = context.nrt.meminfo_data(builder, mi) + data_pointer = builder.bitcast( + data_pointer, + listobject.ll_list_type.as_pointer(), + ) + + lstruct.data = builder.load(data_pointer) + lstruct.meminfo = mi + + lstobj = lstruct._getvalue() + c.pyapi.decref(miptr) + bb_unboxed = c.builder.basic_block + + with orelse: + # Raise error on incorrect type + c.pyapi.err_format( + "PyExc_TypeError", + "can't unbox a %S as a %S", + valtype, list_type, + ) + bb_else = c.builder.basic_block + + # Phi nodes to gather the output + lstobj_res = c.builder.phi(lstobj.type) + is_error_res = c.builder.phi(cgutils.bool_t) + + lstobj_res.add_incoming(lstobj, bb_unboxed) + lstobj_res.add_incoming(lstobj.type(None), bb_else) + + is_error_res.add_incoming(cgutils.false_bit, bb_unboxed) + is_error_res.add_incoming(cgutils.true_bit, bb_else) + + # cleanup + c.pyapi.decref(list_type) + c.pyapi.decref(valtype) + + return NativeValue(lstobj_res, is_error=is_error_res) + + +# +# The following contains the logic for the type-inferred constructor +# + +def _guess_dtype(iterable): + """Guess the correct dtype of the iterable type. """ + if not isinstance(iterable, types.IterableType): + raise TypingError( + "List() argument must be iterable") + # Special case for nested NumPy arrays. 
+ elif isinstance(iterable, types.Array) and iterable.ndim > 1: + return iterable.copy(ndim=iterable.ndim - 1, layout='A') + elif hasattr(iterable, "dtype"): + return iterable.dtype + elif hasattr(iterable, "yield_type"): + return iterable.yield_type + elif isinstance(iterable, types.UnicodeType): + return iterable + elif isinstance(iterable, types.DictType): + return iterable.key_type + else: + # This should never happen, since the 'dtype' of any iterable + # should have determined above. + raise TypingError( + "List() argument does not have a suitable dtype") + + +@type_callable(ListType) +def typedlist_call(context): + """Defines typing logic for ``List()`` and ``List(iterable)``. + + If no argument is given, the returned typer types a new typed-list with an + undefined item type. If a single argument is given it must be iterable with + a guessable 'dtype'. In this case, the typer types a new typed-list with + the type set to the 'dtype' of the iterable arg. + + Parameters + ---------- + arg : single iterable (optional) + The single optional argument. + + Returns + ------- + typer : function + A typer suitable to type constructor calls. + + Raises + ------ + The returned typer raises a TypingError in case of unsuitable arguments. 
+ + """ + + class Typer(object): + + def attach_sig(self): + from inspect import signature as mypysig + + def mytyper(iterable): + pass + self.pysig = mypysig(mytyper) + + def __call__(self, *args, **kwargs): + if kwargs: + raise TypingError( + "List() takes no keyword arguments" + ) + elif args: + if not 0 <= len(args) <= 1: + raise TypingError( + "List() expected at most 1 argument, got {}" + .format(len(args)) + ) + rt = types.ListType(_guess_dtype(args[0])) + self.attach_sig() + return Signature(rt, args, None, pysig=self.pysig) + else: + item_type = types.undefined + return types.ListType(item_type) + + return Typer() + + +@overload(numba_typeref_ctor) +def impl_numba_typeref_ctor(cls, *args): + """Defines lowering for ``List()`` and ``List(iterable)``. + + This defines the lowering logic to instantiate either an empty typed-list + or a typed-list initialised with values from a single iterable argument. + + Parameters + ---------- + cls : TypeRef + Expecting a TypeRef of a precise ListType. + args: tuple + A tuple that contains a single iterable (optional) + + Returns + ------- + impl : function + An implementation suitable for lowering the constructor call. + + See also: `redirect_type_ctor` in numba/cpython/bulitins.py + """ + list_ty = cls.instance_type + if not isinstance(list_ty, types.ListType): + return # reject + # Ensure the list is precisely typed. + if not list_ty.is_precise(): + msg = "expecting a precise ListType but got {}".format(list_ty) + raise LoweringError(msg) + + item_type = types.TypeRef(list_ty.item_type) + if args: + # special case 0d Numpy arrays + if isinstance(args[0], types.Array) and args[0].ndim == 0: + def impl(cls, *args): + # Instantiate an empty list and populate it with the single + # value from the array. + r = List.empty_list(item_type) + r.append(args[0].item()) + return r + else: + def impl(cls, *args): + # Instantiate an empty list and populate it with values from + # the iterable. 
+ r = List.empty_list(item_type) + for i in args[0]: + r.append(i) + return r + else: + def impl(cls, *args): + # Simply call .empty_list with the item type from *cls* + return List.empty_list(item_type) + + return impl diff --git a/lib/python3.10/site-packages/tf2onnx/__init__.py b/lib/python3.10/site-packages/tf2onnx/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..036d6639424196092a12755b0c878e2c024aceee --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/__init__.py @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: Apache-2.0 + +"""tf2onnx package.""" + +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +__all__ = ["utils", "graph_matcher", "graph", "graph_builder", + "tfonnx", "shape_inference", "schemas", "tf_utils", "tf_loader", "convert"] + +import onnx +from .version import version as __version__ +from . import verbose_logging as logging +from tf2onnx import tfonnx, utils, graph, graph_builder, graph_matcher, shape_inference, schemas, convert # pylint: disable=wrong-import-order +#from tf2onnx import tf_utils, tf_loader diff --git a/lib/python3.10/site-packages/tf2onnx/constants.py b/lib/python3.10/site-packages/tf2onnx/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..2f41772b833242057f0f70e9a5cd3188393194d7 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/constants.py @@ -0,0 +1,49 @@ +# SPDX-License-Identifier: Apache-2.0 + + +""" +common constants +""" + +from onnx import helper + +TF2ONNX_PACKAGE_NAME = __name__.split('.')[0] + +# Built-in supported domains +ONNX_DOMAIN = "" +AI_ONNX_ML_DOMAIN = "ai.onnx.ml" +MICROSOFT_DOMAIN = "com.microsoft" +CONTRIB_OPS_DOMAIN = "ai.onnx.contrib" + +# Default opset version for onnx domain +PREFERRED_OPSET = 9 + +# Default opset for custom ops +TENSORFLOW_OPSET = helper.make_opsetid("ai.onnx.converters.tensorflow", 1) + +# Target for the generated onnx graph. 
It possible targets: +# onnx-1.1 = onnx at v1.1 (winml in rs4 is based on this) +# caffe2 = include some workarounds for caffe2 and winml +TARGET_RS4 = "rs4" +TARGET_RS5 = "rs5" +TARGET_RS6 = "rs6" +TARGET_CAFFE2 = "caffe2" +TARGET_TENSORRT = "tensorrt" +POSSIBLE_TARGETS = [TARGET_RS4, TARGET_RS5, TARGET_RS6, TARGET_CAFFE2, TARGET_TENSORRT] +DEFAULT_TARGET = [] + +NCHW_TO_NHWC = [0, 2, 3, 1] +NHWC_TO_NCHW = [0, 3, 1, 2] +NDHWC_TO_NCDHW = [0, 4, 1, 2, 3] +NCDHW_TO_NDHWC = [0, 2, 3, 4, 1] +HWCN_TO_NCHW = [3, 2, 0, 1] +NCHW_TO_HWCN = [2, 3, 1, 0] + +# Environment variables +ENV_TF2ONNX_DEBUG_MODE = "TF2ONNX_DEBUG_MODE" + +# Mapping opset to IR version. +# Note: opset 7 and opset 8 came out with IR3 but we need IR4 because of PlaceholderWithDefault +OPSET_TO_IR_VERSION = { + 1: 3, 2: 3, 3: 3, 4: 3, 5: 3, 6: 3, 7: 4, 8: 4, 9: 4, 10: 5, 11: 6, 12: 7, 13: 7 +} diff --git a/lib/python3.10/site-packages/tf2onnx/convert.py b/lib/python3.10/site-packages/tf2onnx/convert.py new file mode 100644 index 0000000000000000000000000000000000000000..3ea67a01eb39cac95e2ff8ad3b8900c31f5502c9 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/convert.py @@ -0,0 +1,467 @@ +# SPDX-License-Identifier: Apache-2.0 + + +""" +python -m tf2onnx.convert : api and commandline tool to convert a tensorflow model to onnx +""" + +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +# pylint: disable=unused-argument,unused-import,ungrouped-imports,wrong-import-position + +import argparse +import os +import sys +from distutils.version import LooseVersion + +os.environ['TF_CPP_MIN_LOG_LEVEL'] = "3" + +import tensorflow as tf + +from tf2onnx.tfonnx import process_tf_graph +from tf2onnx import constants, logging, utils, optimizer +from tf2onnx import tf_loader +from tf2onnx.graph import ExternalTensorStorage +from tf2onnx.tf_utils import compress_graph_def + + + +# pylint: disable=unused-argument + +_HELP_TEXT = """ +Usage Examples: + 
+python -m tf2onnx.convert --saved-model saved_model_dir --output model.onnx +python -m tf2onnx.convert --input frozen_graph.pb --inputs X:0 --outputs output:0 --output model.onnx +python -m tf2onnx.convert --checkpoint checkpoint.meta --inputs X:0 --outputs output:0 --output model.onnx + +For help and additional information see: + https://github.com/onnx/tensorflow-onnx + +If you run into issues, open an issue here: + https://github.com/onnx/tensorflow-onnx/issues +""" + + +def get_args(): + """Parse commandline.""" + parser = argparse.ArgumentParser(description="Convert tensorflow graphs to ONNX.", + formatter_class=argparse.RawDescriptionHelpFormatter, epilog=_HELP_TEXT) + parser.add_argument("--input", help="input from graphdef") + parser.add_argument("--graphdef", help="input from graphdef") + parser.add_argument("--saved-model", help="input from saved model") + parser.add_argument("--tag", help="tag to use for saved_model") + parser.add_argument("--signature_def", help="signature_def from saved_model to use") + parser.add_argument("--concrete_function", type=int, default=None, + help="For TF2.x saved_model, index of func signature in __call__ (--signature_def is ignored)") + parser.add_argument("--checkpoint", help="input from checkpoint") + parser.add_argument("--keras", help="input from keras model") + parser.add_argument("--tflite", help="input from tflite model") + parser.add_argument("--large_model", help="use the large model format (for models > 2GB)", action="store_true") + parser.add_argument("--output", help="output model file") + parser.add_argument("--inputs", help="model input_names (optional for saved_model, keras, and tflite)") + parser.add_argument("--outputs", help="model output_names (optional for saved_model, keras, and tflite)") + parser.add_argument("--ignore_default", help="comma-separated list of names of PlaceholderWithDefault " + "ops to change into Placeholder ops") + parser.add_argument("--use_default", help="comma-separated list of 
names of PlaceholderWithDefault ops to " + "change into Identity ops using their default value") + parser.add_argument("--rename-inputs", help="input names to use in final model (optional)") + parser.add_argument("--rename-outputs", help="output names to use in final model (optional)") + parser.add_argument("--opset", type=int, default=None, help="opset version to use for onnx domain") + parser.add_argument("--dequantize", help="Remove quantization from model. Only supported for tflite currently.", + action="store_true") + parser.add_argument("--custom-ops", help="comma-separated map of custom ops to domains in format OpName:domain") + parser.add_argument("--extra_opset", default=None, + help="extra opset with format like domain:version, e.g. com.microsoft:1") + parser.add_argument("--target", default=",".join(constants.DEFAULT_TARGET), choices=constants.POSSIBLE_TARGETS, + help="target platform") + parser.add_argument("--continue_on_error", help="continue_on_error", action="store_true") + parser.add_argument("--verbose", "-v", help="verbose output, option is additive", action="count") + parser.add_argument("--debug", help="debug mode", action="store_true") + parser.add_argument("--output_frozen_graph", help="output frozen tf graph to file") + parser.add_argument("--fold_const", help="Deprecated. 
Constant folding is always enabled.", + action="store_true") + # experimental + parser.add_argument("--inputs-as-nchw", help="transpose inputs as from nhwc to nchw") + args = parser.parse_args() + + args.shape_override = None + if args.input: + # for backward compativility + args.graphdef = args.input + if args.graphdef or args.checkpoint: + if not args.inputs or not args.outputs: + parser.error("graphdef and checkpoint models need to provide inputs and outputs") + if not any([args.graphdef, args.checkpoint, args.saved_model, args.keras, args.tflite]): + parser.print_help() + sys.exit(1) + if args.inputs: + args.inputs, args.shape_override = utils.split_nodename_and_shape(args.inputs) + if args.outputs: + args.outputs = args.outputs.split(",") + if args.ignore_default: + args.ignore_default = args.ignore_default.split(",") + if args.use_default: + args.use_default = args.use_default.split(",") + if args.rename_outputs: + args.rename_outputs = args.rename_outputs.split(",") + if args.rename_inputs: + args.rename_inputs = args.rename_inputs.split(",") + if args.inputs_as_nchw: + args.inputs_as_nchw = args.inputs_as_nchw.split(",") + if args.target: + args.target = args.target.split(",") + if args.signature_def: + args.signature_def = [args.signature_def] + if args.dequantize: + if not args.tflite: + parser.error("dequantize flag is currently only supported for tflite") + if args.extra_opset: + tokens = args.extra_opset.split(':') + if len(tokens) != 2: + parser.error("invalid extra_opset argument") + args.extra_opset = [utils.make_opsetid(tokens[0], int(tokens[1]))] + + return args + + +def make_default_custom_op_handler(domain): + def default_custom_op_handler(ctx, node, name, args): + node.domain = domain + return node + return default_custom_op_handler + + +def _convert_common(frozen_graph, name="unknown", large_model=False, output_path=None, + output_frozen_graph=None, **kwargs): + """Common processing for conversion.""" + + model_proto = None + 
external_tensor_storage = None + const_node_values = None + + with tf.Graph().as_default() as tf_graph: + if large_model: + const_node_values = compress_graph_def(frozen_graph) + external_tensor_storage = ExternalTensorStorage() + if output_frozen_graph: + utils.save_protobuf(output_frozen_graph, frozen_graph) + if not kwargs.get("tflite_path"): + tf.import_graph_def(frozen_graph, name='') + g = process_tf_graph(tf_graph, const_node_values=const_node_values, **kwargs) + onnx_graph = optimizer.optimize_graph(g) + model_proto = onnx_graph.make_model("converted from {}".format(name), + external_tensor_storage=external_tensor_storage) + if output_path: + if large_model: + utils.save_onnx_zip(output_path, model_proto, external_tensor_storage) + else: + utils.save_protobuf(output_path, model_proto) + + return model_proto, external_tensor_storage + + +def main(): + args = get_args() + logging.basicConfig(level=logging.get_verbosity_level(args.verbose)) + if args.debug: + utils.set_debug_mode(True) + + logger = logging.getLogger(constants.TF2ONNX_PACKAGE_NAME) + + extra_opset = args.extra_opset or [] + tflite_path = None + custom_ops = {} + initialized_tables = None + tensors_to_rename = {} + if args.custom_ops: + using_tf_opset = False + for op in args.custom_ops.split(","): + if ":" in op: + op, domain = op.split(":") + else: + # default custom ops for tensorflow-onnx are in the "tf" namespace + using_tf_opset = True + domain = constants.TENSORFLOW_OPSET.domain + custom_ops[op] = (make_default_custom_op_handler(domain), []) + if using_tf_opset: + extra_opset.append(constants.TENSORFLOW_OPSET) + + if any(opset.domain == constants.CONTRIB_OPS_DOMAIN for opset in extra_opset): + try: + import tensorflow_text # pylint: disable=import-outside-toplevel + except ModuleNotFoundError: + logger.warning("tensorflow_text not installed. Model will fail to load if tensorflow_text ops are used.") + + # get the frozen tensorflow model from graphdef, checkpoint or saved_model. 
+ graph_def = None + inputs = None + outputs = None + model_path = None + + if args.graphdef: + graph_def, inputs, outputs = tf_loader.from_graphdef(args.graphdef, args.inputs, args.outputs) + model_path = args.graphdef + if args.checkpoint: + graph_def, inputs, outputs = tf_loader.from_checkpoint(args.checkpoint, args.inputs, args.outputs) + model_path = args.checkpoint + if args.saved_model: + graph_def, inputs, outputs, initialized_tables, tensors_to_rename = tf_loader.from_saved_model( + args.saved_model, args.inputs, args.outputs, args.tag, args.signature_def, args.concrete_function, + args.large_model, return_initialized_tables=True, return_tensors_to_rename=True) + model_path = args.saved_model + if args.keras: + graph_def, inputs, outputs = tf_loader.from_keras( + args.keras, args.inputs, args.outputs) + model_path = args.keras + if args.tflite: + # Optional, but used to cut graph if provided. + inputs = args.inputs + outputs = args.outputs + tflite_path = args.tflite + model_path = tflite_path + + if args.verbose: + logger.info("inputs: %s", inputs) + logger.info("outputs: %s", outputs) + + if args.rename_inputs: + tensors_to_rename.update(zip(inputs, args.rename_inputs)) + if args.rename_outputs: + tensors_to_rename.update(zip(outputs, args.rename_outputs)) + + with tf.device("/cpu:0"): + model_proto, _ = _convert_common( + graph_def, + name=model_path, + continue_on_error=args.continue_on_error, + target=args.target, + opset=args.opset, + custom_op_handlers=custom_ops, + extra_opset=extra_opset, + shape_override=args.shape_override, + input_names=inputs, + output_names=outputs, + inputs_as_nchw=args.inputs_as_nchw, + large_model=args.large_model, + tensors_to_rename=tensors_to_rename, + ignore_default=args.ignore_default, + use_default=args.use_default, + tflite_path=tflite_path, + dequantize=args.dequantize, + initialized_tables=initialized_tables, + output_frozen_graph=args.output_frozen_graph, + output_path=args.output) + + + # write onnx graph + 
logger.info("") + logger.info("Successfully converted TensorFlow model %s to ONNX", model_path) + + logger.info("Model inputs: %s", [n.name for n in model_proto.graph.input]) + logger.info("Model outputs: %s", [n.name for n in model_proto.graph.output]) + if args.output: + if args.large_model: + logger.info("Zipped ONNX model is saved at %s. Unzip before opening in onnxruntime.", args.output) + else: + logger.info("ONNX model is saved at %s", args.output) + else: + logger.info("To export ONNX model to file, please run with `--output` option") + + +def tensor_names_from_structed(concrete_func, input_names, output_names): + tensors_to_rename = {} + args, kwargs = concrete_func.structured_input_signature + structured_inputs = [t.name for t in args if isinstance(t, tf.TensorSpec)] + sorted(kwargs.keys()) + tensors_to_rename.update(zip(input_names, structured_inputs)) + if isinstance(concrete_func.structured_outputs, dict): + for k, v in concrete_func.structured_outputs.items(): + tensors_to_rename[v.name] = k + return tensors_to_rename + + +def from_keras(model, input_signature=None, opset=None, custom_ops=None, custom_op_handlers=None, + custom_rewriter=None, inputs_as_nchw=None, extra_opset=None, shape_override=None, + target=None, large_model=False, output_path=None): + """Returns a ONNX model_proto for a tf.keras model. 
+ + Args: + model: the tf.keras model we want to convert + input_signature: a tf.TensorSpec or a numpy array defining the shape/dtype of the input + opset: the opset to be used for the ONNX model, default is the latest + target: list of workarounds applied to help certain platforms + custom_op_handlers: dictionary of custom ops handlers + custom_rewriter: list of custom graph rewriters + extra_opset: list of extra opset's, for example the opset's used by custom ops + shape_override: dict with inputs that override the shapes given by tensorflow + inputs_as_nchw: transpose inputs in list from nchw to nhwc + large_model: use the ONNX external tensor storage format + output_path: save model to output_path + + Returns: + An ONNX model_proto and an external_tensor_storage dict. + """ + if LooseVersion(tf.__version__) < "2.0": + raise NotImplementedError("from_keras requires tf-2.0 or newer") + + if not input_signature: + raise ValueError("from_keras requires input_signature") + + from tensorflow.python.keras.saving import saving_utils as _saving_utils # pylint: disable=import-outside-toplevel + + # let tensorflow do the checking if model is a valid model + function = _saving_utils.trace_model_call(model, input_signature) + concrete_func = function.get_concrete_function(*input_signature) + + input_names = [input_tensor.name for input_tensor in concrete_func.inputs + if input_tensor.dtype != tf.dtypes.resource] + output_names = [output_tensor.name for output_tensor in concrete_func.outputs + if output_tensor.dtype != tf.dtypes.resource] + + initialized_tables = None + tensors_to_rename = tensor_names_from_structed(concrete_func, input_names, output_names) + + with tf.device("/cpu:0"): + frozen_graph = tf_loader.from_function(concrete_func, input_names, output_names, large_model=large_model) + model_proto, external_tensor_storage = _convert_common( + frozen_graph, + name=model.name, + continue_on_error=True, + target=None, + opset=opset, + custom_op_handlers=custom_ops, + 
extra_opset=extra_opset, + shape_override=shape_override, + input_names=input_names, + output_names=output_names, + inputs_as_nchw=inputs_as_nchw, + large_model=large_model, + tensors_to_rename=tensors_to_rename, + initialized_tables=initialized_tables, + output_path=output_path) + + return model_proto, external_tensor_storage + + +def from_function(function, input_signature=None, opset=None, custom_ops=None, custom_op_handlers=None, + custom_rewriter=None, inputs_as_nchw=None, extra_opset=None, shape_override=None, target=None, + large_model=False, output_path=None): + """Returns a ONNX model_proto for a tf.function. + + Args: + function: the tf.function we want to convert + input_signature: a tf.TensorSpec or a numpy array defining the shape/dtype of the input + opset: the opset to be used for the ONNX model, default is the latest + target: list of workarounds applied to help certain platforms + custom_op_handlers: dictionary of custom ops handlers + custom_rewriter: list of custom graph rewriters + extra_opset: list of extra opset's, for example the opset's used by custom ops + shape_override: dict with inputs that override the shapes given by tensorflow + inputs_as_nchw: transpose inputs in list from nchw to nhwc + large_model: use the ONNX external tensor storage format + output_path: save model to output_path + + Returns: + An ONNX model_proto and an external_tensor_storage dict. 
+ """ + if LooseVersion(tf.__version__) < "2.0": + raise NotImplementedError("from_function requires tf-2.0 or newer") + + if not input_signature: + raise ValueError("from_function requires input_signature") + + concrete_func = function.get_concrete_function(*input_signature) + + input_names = [input_tensor.name for input_tensor in concrete_func.inputs + if input_tensor.dtype != tf.dtypes.resource] + output_names = [output_tensor.name for output_tensor in concrete_func.outputs + if output_tensor.dtype != tf.dtypes.resource] + + initialized_tables = None + tensors_to_rename = tensor_names_from_structed(concrete_func, input_names, output_names) + + with tf.device("/cpu:0"): + frozen_graph = tf_loader.from_function(concrete_func, input_names, output_names, large_model=large_model) + model_proto, external_tensor_storage = _convert_common( + frozen_graph, + name=concrete_func.name, + continue_on_error=True, + target=None, + opset=opset, + custom_op_handlers=custom_ops, + extra_opset=extra_opset, + shape_override=shape_override, + input_names=input_names, + output_names=output_names, + inputs_as_nchw=inputs_as_nchw, + large_model=large_model, + tensors_to_rename=tensors_to_rename, + initialized_tables=initialized_tables, + output_path=output_path) + + return model_proto, external_tensor_storage + + +def from_graph_def(graph_def, name=None, input_names=None, output_names=None, opset=None, custom_ops=None, + custom_op_handlers=None, custom_rewriter=None, inputs_as_nchw=None, extra_opset=None, + shape_override=None, target=None, large_model=False, tensors_to_rename=None, output_path=None): + """Returns a ONNX model_proto for a tensorflow graphdef. 
+ + Args: + graph_def: the graphdef we want to convert + input_names: list of input names + output_names: list of output names + name: A name for the graph + opset: the opset to be used for the ONNX model, default is the latest + target: list of workarounds applied to help certain platforms + custom_op_handlers: dictionary of custom ops handlers + custom_rewriter: list of custom graph rewriters + extra_opset: list of extra opset's, for example the opset's used by custom ops + shape_override: dict with inputs that override the shapes given by tensorflow + inputs_as_nchw: transpose inputs in list from nchw to nhwc + large_model: use the ONNX external tensor storage format + output_path: save model to output_path + + Returns: + An ONNX model_proto and an external_tensor_storage dict. + """ + if not input_names: + raise ValueError("input_names needs to be provided") + if not output_names: + raise ValueError("output_names needs to be provided") + if not name: + name = "unknown" + initialized_tables = None + + with tf.device("/cpu:0"): + with tf.Graph().as_default() as tf_graph: + with tf_loader.tf_session(graph=tf_graph) as sess: + tf.import_graph_def(graph_def, name='') + frozen_graph = tf_loader.freeze_session(sess, input_names=input_names, output_names=output_names) + input_names = tf_loader.inputs_without_resource(sess, input_names) + frozen_graph = tf_loader.tf_optimize(input_names, output_names, graph_def) + + model_proto, external_tensor_storage = _convert_common( + frozen_graph, + name=name, + continue_on_error=True, + target=None, + opset=opset, + custom_op_handlers=custom_ops, + extra_opset=extra_opset, + shape_override=shape_override, + input_names=input_names, + output_names=output_names, + inputs_as_nchw=inputs_as_nchw, + large_model=large_model, + tensors_to_rename=tensors_to_rename, + initialized_tables=initialized_tables, + output_path=output_path) + + return model_proto, external_tensor_storage + + +if __name__ == "__main__": + main() diff --git 
# SPDX-License-Identifier: Apache-2.0


"""
tf2onnx.flexbuffers - Code for parsing flexbuffers
"""

import struct


class FlexbufferParseException(Exception):
    pass


def read_int(buffer, offset, bit_size):
    """Read a little-endian signed int of width 2**bit_size bytes."""
    size = 1 << bit_size
    format_char = 'bhiq'[bit_size]
    return struct.unpack('<' + format_char, buffer[offset:offset+size])[0]


def read_uint(buffer, offset, bit_size):
    """Read a little-endian unsigned int of width 2**bit_size bytes."""
    size = 1 << bit_size
    format_char = 'BHIQ'[bit_size]
    return struct.unpack('<' + format_char, buffer[offset:offset+size])[0]


def read_float(buffer, offset, bit_size):
    """Read a little-endian float: 4 bytes for bit_size 2, 8 bytes for bit_size 3.

    NOTE(review): reconstructed — the original text was garbled in transit
    (the '<f'/'<d' format strings were stripped).
    """
    if bit_size == 2:
        return struct.unpack('<f', buffer[offset:offset+4])[0]
    if bit_size == 3:
        return struct.unpack('<d', buffer[offset:offset+8])[0]
    raise FlexbufferParseException("Invalid bit size for float: %d" % bit_size)


def read_string(buffer, offset, size, decode_strings):
    """Read size bytes at offset, utf-8 decoded when decode_strings is True."""
    buf = buffer[offset:offset + size]
    if decode_strings:
        return buf.decode("utf-8")
    return buf


def read_bytes(buffer, offset, size):
    """Return the raw byte slice [offset, offset+size)."""
    return buffer[offset:offset + size]


def read_indirect(buffer, offset, bit_size):
    """Resolve an offset-typed value: stored as a backwards distance from its position."""
    return offset - read_uint(buffer, offset, bit_size)


def read_array(buffer, offset, length, bit_size, packed_type, decode_strings):
    """Read a typed vector of `length` items, each packed_type, width 2**bit_size."""
    byte_size = 1 << bit_size
    arr = []
    for i in range(length):
        item_offset = offset + (i * byte_size)
        arr.append(read_buffer(buffer, item_offset, bit_size, packed_type, decode_strings))
    return arr


def read_buffer(buffer, offset, parent_bit_size, packed_type, decode_strings):
    """Decode one flexbuffer value at offset. Meant to be used recursively.

    packed_type encodes (value_type << 2) | bit_size; inline scalars use the
    parent's width (parent_bit_size), offset types use their own bit_size.
    NOTE(review): the signature and the first three lines were reconstructed
    from the recursive call sites — the original text was garbled here.
    """
    bit_size = packed_type & 3
    value_type = packed_type >> 2
    byte_size = 1 << bit_size

    if value_type == 0x0:
        # null
        return None
    if value_type in [0x1, 0x2, 0x3]:
        # inline int/uint/float, stored at the parent's width
        read_fn = {0x1: read_int, 0x2: read_uint, 0x3: read_float}[value_type]
        return read_fn(buffer, offset, parent_bit_size)
    if value_type == 0x4:
        # key: null-terminated string, no size prefix
        str_offset = read_indirect(buffer, offset, parent_bit_size)
        size = 0
        while read_int(buffer, str_offset + size, 0) != 0:
            size += 1
        return read_string(buffer, str_offset, size, decode_strings)
    if value_type == 0x5:
        # string: size-prefixed; widen the size field until the terminator matches
        str_offset = read_indirect(buffer, offset, parent_bit_size)
        size_bit_size = bit_size
        size_byte_size = 1 << size_bit_size
        size = read_uint(buffer, str_offset - size_byte_size, bit_size)
        while read_int(buffer, str_offset + size, 0) != 0:
            size_byte_size <<= 1
            size_bit_size += 1
            size = read_uint(buffer, str_offset - size_byte_size, size_bit_size)
        return read_string(buffer, str_offset, size, decode_strings)
    if value_type in [0x6, 0x7, 0x8]:
        # indirect int/uint/float, stored at the value's own width
        read_fn = {0x6: read_int, 0x7: read_uint, 0x8: read_float}[value_type]
        data_offset = read_indirect(buffer, offset, parent_bit_size)
        return read_fn(buffer, data_offset, bit_size)
    if value_type == 0x9:
        # map: keys vector + values vector + per-value packed types
        length = read_uint(buffer, read_indirect(buffer, offset, parent_bit_size) - byte_size, bit_size)
        keys_offset = read_indirect(buffer, offset, parent_bit_size) - (byte_size * 3)
        keys_vector_offset = read_indirect(buffer, keys_offset, bit_size)
        key_byte_size = read_uint(buffer, keys_offset + byte_size, bit_size)
        key_bit_size = {1: 0, 2: 1, 4: 2, 8: 3, 16: 4}[key_byte_size]
        values_offset = read_indirect(buffer, offset, parent_bit_size)
        packed_types_offset = values_offset + length * byte_size
        obj = {}
        for i in range(length):
            key_offset = keys_vector_offset + i * key_byte_size
            key = read_buffer(buffer, key_offset, key_bit_size, (0x4 << 2) | key_bit_size, decode_strings)
            value_offset = values_offset + i * byte_size
            value_packed_type = read_uint(buffer, packed_types_offset + i, 0)
            value = read_buffer(buffer, value_offset, bit_size, value_packed_type, decode_strings)
            obj[key] = value
        return obj
    if value_type == 0xa:
        # heterogeneous vector: items followed by one packed type byte per item
        length = read_uint(buffer, read_indirect(buffer, offset, parent_bit_size) - byte_size, bit_size)
        arr = []
        items_offset = read_indirect(buffer, offset, parent_bit_size)
        packed_types_offset = items_offset + (length * byte_size)
        for i in range(length):
            item_offset = items_offset + (i * byte_size)
            packed_type = read_uint(buffer, packed_types_offset + i, 0)
            arr.append(read_buffer(buffer, item_offset, bit_size, packed_type, decode_strings))
        return arr
    if value_type in [0xb, 0xc, 0xd, 0xe, 0xf, 0x24]:
        # typed vector: all items share one element type
        length_offset = read_indirect(buffer, offset, parent_bit_size) - byte_size
        length = read_uint(buffer, length_offset, bit_size)
        item_value_type = value_type - 0xb + 0x1
        packed_type = item_value_type << 2
        items_offset = read_indirect(buffer, offset, parent_bit_size)
        return read_array(buffer, items_offset, length, bit_size, packed_type, decode_strings)
    if 0x10 <= value_type <= 0x18:
        # fixed typed vector: length (2..4) and element type encoded in value_type
        length = (value_type - 0x10) // 3 + 2
        value_type = ((value_type - 0x10) % 3) + 1
        packed_type = value_type << 2
        items_offset = read_indirect(buffer, offset, parent_bit_size)
        return read_array(buffer, items_offset, length, bit_size, packed_type, decode_strings)
    if value_type == 0x19:
        # blob: size-prefixed raw bytes
        data_offset = read_indirect(buffer, offset, parent_bit_size)
        size_offset = data_offset - byte_size
        size = read_uint(buffer, size_offset, bit_size)
        return read_bytes(buffer, data_offset, size)
    if value_type == 0x1a:
        # bool: any non-zero value is True
        return read_uint(buffer, offset, parent_bit_size) > 0
    raise FlexbufferParseException("Invalid flexbuffer value type %r" % value_type)
def read_flexbuffer(buffer, decode_strings=True):
    # Root layout: [...data...][root value][root packed type][root byte width].
    # The last byte gives the root value's width; the byte before it its type.
    byte_size = read_uint(buffer, len(buffer) - 1, 0)
    bit_size = {1: 0, 2: 1, 4: 2, 8: 3, 16: 4}[byte_size]
    packed_type = read_uint(buffer, len(buffer) - 2, 0)
    offset = len(buffer) - 2 - byte_size
    return read_buffer(buffer, offset, bit_size, packed_type, decode_strings)
# SPDX-License-Identifier: Apache-2.0


"""
tf2onnx.graph - class to manage graph manipulation on top of onnx
"""

from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import collections
import copy
import logging
import six
import numpy as np

from onnx import helper, numpy_helper, shape_inference, OperatorSetIdProto, AttributeProto, TensorProto
from tf2onnx import utils, __version__
from tf2onnx.utils import make_name, port_name, find_opset
from tf2onnx import optimizer
from tf2onnx.schemas import get_schema, infer_onnx_shape_dtype
from tf2onnx import constants

logger = logging.getLogger(__name__)


# todo(pengwa): remove protected-access later
# pylint: disable=broad-except,protected-access

class ExternalTensorStorage():
    """Passed into graph and node methods to accumulate tensors to save externally"""
    def __init__(self):
        # tensor name -> raw bytes moved out of the model proto
        self.name_to_tensor_data = {}
        # monotonically increasing suffix to keep external tensor names unique
        self.name_counter = 0
        # tensors larger than this many elements are externalized
        self.external_tensor_size_threshold = 1024
        # Node -> already-rewritten value attribute (avoids rewriting twice)
        self.node_to_modified_value_attr = {}

class Node(object):
    """A Node - wrapper around onnx nodes that we use for graph manipulations."""

    def __init__(self, node, graph, skip_conversion=False):
        """Create Node.
        Args:
            node: Onnx node in NodeProto
            graph: Graph() we are part of
        """
        self._op = node
        self.graph = graph
        self._input = list(node.input)
        self._output = list(node.output)
        self._attr = {}

        graph.set_node_by_name(self)
        # dict to original attributes
        for a in node.attribute:
            self._attr[a.name] = a
        self._skip_conversion = skip_conversion

    @property
    def input(self):
        return self._input

    @input.setter
    def input(self, val):
        # The setter can catch that all inputs are change
        # but it cannot catch that one input is changed.
        # That's method replace_input and replace_inputs must
        # be used to change inputs to let the graph instance
        # update its internal indices.
        self._input = copy.deepcopy(val)

    @property
    def output(self):
        return self._output

    @output.setter
    def output(self, val):
        """Set op output. Output should be updated explicitly,
        changing it would require output mapping changed.
        """
        self._graph_check()
        for o in self._output:
            del self.graph._output_to_node_name[o]

        self._output = val.copy()
        for o in self._output:
            utils.make_sure(o not in self.graph._output_to_node_name, "output %s already in output mapping", o)
            self.graph._output_to_node_name[o] = self.name

    @property
    def inputs(self):
        """Input node objects."""
        self._graph_check()
        val = [self.graph.get_node_by_output(n) for n in self._input]
        return val

    @property
    def attr(self):
        return self._attr

    def get_value_attr(self, external_tensor_storage=None):
        """Return onnx attr for value property of node.
        Attr is modified to point to external tensor data stored in external_tensor_storage, if included.
        """
        a = self._attr["value"]
        if external_tensor_storage is not None and self in external_tensor_storage.node_to_modified_value_attr:
            return external_tensor_storage.node_to_modified_value_attr[self]
        if external_tensor_storage is None or a.type != AttributeProto.TENSOR:
            return a
        # FIX: np.product is a deprecated alias removed in NumPy 2.0; use np.prod.
        if np.prod(a.t.dims) > external_tensor_storage.external_tensor_size_threshold:
            a = copy.copy(a)
            tensor_name = self.name.strip() + "_" + str(external_tensor_storage.name_counter)
            # strip characters that are not portable in file names
            for c in '~"#%&*:<>?/\\{|}':
                tensor_name = tensor_name.replace(c, '_')
            external_tensor_storage.name_counter += 1
            external_tensor_storage.name_to_tensor_data[tensor_name] = a.t.raw_data
            external_tensor_storage.node_to_modified_value_attr[self] = a
            a.t.raw_data = b''
            location = a.t.external_data.add()
            location.key = "location"
            location.value = tensor_name
            a.t.data_location = TensorProto.EXTERNAL
        return a

    def get_onnx_attrs(self, external_tensor_storage=None):
        """Return onnx valid attributes.
        Attrs point to external tensor data stored in external_tensor_storage, if included."""
        schema = get_schema(self.type, self.graph.opset, self.domain)
        if schema is None and not (self.is_const() or self.is_graph_input()):
            logger.debug("Node %s uses non-stardard onnx op <%s, %s>, skip attribute check",
                         self.name, self.domain, self.type)
        onnx_attrs = {}
        for a in self._attr.values():
            if a.name == "value":
                onnx_attrs[a.name] = self.get_value_attr(external_tensor_storage)
            elif schema is None or schema.has_attribute(a.name):
                onnx_attrs[a.name] = a
        return onnx_attrs
+ Attrs point to external tensor data stored in external_tensor_storage, if included.""" + schema = get_schema(self.type, self.graph.opset, self.domain) + if schema is None and not (self.is_const() or self.is_graph_input()): + logger.debug("Node %s uses non-stardard onnx op <%s, %s>, skip attribute check", + self.name, self.domain, self.type) + onnx_attrs = {} + for a in self._attr.values(): + if a.name == "value": + onnx_attrs[a.name] = self.get_value_attr(external_tensor_storage) + elif schema is None or schema.has_attribute(a.name): + onnx_attrs[a.name] = a + return onnx_attrs + + @property + def name(self): + return self._op.name + + def child_name(self): + return utils.make_name(self.name) + + @property + def op(self): + """TODO: have a better interface for this.""" + return self._op + + @property + def type(self): + """Return Op type.""" + return self._op.op_type + + @type.setter + def type(self, val): + """Set Op type.""" + self._op.op_type = val + + @property + def domain(self): + """Return Op type.""" + return self._op.domain + + @domain.setter + def domain(self, val): + """Set Op type.""" + self._op.domain = val + + @property + def data_format(self): + """Return data_format.""" + attr_str = self.get_attr_value("data_format") + return "unkown" if attr_str is None else attr_str.decode("utf-8") + + @data_format.setter + def data_format(self, val): + """Set data_format.""" + self.set_attr("data_format", val) + + def is_nhwc(self): + """Return True if node is in NHWC format.""" + utils.make_sure('D' not in self.data_format, "is_nhwc called on %s with spatial=2 but data_format=%s", + self.name, self.data_format) + return self.data_format == "NHWC" + + def is_const(self): + """Return True if node is a constant.""" + return self.type in ["Const", "ConstV2"] + + def is_scalar(self): + """Return True if node is a constant with a scalar value.""" + if not self.is_const(): + return False + t = self.get_attr("value", default=None) + if t is None: + return False + t = 
numpy_helper.to_array(helper.get_attribute_value(t)) + return t.shape == tuple() + + def is_graph_input(self): + return self.type in ["Placeholder", "PlaceholderWithDefault", "PlaceholderV2"] + + def is_graph_input_default_const(self): + return self.is_const() and any( + out.is_graph_input() for out in self.graph.find_output_consumers(self.output[0]) + ) + + def is_while(self): + return self.type in ["While", "StatelessWhile", "Loop"] + + def __str__(self): + return str(self._op) + + def __repr__(self): + return "" % (self.type, self._op.name) + + @property + def summary(self): + """Return node summary information.""" + lines = [] + lines.append("OP={}".format(self.type)) + lines.append("Name={}".format(self.name)) + + g = self.graph + if self.input: + lines.append("Inputs:") + for name in self.input: + node = g.get_node_by_output(name) + op = node.type if node else "N/A" + lines.append("\t{}={}, {}, {}".format(name, op, g.get_shape(name), g.get_dtype(name))) + + if self.output: + for name in self.output: + lines.append("Outpus:") + lines.append("\t{}={}, {}".format(name, g.get_shape(name), g.get_dtype(name))) + + return '\n'.join(lines) + + def get_attr(self, name, default=None): + """Get raw attribute value.""" + attr = self.attr.get(name, default) + return attr + + def get_attr_value(self, name, default=None): + attr = self.get_attr(name) + if attr: + return helper.get_attribute_value(attr) + return default + + def get_attr_int(self, name): + """Get attribute value as int.""" + attr_int = self.get_attr_value(name) + utils.make_sure( + attr_int is not None and isinstance(attr_int, int), + "attribute %s is None", name + ) + return attr_int + + def get_attr_str(self, name, encoding="utf-8"): + """Get attribute value as string.""" + attr_str = self.get_attr_value(name) + utils.make_sure( + attr_str is not None and isinstance(attr_str, bytes), + "attribute %s is None", name + ) + return attr_str.decode(encoding) + + def set_attr(self, name, value): + self.attr[name] 
    def set_attr_onnx(self, value):
        # Store an already-built onnx AttributeProto under its own name.
        self.attr[value.name] = value

    @property
    def skip_conversion(self):
        # True when this node is already a valid onnx node and the conversion
        # pass should leave it untouched.
        return self._skip_conversion

    @skip_conversion.setter
    def skip_conversion(self, val):
        self._skip_conversion = val

    # If some Node is created as onnx_node, then we don't need convert it
    def need_skip(self):
        return self._skip_conversion

    @property
    def output_shapes(self):
        """Get output shapes."""
        self._graph_check()
        val = [self.graph.get_shape(n) for n in self._output]
        return val

    @property
    def output_dtypes(self):
        """Get output dtypes."""
        self._graph_check()
        val = [self.graph.get_dtype(n) for n in self._output]
        return val

    def get_tensor_value(self, as_list=True):
        """Get value for onnx tensor.
        Args:
            as_list: whether return numpy ndarray in list.
        Returns:
            If as_list=True, return the array as a (possibly nested) list.
            Otherwise, return data of type np.ndarray.

            If a tensor is a scalar having value 1,
            when as_list=False, return np.array(1), type is <class 'numpy.ndarray'>
            when as_list=True, return 1, type is <class 'int'>.
        """
        if not self.is_const():
            raise ValueError("get tensor value: '{}' must be Const".format(self.name))

        t = self.get_attr("value")
        if t:
            t = numpy_helper.to_array(helper.get_attribute_value(t))
            if as_list is True:
                t = t.tolist()  # t might be scalar after tolist()
        return t

    def scalar_to_dim1(self):
        """Get value for onnx tensor."""
        # promotes a dimensionless const tensor to shape [1], in place
        if not self.is_const():
            raise ValueError("get tensor value: {} must be Const".format(self.name))

        t = self.get_attr("value")
        if t:
            t = helper.get_attribute_value(t)
            if not t.dims:
                t.dims.extend([1])
        return t.dims

    def set_tensor_value(self, new_val):
        """Set new value for existing onnx tensor.
        Args:
            new_val: value of type numpy ndarray
        """
        if not self.is_const():
            raise ValueError("set tensor value: {} must be Const".format(self.name))
        t = self.get_attr("value")
        if not t:
            raise ValueError("set tensor value: {} is None".format(self.name))
        t = helper.get_attribute_value(t)
        onnx_tensor = numpy_helper.from_array(new_val, t.name)
        del t
        self.set_attr("value", onnx_tensor)
        # track shapes in _output_shapes
        self._graph_check()
        self.graph.set_shape(onnx_tensor.name, list(onnx_tensor.dims))

    def get_body_graphs(self):
        # Return {attr_name: Graph} of subgraphs attached to this node, or None.
        self._graph_check()
        return self.graph.contained_graphs.get(self.name, None)

    def set_body_graph_as_attr(self, attr_name, graph):
        # Attach a subgraph under attr_name and parent it to this node's graph.
        self._graph_check()
        if self.name not in self.graph.contained_graphs:
            self.graph.contained_graphs[self.name] = {}

        self.graph.contained_graphs[self.name].update({attr_name: graph})
        graph.parent_graph = self.graph

    def update_proto(self, external_tensor_storage=None):
        """Update protobuf from internal structure."""
        # protobuf repeated fields cannot be assigned; clear then extend
        nodes = list(self._op.input)
        for node in nodes:
            self._op.input.remove(node)
        self._op.input.extend(self.input)
        nodes = list(self._op.output)
        for node in nodes:
            self._op.output.remove(node)
        self._op.output.extend(self.output)

        # update attributes to proto
        del self._op.attribute[:]

        # check attribute of type GraphProto
        attr_graphs = self.get_body_graphs()
        if attr_graphs:
            for attr_name, sub_graph in attr_graphs.items():
                graph_proto = sub_graph.make_graph("graph for " + self.name + " " + attr_name,
                                                   external_tensor_storage=external_tensor_storage)
                self.set_attr(attr_name, graph_proto)

        attr = list(self.get_onnx_attrs(external_tensor_storage).values())
        if attr:
            self._op.attribute.extend(attr)

    def get_implicit_inputs(self, recursive=True):
        """Get implicit inputs if the node has attributes being GraphProto."""
        output_available_in_cur_graph = set()
        all_node_inputs = set()

        graphs = []
        body_graphs = self.get_body_graphs()
        if body_graphs:
            graphs.extend(body_graphs.values())

        while graphs:
            graph = graphs.pop()
            for n in graph.get_nodes():
                output_available_in_cur_graph |= set(n.output)
                for i in n.input:
                    all_node_inputs.add(i)

                if recursive:
                    b_graphs = n.get_body_graphs()
                    if b_graphs:
                        graphs.extend(b_graphs.values())

        # names consumed inside the body graphs but produced outside of them
        outer_scope_node_input_ids = all_node_inputs - output_available_in_cur_graph
        return list(outer_scope_node_input_ids)
    def _graph_check(self):
        # Guard: operations on a detached node (graph=None) are programming errors.
        utils.make_sure(self.graph is not None, "Node %s not belonging any graph",
                        self.name)

    def maybe_cast_input(self, supported, type_map):
        """Insert Cast ops in front of inputs whose dtype is not supported.
        Args:
            supported: list of supported types for inputs
            type_map: dict type to supported type mapping
        Returns:
            True if at least one Cast was inserted.
        """
        did_cast = False
        for i, name in enumerate(self.input):
            dtype = self.graph.get_dtype(name)
            if dtype not in supported[i]:
                tdtype = type_map.get(dtype)
                if tdtype is None:
                    raise RuntimeError("don't know how to cast type {} on node {}".format(dtype, name))
                shape = self.graph.get_shape(name)
                cast_node = self.graph.insert_new_node_on_input(
                    self, "Cast", name, to=tdtype)
                # NOTE(review): dtype is passed as a one-element list here while
                # set_dtype elsewhere receives a scalar — confirm intended.
                self.graph.set_dtype(cast_node.output[0], [tdtype])
                self.graph.set_shape(cast_node.output[0], shape)
                did_cast = True
        return did_cast


class Graph(object):
    """Class that provides graph manipulation and matching."""

    def __init__(self, nodes, output_shapes=None, dtypes=None, target=None, opset=None, extra_opset=None,
                 input_names=None, output_names=None, is_subgraph=False, graph_name=None):
        """Create Graph.
        Args:
            nodes: list of Node()
            output_shapes: dict of tensorflow output shapes
            dtypes: dict of tensorflow dtype
        """
        if target is None:
            target = []
        self._nodes = []
        self._nodes_by_name = {}
        self._output_to_node_name = {}
        self._output_to_consumers = {}
        self._input_to_graph = {}
        self.shapes = {}
        self.graph_name = graph_name or utils.make_name("tf2onnx")
        self._is_subgraph = is_subgraph
        self.ta_reads = []
        # A list of index, output tuples of potential scan outputs in this graph
        # Used by the tflite while loop handler
        self.scan_outputs = []
        self.func_inputs = []

        self._target = set(target)
        self._dtypes = dtypes

        self._output_shapes = output_shapes
        self._opset = find_opset(opset)

        if extra_opset is not None:
            utils.make_sure(isinstance(extra_opset, list), "invalid extra_opset")
        self._extra_opset = extra_opset

        self.outputs = output_names if output_names is not None else []

        self.parent_graph = None
        self.contained_graphs = {}  # {node_name: {node_attribute_name: Graph}}

        ops = [Node(node, self) for node in nodes]
        if input_names is not None:
            input_names_set = set(input_names)
            for n in ops:
                for i, out in enumerate(n.output):
                    if out in input_names_set and not n.is_graph_input():
                        # requested input is produced by a real op: rename that
                        # output and synthesize a Placeholder with the old name
                        n.output[i] = utils.make_name("@@ALLOC")
                        ops.append(Node(helper.make_node("Placeholder", [], outputs=[out], name=out), self))
                        logger.info("Created placeholder for input %s", out)

        input_nodes = {n.output[0]: n for n in ops if n.is_graph_input()}
        if input_names is not None:
            self.inputs = [input_nodes[n] for n in input_names]
        else:
            self.inputs = list(input_nodes.values())

        self.reset_nodes(ops)

        if not is_subgraph:
            # add identity node after each output, in case it is renamed during conversion.
            for o in self.outputs:
                n = self.get_node_by_output_in_current_graph(o)
                if n.is_graph_input():
                    # Don't add identity if the node is also an input. We want to keep input names the same.
                    continue
                new_output_name = port_name(n.name + "_" + utils.make_name("raw_output_"))
                n_shapes = n.output_shapes
                n_dtypes = n.output_dtypes
                body_graphs = n.graph.contained_graphs.pop(n.name, None)
                self.remove_node(n.name)

                new_outputs = [output if output != o else new_output_name for output in n.output]
                # domain should be passed to new node
                branches = {}
                if body_graphs:
                    for attr_name, body_graph in body_graphs.items():
                        body_graph.parent_graph = self
                        branches[attr_name] = body_graph

                _ = self.make_node(n.type, n.input, outputs=new_outputs, attr=n.attr, name=n.name,
                                   skip_conversion=n._skip_conversion, dtypes=n_dtypes, shapes=n_shapes,
                                   domain=n.domain, branches=branches)

                self.replace_all_inputs(o, new_output_name, ops=self.get_nodes())
                self.make_node("Identity", [new_output_name], outputs=[o], op_name_scope=n.name + "_" + "graph_outputs")
                self.copy_shape(new_output_name, o)
                self.copy_dtype(new_output_name, o)
+ continue + new_output_name = port_name(n.name + "_" + utils.make_name("raw_output_")) + n_shapes = n.output_shapes + n_dtypes = n.output_dtypes + body_graphs = n.graph.contained_graphs.pop(n.name, None) + self.remove_node(n.name) + + new_outputs = [output if output != o else new_output_name for output in n.output] + # domain should be passed to new node + branches = {} + if body_graphs: + for attr_name, body_graph in body_graphs.items(): + body_graph.parent_graph = self + branches[attr_name] = body_graph + + _ = self.make_node(n.type, n.input, outputs=new_outputs, attr=n.attr, name=n.name, + skip_conversion=n._skip_conversion, dtypes=n_dtypes, shapes=n_shapes, + domain=n.domain, branches=branches) + + self.replace_all_inputs(o, new_output_name, ops=self.get_nodes()) + self.make_node("Identity", [new_output_name], outputs=[o], op_name_scope=n.name + "_" + "graph_outputs") + self.copy_shape(new_output_name, o) + self.copy_dtype(new_output_name, o) + + def create_new_graph_with_same_config(self): + """Create a clean graph inheriting current graph's configuration.""" + return Graph([], output_shapes={}, dtypes={}, target=self._target, opset=self._opset, + extra_opset=self.extra_opset, output_names=[]) + + @property + def input_names(self): + """Placeholder node outputs""" + return [node.output[0] for node in self.inputs] + + @property + def opset(self): + return self._opset + + @property + def extra_opset(self): + return self._extra_opset + + def is_target(self, *names): + """Return True if target platform contains any name.""" + return any(name in self._target for name in names) + + def make_consts(self, values, np_type=np.int64, skip_conversion=False, raw=True): + """create list of consts of same type""" + consts = [] + for value in values: + np_val = np.array(value).astype(np_type) + consts.append(self.make_const(utils.make_name("const"), np_val, skip_conversion, raw)) + return consts + + def make_const(self, name, np_val, skip_conversion=False, raw=True): + 
"""Make a new constant in the graph. + Args: + name: const node name, must be unique. + np_val: value of type numpy ndarray. + skip_conversion: bool, indicate whether this created node would be mapped during conversion. + raw: whether to store data at field of raw_data or the specific field according to its dtype + """ + np_val_flat = np_val.flatten() + is_bytes = np_val.dtype == np.object and len(np_val_flat) > 0 and isinstance(np_val_flat[0], bytes) + if raw and not is_bytes: + onnx_tensor = numpy_helper.from_array(np_val, name) + else: + onnx_tensor = helper.make_tensor(name, utils.map_numpy_to_onnx_dtype(np_val.dtype), + np_val.shape, np_val_flat, raw=False) + dtype = onnx_tensor.data_type + node = self.make_node("Const", [], outputs=[name], name=name, attr={"value": onnx_tensor}, + skip_conversion=skip_conversion, dtypes=[dtype], infer_shape_dtype=False) + self.set_shape(name, np_val.shape) + self.set_dtype(name, utils.map_numpy_to_onnx_dtype(np_val.dtype)) + return node + + def copy_const(self, node, name=None): + """Copy a const node, using name if specified""" + # TODO: support attr copy starting at opset 12 + if name is None: + name = utils.make_name(node.name) + return self.make_const(name, node.get_tensor_value(as_list=False)) + + def make_node(self, op_type, inputs, attr=None, output_count=1, outputs=None, skip_conversion=True, + op_name_scope=None, name=None, shapes=None, dtypes=None, domain=constants.ONNX_DOMAIN, + infer_shape_dtype=True, branches=None): + """Make a new onnx node in the graph""" + if attr is None: + attr = {} + if shapes is None: + shapes = [] + if dtypes is None: + dtypes = [] + if branches is None: + branches = {} + if name is None: + name = utils.make_name(op_type) + + if op_name_scope: + name = "_".join([op_name_scope, name]) + + logger.debug("Making node: Name=%s, OP=%s", name, op_type) + + if outputs is None: + outputs = [name + ":" + str(i) for i in range(output_count)] + + output_count = len(outputs) + raw_attr = {} + 
onnx_attrs = [] + for a, v in attr.items(): + if isinstance(v, AttributeProto): + onnx_attrs.append(v) + else: + raw_attr[a] = v + + n = self.get_node_by_name(name) + utils.make_sure(n is None, "name %s already exists in node: \n%s", name, n) + for o in outputs: + n = self.get_node_by_output_in_current_graph(o) + utils.make_sure(n is None, "output tensor named %s already exists in node: \n%s", o, n) + + onnx_node = helper.make_node(op_type, inputs, outputs, name=name, domain=domain, **raw_attr) + + for name2 in onnx_node.input: + self._register_input_name(name2, onnx_node) + + if op_type in ["If", "Loop", "Scan"]: + # we force the op containing inner graphs not skipped during conversion. + skip_conversion = False + + node = Node(onnx_node, self, skip_conversion=skip_conversion) + if onnx_attrs: + _ = [node.set_attr_onnx(a) for a in onnx_attrs] + + for branch, body in branches.items(): + node.set_body_graph_as_attr(branch, body) + + if shapes: + utils.make_sure(len(shapes) == output_count, + "output shape count %s not equal to output count %s", len(shapes), output_count) + for i in range(output_count): + self.set_shape(node.output[i], shapes[i]) + + if dtypes: + utils.make_sure(len(dtypes) == output_count, + "output dtypes count %s not equal to output count %s", len(dtypes), output_count) + for i in range(output_count): + self.set_dtype(node.output[i], dtypes[i]) + + if (not shapes or not dtypes) and infer_shape_dtype: + self.update_node_shape_dtype(node, override=False) + + logger.debug("Made node: %s\n%s", node.name, node.summary) + self._nodes.append(node) + return node + + def append_node(self, node): + "Add a node to the graph." 
+ output_shapes = node.output_shapes + output_dtypes = node.output_dtypes + node.graph = self + self._nodes.append(node) + self._nodes_by_name[node.name] = node + for i, name in enumerate(node.output): + self._output_to_node_name[name] = node.name + self.set_dtype(name, output_dtypes[i]) + self.set_shape(name, output_shapes[i]) + for name in node.input: + self._register_input_name(name, node) + + def remove_node(self, node_name): + """Remove node in current graph.""" + utils.make_sure(node_name in self._nodes_by_name, "node %s not in current graph, cannot remove", node_name) + node = self.get_node_by_name(node_name) + del self._nodes_by_name[node_name] + if node_name in self.contained_graphs: + del self.contained_graphs[node_name] + + if node in self.inputs: + self.inputs.remove(node) + + for op_output in node.output: + del self._output_to_node_name[op_output] + + if op_output in self._output_shapes: + del self._output_shapes[op_output] + if op_output in self._dtypes: + del self._dtypes[op_output] + + for op_input in node.input: + utils.make_sure( + op_input in self._output_to_consumers, + "Input %r of node %r not found.", op_input, node_name) + self._unregister_input_name(op_input, node) + + self._nodes.remove(node) + node.graph = None + + def reset_nodes(self, ops): + """Reset the graph with node list.""" + remained_dtypes = {} + remained_shapes = {} + remained_sub_graphs = {} + for op in ops: + for op_output in op.output: + # this check should be removed once we make sure all output tensors have dtype/shape. 
+ if op_output in self._dtypes: + remained_dtypes[op_output] = self._dtypes[op_output] + if op_output in self._output_shapes: + remained_shapes[op_output] = self._output_shapes[op_output] + + if op.name in self.contained_graphs: + remained_sub_graphs[op.name] = self.contained_graphs[op.name] + + self._nodes = ops + self.contained_graphs = remained_sub_graphs + self._nodes_by_name = {op.name: op for op in ops} + self._output_to_node_name = {} + self._output_to_consumers = {} + for op in ops: + for op_output in op.output: + self._output_to_node_name[op_output] = op.name + inps = op.input + for op_input in inps: + self._register_input_name(op_input, op) + + for n in self.inputs: + if n not in ops: + raise ValueError("graph input " + n + " not exist") + for o in self.outputs: + if o not in self._output_to_node_name: + raise ValueError("graph output " + o + " not exist") + + self._dtypes = remained_dtypes + self._output_shapes = remained_shapes + + def is_empty_input(self, name): + # in ONNX, operation may have optional input and an empty string may be used + # in the place of an actual argument's name to indicate a missing argument + return name == utils.ONNX_EMPTY_INPUT + + def check_integrity(self): + """ + Check graph integrity. Every node's input needs to associate with a node. + Return broken outputs. + """ + broken_outputs = set() + for node in self.get_nodes(): + for inp in node.input: + if self.get_node_by_output(inp) is None and not self.is_empty_input(inp): + broken_outputs.add(inp) + return list(broken_outputs) + + def update_node_shape_dtype(self, node, override=False): + """Try the best to infer shapes and dtypes for outputs of the node, + by default, we respect TF shapes and dtypes. 
+ """ + if node.is_const() or node.is_graph_input(): + return + # NOTE: only support onnx node for now + if not utils.is_onnx_domain(node.domain): + return + + logger.debug("Infer shape and dtype for [%s]", node.name) + # NOTE: shape inference for some ops need the input values of the op, e.g., Reshape + # op needs the "Shape" value to infer output shape. + initializers = [] + for i, inp in enumerate(node.inputs): + if inp is None: + if not self.is_empty_input(node.input[i]): + if logger.isEnabledFor(logging.INFO): + logger.warning( + "[%s] infer a inexistent node: [%s], please check the code", + node.name, node.input[i] + ) + continue + if inp.is_const(): + t = inp.get_attr("value") + tensor = helper.get_attribute_value(t) + tensor.name = inp.output[0] + initializers.append(tensor) + + input_shapes = [self.get_shape(i) for i in node.input] + input_dtypes = [self.get_dtype(i) for i in node.input] + + shapes, dtypes = infer_onnx_shape_dtype(node, self._opset, input_shapes, input_dtypes, initializers) + if not shapes or not dtypes: + return + + for output, shape, dtype in zip(node.output, shapes, dtypes): + if dtype == TensorProto.UNDEFINED: + logger.debug("Inferred dtype for [%s, type: %s] is UNDEFINED, SKIP", node.name, node.type) + else: + existing_dtype = self.get_dtype(output) + if existing_dtype is not None and existing_dtype != dtype: + if override: + logger.warning("Override dtype of %s from %s to %s", output, existing_dtype, dtype) + else: + dtype = existing_dtype + self.set_dtype(output, dtype) + logger.debug("Set dtype of [%s] to %s", output, dtype) + + if shape is None: + logger.debug("Inferred shape for [%s, type: %s] is None, SKIP", node.name, node.type) + else: + existing_shape = self.get_shape(output) + if existing_shape is not None and not utils.are_shapes_equal(existing_shape, shape): + if override: + logger.warning("Override shape of %s from %s to %s", output, existing_shape, shape) + else: + shape = existing_shape + self.set_shape(output, shape) + 
logger.debug("Set shape of [%s] to %s", output, shape) + + def update_proto(self, external_tensor_storage=None): + """Update the onnx protobuf from out internal Node structure.""" + for node in self._nodes: + node.update_proto(external_tensor_storage) + + def get_nodes(self): + """Get node list.""" + return self._nodes + + def get_node_by_output(self, output, search_in_parent_graphs=True): + """Get node by node output id recursively going through nested graphs. + Args: + search_in_parent_graphs: search in all parent graphs + """ + ret = None + g = self + while not ret and g: + ret = g.get_node_by_output_in_current_graph(output) + if ret: + return ret + + if not search_in_parent_graphs: + break + g = g.parent_graph + return ret + + def get_node_by_output_in_current_graph(self, output): + """Get node by node output id.""" + name = self._output_to_node_name.get(output) + ret = None + if name: + ret = self._nodes_by_name.get(name) + return ret + + def get_node_by_name(self, name): + """Get node by name.""" + ret = self._nodes_by_name.get(name) + return ret + + def set_node_by_name(self, node): + """Set node by name.""" + self._nodes_by_name[node.name] = node + for op_output in node.output: + self._output_to_node_name[op_output] = node.name + for name in node.input: + self._register_input_name(name, node) + + def change_node_name(self, node, new_name): + """Remove node in current graph.""" + utils.make_sure(new_name not in self._nodes_by_name, "node %s not unique ", new_name) + dtypes = node.output_dtypes + shapes = node.output_shapes + self.remove_node(node.name) + new_node = self.make_node(node.type, node.input, output_count=len(node.output), + attr=node.attr, dtypes=dtypes, shapes=shapes, name=new_name) + for i, old_output in enumerate(node.output): + new_output = port_name(new_name, i) + for j, k in enumerate(self.outputs): + if k == old_output: + self.outputs[j] = new_output + break + self.replace_all_inputs(old_output, new_output, ops=self.get_nodes()) + return 
new_node + + def add_graph_input(self, name, dtype=None, shape=None): + """Add placeholder node as graph's input. Order matters only for subgraph. + Placeholders in original graph are assumed for main graph, order not matters. + """ + if dtype is None: + dtype = self.get_dtype(name) + + if shape is None: + shape = self.get_shape(name) + + new_node = self.make_node("Placeholder", [], outputs=[name], dtypes=[dtype], shapes=[shape]) + self.inputs.append(new_node) + + def add_graph_input_with_default(self, name, default_const, dtype=None, shape=None): + """Add placeholderwithdefault.""" + if dtype is None: + dtype = self.get_dtype(name) + + if shape is None: + shape = self.get_shape(name) + + default_const_name = port_name(make_name("{}_default".format(name))) + default_const.output = [default_const_name] + new_node = self.make_node("PlaceholderWithDefault", [default_const_name], outputs=[name], + dtypes=[dtype], shapes=[shape]) + self.inputs.append(new_node) + + def add_graph_output(self, name, dtype=None, shape=None): + """Add node output as graph's output.""" + utils.make_sure(name in self._output_to_node_name, "output %s not exist in the graph", name) + + if dtype is None: + dtype = self.get_dtype(name) + + if shape is None: + shape = self.get_shape(name) + + if name not in self.outputs: + utils.make_sure(shape is not None, "shape for output %s should not be None", name) + utils.make_sure(dtype is not None, "dtype for output %s should not be None", name) + self.outputs.append(name) + self.set_shape(name, shape) + self.set_dtype(name, dtype) + else: + raise ValueError("graph output " + name + " already exists") + + def get_dtype(self, name): + """Get dtype for node.""" + node = self.get_node_by_output(name, search_in_parent_graphs=True) + return node.graph._dtypes.get(name) if node else None + + def set_dtype(self, name, dtype): + """Set dtype for node.""" + node = self.get_node_by_output(name, search_in_parent_graphs=True) + node.graph._dtypes[name] = dtype + + def 
copy_dtype(self, src_name, dst_name): + """Copy dtype from another node.""" + dtype = self.get_dtype(src_name) + self.set_dtype(dst_name, dtype) + + def get_shape(self, name): + """Get shape for node.""" + utils.make_sure(isinstance(name, six.text_type), "get_shape name is invalid type: %s", name) + node = self.get_node_by_output(name, search_in_parent_graphs=True) + shape = node.graph._output_shapes.get(name) if node else None + if shape: + for i, v in enumerate(shape): + if v is None: + # pylint: disable=unsupported-assignment-operation + shape[i] = -1 + # hack to allow utils.ONNX_UNKNOWN_DIMENSION to override batchsize if needed. + # default is -1. + if shape[0] == -1: + # pylint: disable=unsupported-assignment-operation + shape[0] = utils.ONNX_UNKNOWN_DIMENSION + return shape + return shape + + def get_rank(self, name): + """Returns len(get_shape(name)) or None if shape is None""" + shape = self.get_shape(name) + if shape is None: + return None + return len(shape) + + def set_shape(self, name, val): + """Set new shape of node.""" + if isinstance(val, np.ndarray): + val = val.tolist() + if isinstance(val, tuple): + val = list(val) + node = self.get_node_by_output(name, search_in_parent_graphs=True) + utils.make_sure(node is not None, "cannot find node by output id %s", name) + node.graph._output_shapes[name] = val + + def copy_shape(self, input_name, output_name): + """Copy shape from another node.""" + shape = self.get_shape(input_name) + # assert shape is not None + if shape is not None: + self.set_shape(output_name, shape) + + def topological_sort(self, ops): + """Topological sort of graph.""" + # sort by name, the result will be reversed alphabeta + ops.sort(key=lambda op: op.name) + + def _push_stack(stack, node, in_stack): + stack.append(node) + if node in in_stack: + raise ValueError('Graph has cycles, node=' + ops[node].name) + in_stack[node] = True + + def _get_unvisited_child(g, node, not_visited): + for child in g[node]: + if child in not_visited: + 
return child + return -1 + + n = len(ops) + g = [[] for _ in range(n)] + op_name_to_index = {} + for i, op in enumerate(ops): + op_name_to_index[op.name] = i + + for i, op in enumerate(ops): + all_input = set(op.input) + implicit_inputs = op.get_implicit_inputs() + all_input |= set(implicit_inputs) + # remove those empty inputs + all_input = list(filter(lambda a: a != '', all_input)) + for inp in sorted(all_input): + j = self.get_node_by_output(inp) + utils.make_sure(j is not None, "Cannot find node with output %r in graph %r", inp, self.graph_name) + if self.parent_graph and j.name not in op_name_to_index: + # there might be some outer-scoped inputs for an inner Graph. + pass + else: + g[op_name_to_index[j.name]].append(i) + + # label for each op. highest = sink nodes. + label = [-1 for _ in range(n)] + stack = [] + in_stack = dict() + not_visited = dict.fromkeys(range(n)) + label_counter = n - 1 + + while not_visited: + node = list(not_visited.keys())[0] + _push_stack(stack, node, in_stack) + while stack: + node = _get_unvisited_child(g, stack[-1], not_visited) + if node != -1: + _push_stack(stack, node, in_stack) + else: + node = stack.pop() + in_stack.pop(node) + not_visited.pop(node) + label[node] = label_counter + label_counter -= 1 + + ret = [x for _, x in sorted(zip(label, ops))] + self.reset_nodes(ret) + + def make_graph(self, doc, graph_name=None, external_tensor_storage=None): + """ + Create GraphProto for onnx from internal graph. 
+ Args: + optimize: optimize graph via onnx + doc: text for doc string of the graph + """ + graph_name = graph_name or self.graph_name + self.delete_unused_nodes(self.outputs) + self.topological_sort(self.get_nodes()) + self.update_proto(external_tensor_storage) + + # TODO: we'd want to do something like this so that transpose optimizer is active + # for all (unit) tests + # if optimize: + # from tf2onnx.optimizer.transpose_optimizer import TransposeOptimizer + # optimizer = TransposeOptimizer(self, False) + # optimizer.optimize() + ops = [] + const_ops = [] + graph_inputs = self.inputs.copy() + for op in self.get_nodes(): + if op.is_const(): + const_ops.append(op) + elif op.is_graph_input(): + if op not in graph_inputs: + graph_inputs.append(op) + else: + ops.append(op) + + # create initializers for placeholder with default nodes + initializers = [] + placeholder_default_const_ops = [] + for op in graph_inputs: + if op.type == "PlaceholderWithDefault": + utils.make_sure(op.inputs[0] is not None, "Cannot find node with output {}".format(op.input[0])) + utils.make_sure(op.inputs[0].is_const(), + "non-const default value for PlaceholderWithDefault node '%s' is not supported. " + "Use the --use_default or --ignore_default flags to convert this node.", op.name) + # copy the tensor value, set its name to current node's output, add as initializer + value = op.inputs[0].get_tensor_value(as_list=False) + tensor = numpy_helper.from_array(value, op.output[0]) + initializers.append(tensor) + placeholder_default_const_ops.append(op.inputs[0]) + + # create initializers for constant nodes + const_ops = [op for op in const_ops if op not in placeholder_default_const_ops] + for op in const_ops: + # not to use numpy_helper.from_array to create a new tensor + # because sometimes onnx will have a bug that only check the tensor data in specific field + # such as at upsample it only checks the float_data field. 
+ t = op.get_value_attr(external_tensor_storage) + tensor = helper.get_attribute_value(t) + tensor.name = op.output[0] + initializers.append(tensor) + + # create input_tensor_values + input_ids = [op.output[0] for op in graph_inputs] + # onnx with IR version below 4 requires initializer should be in inputs. + # here we check opset version rather than IR version for the reason: + # https://github.com/onnx/tensorflow-onnx/pull/557 + # opset 9 come with IR 4. + if self.opset < 9: + input_ids += [op.output[0] for op in const_ops] + + input_tensor_values = self.make_onnx_graph_io(input_ids) + + # create output_tensor_values + output_tensor_values = self.make_onnx_graph_io(self.outputs) + + # create graph proto + graph = helper.make_graph([op.op for op in ops], + graph_name, + input_tensor_values, + output_tensor_values, + initializer=initializers, + doc_string=doc) + + return graph + + def make_model(self, graph_doc, optimize=False, graph_name="tf2onnx", external_tensor_storage=None, **kwargs): + """ + Create final ModelProto for onnx from internal graph. + Args: + optimize: optimize graph via onnx + doc: text for doc string of the model + """ + graph = self.make_graph(graph_doc, graph_name, external_tensor_storage) + + if "producer_name" not in kwargs: + kwargs = {"producer_name": "tf2onnx", + "producer_version": __version__} + + if "opset_imports" not in kwargs: + opsets = [] + imp = OperatorSetIdProto() + imp.version = self._opset + opsets.append(imp) + if self.extra_opset is not None: + opsets.extend(self.extra_opset) + kwargs["opset_imports"] = opsets + model_proto = helper.make_model(graph, **kwargs) + + utils.make_sure(self.opset in constants.OPSET_TO_IR_VERSION, + "Opset %s is not supported yet. 
Please use a lower opset" % self.opset) + + # set the IR version based on opset + try: + model_proto.ir_version = constants.OPSET_TO_IR_VERSION.get(self.opset, model_proto.ir_version) + except: # pylint: disable=bare-except + logger.error("ir_version override failed - install the latest onnx version") + + # optimize the model proto. + # TODO: this is disabled by default because of bugs in fuse_consecutive_transposes + if optimize: + model_proto = optimizer.optimize(model_proto) + return model_proto + + def make_onnx_graph_io(self, ids): + """Create tensor_value_info for passed input/output ids.""" + tensor_value_infos = [] + for name in ids: + dtype = self.get_dtype(name) + shape = self.get_shape(name) + + utils.make_sure(dtype is not None, "missing output dtype for " + name) + # TODO: allow None output shape or not? e.g. shape=(?,) + #utils.make_sure(shape is not None, "missing output shape for " + name) + if shape is None: logger.warning("missing output shape for %s", name) + + v = utils.make_onnx_inputs_outputs(name, dtype, shape) + tensor_value_infos.append(v) + return tensor_value_infos + + def dump_graph(self): + """Dump graph with shapes (helpful for debugging).""" + for node in self.get_nodes(): + input_names = ["{}{}".format(n, self.get_shape(n)) for n in node.input] + logger.debug("%s %s %s %s", + node.type, + self.get_shape(node.output[0]), + node.name, + ", ".join(input_names)) + + def follow_inputs(self, node, num, space=""): + """Follow inputs for (helpful for debugging).""" + val = [] + top = space == "" + if num == 0: + return [] + val.append("{}{} {} {}".format(space, node.type, node.name, self.get_shape(port_name(node.name)))) + space += " " + for j in node.inputs: + val.extend(self.follow_inputs(j, num - 1, space)) + if top: + print("\n".join(reversed(val))) + print() + return [] + return val + + def dump_node_statistics(self): + op_cnt = collections.Counter() + for n in self.get_nodes(): + op_cnt[n.type] += 1 + body_graphs = n.get_body_graphs() 
+ if body_graphs: + for b_g in body_graphs.values(): + op_cnt += b_g.dump_node_statistics() + + return op_cnt + + def remove_input(self, node, to_be_removed, input_index=None): + """Remove input from Node. + Args: + node: the node we expect the input on + to_be_removed: the node name we want to remove + input_index: if not None, index of the input to be removed, + the method is more efficient if *input_index* is specified, + otherwise, it has to look for every input named *old_input*. + """ + assert isinstance(node, Node) and isinstance(to_be_removed, six.text_type) + if input_index is not None: + assert node.input[input_index] == to_be_removed + if node.input[input_index] in self._output_to_consumers: + to_ops = self._output_to_consumers[node.input[input_index]] + if node.name in to_ops: + to_ops.remove(node.name) + del node.input[input_index] + return + + for i, name in enumerate(node.input): + if name == to_be_removed: + utils.make_sure( + node.input.count(node.input[i]) <= 1, + "Node %r takes multiple times the same input %r. This case is not handled.", + node.name, node.input[i]) + self._unregister_input_name(node.input[i], node) + del node.input[i] + break + + # don't remove output from parent since others might depend on it + + def insert_new_node_on_input(self, node, op_type, input_name, name=None, domain=None, **kwargs): + """Create and insert a new node into the graph. + Args: + node: we want to replace the input for this node + op_type: type for new operation + input_name: the name(s) of the outputs above us + if scalar, new node placed above input_name + if list, new node placed above input_name[0]. 
list is inputs into new node + name: the name of the new op + kwargs: attributes of the new node + + Returns: + node that was inserted + """ + if name is None: + name = utils.make_name(node.name) + new_output = port_name(name) + if not isinstance(input_name, list): + input_name = [input_name] + + new_node = self.make_node(op_type, input_name, attr=kwargs, outputs=[new_output], name=name, domain=domain) + for i, n in enumerate(node.input): + if n == input_name[0]: + self.replace_input(node, node.input[i], new_output, i) + break + return new_node + + def insert_node_on_output(self, node, output_name=None): + """ + The inserted node takes the *output_name* as input and produces a + new output. The function goes through every node taking *output_name* + and replaces it by the new output name. + """ + if output_name is None: + output_name = node.input[0] + new_output = node.output[0] + + to_replace = [self.get_node_by_name(n) for n in self._output_to_consumers[output_name]] + to_replace = [n for n in to_replace if n != node] + self.replace_all_inputs(output_name, new_output, ops=to_replace) + return node + + def insert_new_node_on_output(self, op_type, output_name=None, name=None, inputs=None, domain=None, **kwargs): + """Create and insert a new node into the graph. + It then calls insert_node_on_output. 
+ + Args: + op_type: type for new operation + output_name: the names of the outputs above us + name: the name of the new op + kwargs: attributes of the new node + + Returns: + node that was inserted + """ + utils.make_sure(isinstance(output_name, six.text_type), "output_name's type is not expected: %s", + type(output_name)) + utils.make_sure(isinstance(op_type, six.text_type), "op_type's type is not expected: %s", + type(op_type)) + utils.make_sure(output_name is not None, "output_name cannot be None for op_type=%r.", op_type) + + if inputs is None: + inputs = [output_name] + if name is None: + name = utils.make_name(op_type) + + new_output = port_name(name) + new_node = self.make_node(op_type, inputs, attr=kwargs, outputs=[new_output], name=name, domain=domain) + return self.insert_node_on_output(new_node, output_name) + + def find_output_consumers(self, output_name): + """Find all nodes consuming a given output.""" + if output_name in self._output_to_consumers: + ops = self._output_to_consumers[output_name] + ops = [self.get_node_by_name(n) for n in ops] + else: + ops = [] # self.get_nodes() + nodes = [] + for node in ops: + if node is None: + continue + if output_name in node.input: + nodes.append(node) + + # find consumers in sub graphs + if output_name in self._input_to_graph: + for g in self._input_to_graph[output_name].values(): + nodes.extend(g.find_output_consumers(output_name)) + return nodes + + def _register_input_name(self, input_name, node, only_graph=False): + "Register node taking a specific input." 
+ if not only_graph: + if input_name not in self._output_to_consumers: + self._output_to_consumers[input_name] = set() + self._output_to_consumers[input_name].add(node.name) + if self.parent_graph is not None: + if input_name not in self.parent_graph._input_to_graph: + self.parent_graph._input_to_graph[input_name] = {} + self.parent_graph._input_to_graph[input_name][id(self)] = self + self.parent_graph._register_input_name(input_name, node, only_graph=True) + + def _unregister_input_name(self, input_name, node, only_graph=False): + "Unregister node taking a specific input." + node_name = node.name + if not only_graph: + if input_name in self._output_to_consumers[input_name]: + if node_name in self._output_to_consumers[input_name]: + self._output_to_consumers[input_name].remove(node_name) + if (self.parent_graph is not None and + input_name in self.parent_graph._input_to_graph and + id(self) in self.parent_graph._input_to_graph[input_name]): + del self.parent_graph._input_to_graph[input_name][id(self)] + self.parent_graph._unregister_input_name(input_name, node, only_graph=True) + + def replace_all_inputs(self, old_input, new_input, ops=None): + """ + Replace all inputs pointing to old_input with new_input. + *ops* is used if defined, otherwise `_output_to_consumers` + is used to determine the impacted nodes. 
+ """ + if old_input == new_input: + return + if new_input not in self._output_to_consumers: + self._output_to_consumers[new_input] = set() + + if ops is not None: + keep_ops = True + elif old_input in self._output_to_consumers: + ops = list( + filter(lambda a: a is not None, + map(self.get_node_by_name, self._output_to_consumers[old_input]))) + keep_ops = False + else: + ops = [] + keep_ops = False + + for node in ops: + assert node is not None + if old_input in node.input and new_input in node.output: + raise RuntimeError("creating a circle in the graph is not allowed: " + node.name) + self._register_input_name(new_input, node) + + for i, input_name in enumerate(node.input): + if input_name == old_input: + self.replace_input(node, node.input[i], new_input, i) + + # modify references in sub graphs + if old_input in self._input_to_graph: + for g in self._input_to_graph[old_input].values(): + g.replace_all_inputs(old_input, new_input, + ops=g.get_nodes() if keep_ops else None) + + def replace_input(self, node, old_input, new_input, input_index=None): + """ + Replace one input in a node. + The method is more efficient if *input_index* is specified. + Otherwise, it renames every output named *old_input*. + """ + assert isinstance(node, Node) and isinstance(old_input, six.text_type) and isinstance(new_input, six.text_type) + is_replaced = False + if input_index is None: + for i, input_name in enumerate(node.input): + if input_name == old_input: + node.input[i] = new_input + is_replaced = True + elif node.input[input_index] == old_input: + node.input[input_index] = new_input + is_replaced = True + else: + raise RuntimeError("Unable to replace input %r into %r for node %r." % (old_input, new_input, node.name)) + + to_ops = self._output_to_consumers.get(old_input, None) + if to_ops is not None: + if node.name in to_ops: + # A node may take twice the same entry. 
+ to_ops.remove(node.name) + + self._register_input_name(new_input, node) + return is_replaced + + def replace_inputs(self, node, new_inputs): + """Replace node inputs.""" + assert isinstance(node, Node) and isinstance(new_inputs, list) + + for old_input in node.input: + to_ops = self._output_to_consumers.get(old_input, None) + if to_ops is not None and old_input in to_ops: + # To avoid issues when a node + # takes twice the same entry. + to_ops.remove(old_input) + + for input_name in new_inputs: + assert isinstance(input_name, six.text_type) + self._register_input_name(input_name, node) + + node.input = new_inputs + return True + + def _extract_sub_graph_nodes(self, dest_node, input_checker=None): + """Return nodes of subgraph ending with dest_node. + Args: + dest_node: output node of the subgraph to find + input_checker: customized input check function: bool func(node) + + Return: + a set of nodes + """ + res_set = set() + if not dest_node or (input_checker and input_checker(dest_node) is False): + return res_set + + processing_set = set([dest_node]) + while processing_set: + top_node = processing_set.pop() + res_set.add(top_node) + all_inputs = top_node.input + list(top_node.get_implicit_inputs()) + for input_id in all_inputs: + # we don't care about nested graph here, just handle current graph cropping. + node = self.get_node_by_output(input_id, search_in_parent_graphs=False) + if not node: + # some nodes (for example Scan) have optional inputs, which + # might have empty input. + # subgraph might have input defined in outer graph + continue + if node not in res_set: + if input_checker and input_checker(node) is False: + continue + processing_set.add(node) + return res_set + + def extract_sub_graph_nodes(self, outputs_name, input_checker=None, remove_unused_inputs=True): + """Return nodes of subgraph having output_ids as outputs. 
+ Args: + output_ids: output node output id of the subgraph to find + input_checker: customized input check function: bool func(node) + remove_unused_inputs: bool, indicates whether unused placeholder inputs will be removed + in the resulting nodes. + Return: + a list of nodes + """ + res_set = set() + if not outputs_name: + return list(res_set) + + for output in outputs_name: + node = self.get_node_by_output(output, search_in_parent_graphs=False) + res_set = res_set.union(self._extract_sub_graph_nodes(node, input_checker)) + + if not remove_unused_inputs: + # add back placeholder nodes if they are not connected to outputs. + res_set = res_set.union(self.inputs) + + return list(res_set) + + def delete_unused_nodes(self, outputs_name): + """Delete nodes not in subgraph ending with output_names.""" + if not outputs_name: + logger.debug("Outputs not specified, delete_unused_nodes not taking effect.") + return + + # we need keep those placeholders that are used as input of Loop's body graph. + # some of them are not used in the graph, but still need be there to keep the graph complete. + related_nodes = self.extract_sub_graph_nodes(outputs_name, remove_unused_inputs=False) + for node in related_nodes: + attr_body_graphs = node.get_body_graphs() + if attr_body_graphs: + for body_graph in attr_body_graphs.values(): + body_graph.delete_unused_nodes(body_graph.outputs) + self.reset_nodes(related_nodes) + + def safe_to_remove_nodes(self, to_delete): + """ List of nodes that safe to delete (i.e. 
outputs not consumed by other nodes.)""" + safe_to_remove = [] + delete_set = set(to_delete) + for n in delete_set: + out_consumers = set() + for out in n.output: + out_consumers |= set(self.find_output_consumers(out)) + if out_consumers.issubset(delete_set): + safe_to_remove.append(n) + return safe_to_remove + + # TODO(tomwildenhain): Remove this function + def safe_remove_nodes(self, to_delete): + """Delete nodes in `to_delete` without third-party node consuming it.""" + delete_set = set(to_delete) + for n in delete_set: + out_consumers = set() + for out in n.output: + out_consumers |= set(self.find_output_consumers(out)) + if out_consumers.issubset(delete_set): + self.remove_node(n.name) + + def is_safe_to_remove_nodes(self, to_delete, outputs_to_ignore=None): + """Returns true if the outputs of all the nodes in to_delete have no third-party nodes consuming them""" + delete_set = set(to_delete) + outputs_to_ignore_set = set(outputs_to_ignore or []) + for n in delete_set: + out_consumers = set() + for out in n.output: + if out in outputs_to_ignore_set: + continue + out_consumers |= set(self.find_output_consumers(out)) + if not out_consumers.issubset(delete_set): + return False + return True + + +class GraphUtil(object): + """Utilities for Graph manipulation.""" + + @staticmethod + def optimize_graph(graph): + return optimizer.optimize_graph(graph) + + @staticmethod + def optimize_model_proto(onnx_model_proto): + """Optimize the model proto, for example: eliminating all useless Transpose pairs. 
+ + Returns: + model proto after optimization, if optimizer run successfully + or onnx_model_proto, if exceptions happens + """ + try: + kwargs = GraphUtil.get_onnx_model_properties(onnx_model_proto) + graph = GraphUtil.create_graph_from_onnx_model(onnx_model_proto) + graph = GraphUtil.optimize_graph(graph) + model_proto = graph.make_model(onnx_model_proto.graph.doc_string, + graph_name=onnx_model_proto.graph.name, **kwargs) + + if onnx_model_proto.metadata_props: + metadata_props = {p.key: p.value for p in onnx_model_proto.metadata_props} + helper.set_model_props(model_proto, metadata_props) + return model_proto + except Exception: + # sometimes, onnx shape inference will fail for some reason, + # return onnx_model_proto for this case + logger.warning("Failed to optimize model proto", exc_info=1) + return onnx_model_proto + + @staticmethod + def get_onnx_model_properties(onnx_model_proto): + """Get ModelProto properties""" + kwargs = {} + if onnx_model_proto.HasField('ir_version'): + kwargs["ir_version"] = onnx_model_proto.ir_version + if onnx_model_proto.HasField('producer_name'): + kwargs["producer_name"] = onnx_model_proto.producer_name + if onnx_model_proto.HasField('producer_version'): + kwargs["producer_version"] = onnx_model_proto.producer_version + if onnx_model_proto.HasField('domain'): + kwargs["domain"] = onnx_model_proto.domain + if onnx_model_proto.HasField('model_version'): + kwargs["model_version"] = onnx_model_proto.model_version + if onnx_model_proto.HasField('doc_string'): + kwargs["doc_string"] = onnx_model_proto.doc_string + kwargs["opset_imports"] = onnx_model_proto.opset_import + + return kwargs + + @staticmethod + def create_graph_from_onnx_model(onnx_model_proto): + """Create Graph loading onnx model proto.""" + # apply shape inference on the model + inferred_model = shape_inference.infer_shapes(onnx_model_proto) + graph_proto = inferred_model.graph + + opset_version = None + extra_opset = [] + for opset in onnx_model_proto.opset_import: + 
            if not opset.domain:
                # domain field is None or empty means it is onnx domain
                opset_version = opset.version
            else:
                extra_opset.append(opset)

        utils.make_sure(opset_version is not None, "opset version is not specified for onnx domain")
        main_graph = GraphUtil.create_graph_from_onnx_graph(graph_proto, opset_version, extra_opset)
        return main_graph

    @staticmethod
    def create_graph_from_onnx_graph(graph_proto, opset_version=None, extra_opset=None):
        """Create Graph loading onnx graph proto.

        Collects output shapes/dtypes from the proto's value_info and output
        entries, renames Constant ops to Const, builds a Graph, then recurses
        into any graph-valued node attributes as body graphs.
        """
        output_shapes = {}
        output_dtypes = {}

        shapes, dtypes = GraphUtil._parse_shape_and_type_from_value_infos(graph_proto.value_info)
        output_shapes.update(shapes)
        output_dtypes.update(dtypes)

        # graph outputs may carry shape/type info not present in value_info
        shapes, dtypes = GraphUtil._parse_shape_and_type_from_value_infos(graph_proto.output)
        output_shapes.update(shapes)
        output_dtypes.update(dtypes)

        nodes_to_append = []
        for n in graph_proto.node:
            if n.op_type == "Constant":
                n.op_type = "Const"

            # some pytorch model had empty names - make one up
            if not n.name:
                n.name = utils.make_name("was_empty")
            nodes_to_append.append(n)

        output_names = []
        for n in graph_proto.output:
            output_names.append(n.name)

        g = Graph(nodes_to_append, output_shapes, output_dtypes, None, opset_version, extra_opset, None, output_names)
        const_nodes = GraphUtil._parse_graph_initializer(g, graph_proto)
        GraphUtil._parse_graph_input(g, graph_proto, [n.name for n in const_nodes])

        for n in g.get_nodes():
            for attr_name, attr_val in n.attr.items():
                if attr_val.HasField('g'):
                    # it is assumed that attr_val.g already has inferred shapes/dtypes
                    # (shape inference ran on the whole model before this call)
+ sub_g = GraphUtil.create_graph_from_onnx_graph(attr_val.g, opset_version, extra_opset) + n.set_body_graph_as_attr(attr_name, sub_g) + return g + + @staticmethod + def get_node_count_from_onnx_graph(graph_proto): + op_cnt = collections.Counter() + for n in graph_proto.node: + op_cnt[n.op_type] += 1 + return op_cnt + + @staticmethod + def _parse_shape_and_type_from_value_infos(value_infos): + """Get nodes output shapes and types from value infos.""" + output_shapes = {} + output_dtypes = {} + for shape_info in value_infos: + type_proto = shape_info.type + elem_type = type_proto.tensor_type.elem_type + shape = type_proto.tensor_type.shape + tuned_shape = [] + for d in shape.dim: + if d.HasField('dim_param'): + tuned_shape.append(-1) + elif d.HasField('dim_value'): + tuned_shape.append(d.dim_value) + else: + # it is found, some unknown dims is missing after inference. + tuned_shape.append(-1) + output_shapes[shape_info.name] = tuned_shape + output_dtypes[shape_info.name] = elem_type + + return output_shapes, output_dtypes + + @staticmethod + def _parse_graph_initializer(g, graph_proto): + """Get graph initializers and put into Graph object.""" + const_nodes = [] + for initializer in graph_proto.initializer: + np_val = numpy_helper.to_array(initializer) + const_nodes.append(g.make_const(initializer.name, np_val)) + + return const_nodes + + @staticmethod + def _parse_graph_input(g, graph_proto, const_node_names): + """Get graph inputs not defined as initializers and put into Graph object.""" + shapes, dtypes = GraphUtil._parse_shape_and_type_from_value_infos(graph_proto.input) + # make sure the input is added in order we read from graph_proto, + # because for subgraphs, the input orders matter. 
+ for graph_input in graph_proto.input: + name = graph_input.name + shape = shapes[name] + dtype = dtypes[name] + if name not in const_node_names: + g.add_graph_input(name, dtype, shape) + else: + g.add_graph_input_with_default(name, g.get_node_by_name(name), dtype, shape) diff --git a/lib/python3.10/site-packages/tf2onnx/graph_builder.py b/lib/python3.10/site-packages/tf2onnx/graph_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..0e87534bcb5bc269c1dd4de4b5b2f0682e99c226 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/graph_builder.py @@ -0,0 +1,228 @@ +# SPDX-License-Identifier: Apache-2.0 + + +""" +tf2onnx.graph_helper - class to help building graph, such as helping to make complex node +""" + +import numpy as np +from tf2onnx import utils, logging + + +# pylint: disable=missing-docstring + + +logger = logging.getLogger(__name__) + + +class GraphBuilder(object): + """help to build graph""" + def __init__(self, graph): + self._g = graph + + @property + def graph(self): + return self._g + + def make_slice(self, kwargs, name=None, shapes=None, dtypes=None, return_node=False): + """ + slice changes its schema at opset 10: it treats some attributes as dynamic input + so this function has to process inputs according to graph's opset version + to get "inputs" and "attr" to feed "make_node" + kwargs: key could be ["data", "starts", "ends", "axes", "steps", "outputs"]. + """ + outputs = kwargs.pop("outputs", None) + + if self.graph.opset < 10: + # "data" is string + # "starts", "ends" and "axes" are attributes, and "axes" is optional. 
            data = kwargs.pop("data")
            starts = self.convert_to_attribute(kwargs.pop("starts"))
            ends = self.convert_to_attribute(kwargs.pop("ends"))
            axes = self.convert_to_attribute(kwargs.pop("axes", None), is_optional=True)
            attr = {"starts": starts, "ends": ends, "axes": axes}
            inputs = [data]
        else:
            # slice-10 has 3 required inputs "data", "starts", "ends"
            # and 2 optional inputs "axes", "steps"
            # input sequence should be "data", "starts", "ends", "axes", "steps"
            attr = {}
            data = kwargs.pop("data")
            starts = self.convert_to_input(kwargs.pop("starts"), "const_starts", dtype=np.int64)
            ends = self.convert_to_input(kwargs.pop("ends"), "const_ends", dtype=np.int64)
            axes = self.convert_to_input(kwargs.pop("axes", None), "const_axes", is_optional=True, dtype=np.int64)
            steps = self.convert_to_input(kwargs.pop("steps", None), "const_steps", is_optional=True, dtype=np.int64)
            inputs = [data, starts, ends, axes, steps]

        # pre-process inputs and attr: any key still left in kwargs was not consumed
        utils.make_sure(not kwargs, "kwargs contains un-used key")

        # drop attributes whose value is None (optional attrs not supplied)
        new_attr = {}
        for key, val in attr.items():
            if val is not None:
                new_attr[key] = val
        attr = new_attr

        for ind, val in enumerate(inputs):
            if val is None:
                inputs[ind] = utils.ONNX_EMPTY_INPUT  # empty string means no connection in ONNX
        # remove trailing ""
        while inputs[-1] == utils.ONNX_EMPTY_INPUT:
            inputs = inputs[:-1]

        if self.graph.opset >= 10:
            # starts/ends/axes/steps must all share one dtype in Slice-10+
            dtype = self.graph.get_dtype(inputs[1])
            for input_data in inputs[1:]:
                if input_data != utils.ONNX_EMPTY_INPUT:
                    utils.make_sure(dtype == self.graph.get_dtype(input_data), "dtype should be same")

        node = self.graph.make_node(op_type="Slice", inputs=inputs, attr=attr, name=name,
                                    outputs=outputs, shapes=shapes, dtypes=dtypes)
        if return_node:
            return node
        return node.output[0]

    def make_reduce_sum(self, kwargs, name=None, shapes=None, dtypes=None):
        """
        ReduceSum changes its schema at opset 13: it treats some axes as dynamic input
        kwargs: key could be ["data",
"axes", "keepdims", "noop_with_empty_axes", "outputs"]. + """ + outputs = kwargs.pop("outputs", None) + + if self.graph.opset < 13: + data = kwargs.pop("data") + axes = self.convert_to_attribute(kwargs.pop("axes", None), is_optional=True) + keepdims = kwargs.pop("keepdims", 1) + noop_with_empty_axes = kwargs.pop("noop_with_empty_axes", 0) + if noop_with_empty_axes == 0 and axes == []: + axes = None + attr = {"axes": axes, "keepdims": keepdims} + inputs = [data] + else: + keepdims = kwargs.pop("keepdims", 1) + noop_with_empty_axes = kwargs.pop("noop_with_empty_axes", 0) + data = self.convert_to_input(kwargs.pop("data"), "const_data") + axes = self.convert_to_input(kwargs.pop("axes", None), "const_axes", is_optional=True, dtype=np.int64) + attr = {"keepdims": keepdims, "noop_with_empty_axes": noop_with_empty_axes} + inputs = [data, axes] + + utils.make_sure(not kwargs, "kwargs contains un-used key") + + new_attr = {} + for key, val in attr.items(): + if val is not None: + new_attr[key] = val + attr = new_attr + + return self.graph.make_node(op_type="ReduceSum", inputs=inputs, attr=attr, name=name, + outputs=outputs, shapes=shapes, dtypes=dtypes).output[0] + + def make_squeeze(self, kwargs, name=None, shapes=None, dtypes=None, return_node=False, op_name_scope=None): + """ + Squeeze changes its schema at opset 13: it treats axes as a dynamic input + kwargs: key could be ["data", "axes"]. 
+ """ + outputs = kwargs.pop("outputs", None) + + if self.graph.opset < 13: + data = kwargs.pop("data") + axes = self.convert_to_attribute(kwargs.pop("axes", None), is_optional=True) + attr = {"axes": axes} + inputs = [data] + else: + data = kwargs.pop("data") + axes = self.convert_to_input(kwargs.pop("axes", None), "const_axes", is_optional=True, dtype=np.int64) + attr = {} + inputs = [data, axes] + + utils.make_sure(not kwargs, "kwargs contains un-used key") + + new_attr = {} + for key, val in attr.items(): + if val is not None: + new_attr[key] = val + attr = new_attr + + for ind, val in enumerate(inputs): + if val is None: + inputs[ind] = utils.ONNX_EMPTY_INPUT # empty string means no connection in ONNX + # remove tailing "" + while inputs[-1] == utils.ONNX_EMPTY_INPUT: + inputs = inputs[:-1] + + node = self.graph.make_node(op_type="Squeeze", inputs=inputs, attr=attr, name=name, + outputs=outputs, shapes=shapes, dtypes=dtypes, + op_name_scope=op_name_scope) + if return_node: + return node + return node.output[0] + + def make_unsqueeze(self, kwargs, name=None, shapes=None, dtypes=None, return_node=False, op_name_scope=None): + """ + Unsqueeze changes its schema at opset 13: it treats axes as a dynamic input + kwargs: key could be ["data", "axes"]. 
        """
        outputs = kwargs.pop("outputs", None)

        if self.graph.opset < 13:
            # pre-13: axes is an attribute
            data = kwargs.pop("data")
            axes = self.convert_to_attribute(kwargs.pop("axes", None), is_optional=True)
            attr = {"axes": axes}
            inputs = [data]
        else:
            # 13+: axes is a dynamic (int64) input
            data = kwargs.pop("data")
            axes = self.convert_to_input(kwargs.pop("axes", None), "const_axes", is_optional=True, dtype=np.int64)
            attr = {}
            inputs = [data, axes]

        utils.make_sure(not kwargs, "kwargs contains un-used key")

        # drop attributes whose value is None (optional attrs not supplied)
        new_attr = {}
        for key, val in attr.items():
            if val is not None:
                new_attr[key] = val
        attr = new_attr

        for ind, val in enumerate(inputs):
            if val is None:
                inputs[ind] = utils.ONNX_EMPTY_INPUT  # empty string means no connection in ONNX
        # remove trailing ""
        while inputs[-1] == utils.ONNX_EMPTY_INPUT:
            inputs = inputs[:-1]

        node = self.graph.make_node(op_type="Unsqueeze", inputs=inputs, attr=attr, name=name,
                                    outputs=outputs, shapes=shapes, dtypes=dtypes,
                                    op_name_scope=op_name_scope)
        if return_node:
            return node
        return node.output[0]

    def convert_to_input(self, tensor, const_name, is_optional=False, dtype=None):
        """In ONNX an input should come from a node, so it must be a string.

        A python list is materialized as a new Const node named after
        *const_name*; a string is assumed to already be a tensor name.
        """
        if is_optional and tensor is None:
            return None

        utils.make_sure(tensor is not None, "input is required so it couldn't be None")

        res = tensor
        if isinstance(tensor, list):
            res = self.graph.make_const(utils.make_name(const_name), np.array(tensor, dtype)).output[0]

        utils.make_sure(isinstance(res, str), "input is a dynamic input, so a str is needed")

        return res

    def convert_to_attribute(self, tensor, is_optional=False):
        """Convert *tensor* to an attribute value (a python list).

        A string is treated as the output name of a Const node and its
        tensor value is read back as a list.
        """
        if is_optional and tensor is None:
            return None

        utils.make_sure(tensor is not None, "input is required so it couldn't be None")

        res = tensor
        if isinstance(tensor, str):
            const_node = self.graph.get_node_by_output(tensor)
            res = const_node.get_tensor_value(as_list=True)

        utils.make_sure(isinstance(res, list), "input is an attr, so a list is needed")

+ return res diff --git a/lib/python3.10/site-packages/tf2onnx/graph_matcher.py b/lib/python3.10/site-packages/tf2onnx/graph_matcher.py new file mode 100644 index 0000000000000000000000000000000000000000..3460f07b89ad1aebdd7b63f5b6ffaca6b270428a --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/graph_matcher.py @@ -0,0 +1,277 @@ +# SPDX-License-Identifier: Apache-2.0 + +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utilities that match patterns in a tf.Graph.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +from itertools import permutations +import six + + +class OpTypePattern(object): + """A tree pattern that matches TF expressions with certain op types.""" + + def __init__(self, op_type, name=None, inputs=None, allow_reorder=None): + """Initializes an OpTypePattern. + + Args: + op_type: string that specifies the allowed types of the root. It can be + (1) an op type, e.g. 'Conv2D', + (2) '*', i.e. wildcard, or + (3) multiple op types separated by '|', e.g., 'Relu|Relu6'. + We could use regex strings, which might be worthwhile when we have many + similar TF op types. + name: Optional string. The name of the pattern that can be looked up in + MatchResult. 
+ inputs: Optional list of `OpTypePattern`s or strings that specify the + patterns for the inputs of a matching op. If None, this pattern accepts + any inputs of a matching op. + allow_reorder: Optional boolean that overrides allow_reorder in GraphMatcher + for this pattern's immediate inputs. + """ + self._op_type = op_type + self._name = name + self.allow_reorder = allow_reorder + if inputs is None: + inputs = [] + self._inputs = [ + input_pattern if isinstance(input_pattern, OpTypePattern) else + OpTypePattern(input_pattern) for input_pattern in inputs + ] + self.op_type_set = set(op_type.split('|')) if op_type else set() + + @property + def op_type(self): + return self._op_type + + @property + def inputs(self): + return self._inputs + + @property + def name(self): + return self._name + + +class MatchResult(object): + r"""Encapsulates the result of a match done by GraphMatcher. + + MatchResult contains a map from OpTypePattern to the matching op and tensor. + When the matching op has multiple output tensors, the matching tensor is the + output tensor used by the matching op of the parent pattern. E.g., when we + match graph + + - + + / \y0 y1/ \ + x split z + | + y (nodes are ops; edges are going up) + + against add_pattern defined as + + y1_pattern = OpTypePattern('*') + z_pattern = OpTypePattern('*') + add_pattern = OpTypePattern('+', inputs=[y1_pattern, z_pattern]) + + the matching op of `y1_pattern` is `split`, and the matching tensor of + `y1_pattern` + is `y1` not `y0`. 
+ """ + + def __init__(self): + self._pattern_to_op_tensor = {} + self._name_to_pattern = {} + + def add(self, pattern, op, tensor): + self._pattern_to_op_tensor[pattern] = op, tensor + if pattern.name is not None: + # allow this so we can apply subgraphs multiple times + # if pattern.name in self._name_to_pattern: + # raise ValueError( + # 'Name %s is already bound to another pattern' % pattern.name) + self._name_to_pattern[pattern.name] = pattern + + def _to_pattern(self, pattern_or_name): + if isinstance(pattern_or_name, OpTypePattern): + return pattern_or_name + + if isinstance(pattern_or_name, six.text_type): + return self._name_to_pattern.get(pattern_or_name) + + raise ValueError('pattern_or_name has type %s. Expect OpTypePattern or str.' + % type(pattern_or_name)) + + def get_op(self, pattern_or_name, default=None): + """ + For now, if the op can not be effectively obtained, then the function will return the default + instead of an error. + """ + op_and_tensor = self._pattern_to_op_tensor.get(self._to_pattern(pattern_or_name)) + if op_and_tensor: + return op_and_tensor[0] + return default + + def get_tensor(self, pattern_or_name, default=None): + """ + For now, if the tensor can not be effectively obtained, then the function will return the default + instead of an error. + """ + op_and_tensor = self._pattern_to_op_tensor.get(self._to_pattern(pattern_or_name)) + if op_and_tensor: + return op_and_tensor[1] + return default + + def get_nodes(self): + return [n[0] for n in self._pattern_to_op_tensor.values()] + + +class GraphMatcher(object): + """Checks if a particular subgraph matches a given pattern.""" + + def __init__(self, pattern, allow_reorder=False): + """Initializes a GraphMatcher. + + Args: + pattern: The `OpTypePattern` against which `GraphMatcher` matches + subgraphs. 
        """
        self._pattern = pattern
        self._allow_reorder = allow_reorder

    @staticmethod
    def _is_op_type_same(op, pattern):
        # "*" is a wildcard matching any op type; otherwise the op's type must
        # be one of the '|'-separated alternatives collected in op_type_set.
        if pattern.op_type == "*":
            return True

        if op.type in pattern.op_type_set:
            return True

        return False

    def _match_pattern(self, pattern, op, tensor):
        """Returns whether a TF expression rooted at `op` matches `pattern`.

        If there is a match, adds to `self._match_result` the matching op and tensor
        with key `pattern`.

        Args:
          pattern: An `OpTypePattern`.
          op: A `tf.Operation` to match against the pattern.
          tensor: the output `tf.Tensor` of `op` that is used by the matching op of
            `pattern`'s parent. Can be None if `pattern` is already the root of the
            pattern tree.

        Returns:
          if matched return True and match_list whose elem is [pattern, op, tensor]
          else return False
          the condition that op is matched with pattern:
          1 op is same:
            if pattern.op_type is None or *, then treat as same
            or op.type in pattern.op_type.split("|")
          2 op.inputs are same with pattern.inputs:
            if not pattern.inputs, then treat as same
            otherwise, iteratively compare input nodes with pattern.
        """
        match_list = []
        if pattern.op_type is None:
            # A None op_type matches anything without recording a binding.
            return True, match_list

        if self._is_op_type_same(op, pattern):
            match_list.append([pattern, op, tensor])
        else:
            return False, match_list

        if not pattern.inputs:
            # If pattern.inputs is empty, skips the rest and accepts all the inputs.
+ return True, match_list + + if not op or len(op.inputs) != len(pattern.inputs): + return False, match_list + + allow_reorder = pattern.allow_reorder + if allow_reorder is None: + allow_reorder = self._allow_reorder + if allow_reorder: + pattern_inputs_list = permutations(pattern.inputs) + else: + pattern_inputs_list = [pattern.inputs] + + for possible_pattern_inputs in pattern_inputs_list: + pat = list(zip(op.inputs, possible_pattern_inputs)) + match_flag_of_inputs = [] + match_lists_of_inputs = [] + for input_tensor, input_pattern in pat: + # print("MATCHING", input_pattern.op_type, input_tensor.type) + flag, match_list_of_input = self._match_pattern(input_pattern, input_tensor, input_tensor) + match_flag_of_inputs.append(flag) + match_lists_of_inputs.extend(match_list_of_input) + + if all(match_flag_of_inputs): + match_list.extend(match_lists_of_inputs) + return True, match_list + return False, match_list + + def _parse_match_list_to_match_result(self, match_list): + for pattern, op, tensor in match_list: + self._match_result.add(pattern, op, tensor) + + def match_op(self, op): + """Matches `op` against `self._pattern`. + + Args: + op: `tf.Operation` to match against the pattern. + + Returns: + Returns a `MatchResult` if `op` matches the pattern; otherwise, returns + None. + """ + self._match_result = MatchResult() + match_flag, match_list = self._match_pattern(self._pattern, op, tensor=None) + if not match_flag: + return None + self._parse_match_list_to_match_result(match_list) + return self._match_result + + def match_ops(self, ops): + """Matches each operation in `ops` against `self._pattern`. + + Args: + ops: collection of `tf.Operation` to match against the pattern. + + Yields: + `MatchResult` for each `tf.Operation` that matches the pattern. + """ + for op in ops: + match_result = self.match_op(op) + if match_result: + yield match_result + + def match_graph(self, graph): + """Matches each operation in `graph` against `self._pattern`. 
+ + Args: + graph: `tf.Graph` containing operations to match. + + Yields: + `MatchResult` for each `tf.Operation` in `graph` that matches the pattern. + """ + # Python 3.3.2+ implements `yield from`, but for now: + for match_result in self.match_ops(graph.get_operations()): + yield match_result diff --git a/lib/python3.10/site-packages/tf2onnx/handler.py b/lib/python3.10/site-packages/tf2onnx/handler.py new file mode 100644 index 0000000000000000000000000000000000000000..7ab8172a001179c88282c1bd481853154998a68b --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/handler.py @@ -0,0 +1,152 @@ +# SPDX-License-Identifier: Apache-2.0 + + +"""Opset registry.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import collections +import inspect + +from tf2onnx import constants + +# pylint: disable=unused-argument,missing-docstring,invalid-name + + +class tf_op: + """Class to implement the decorator to register handlers that map tf to onnx.""" + + # Maps domains (string) to lists (idx represents opset) of dicts (key = op to handle, value = handler) + _OPSETS = collections.OrderedDict() + # Cache of mapping for current domain and opset. Maps op names to handlers [(func, kwargs) tuple] + _MAPPING = None + # Cache of mapping from domain to map of op name to handlers. Used to fetch handlers from different domains + _DOMAIN_MAPPING = None + + def __init__(self, name, domain=constants.ONNX_DOMAIN, **kwargs): + """Called decorator from decorator. + + :param name: The name (or list of names) of the tensorflow operator. + :param domain: The domain the handler requires, defaults to onnx. + :param kwargs: Dictionary that are passed to the handler. A key 'onnx_op' will change the operator name. 
        """
        if not isinstance(name, list):
            name = [name]
        self.names = name
        self.domain = domain
        self.kwargs = kwargs

    def __call__(self, func):
        # Register every method named "version_<N>" on the decorated class as
        # the handler for opset N.
        for k, v in inspect.getmembers(func, inspect.ismethod):
            if k.startswith("version_"):
                version = int(k.replace("version_", ""))
                tf_op.register_handler(v, version, self.names, self.domain, self.kwargs)
        return func

    def register_compat_handler(self, func, version):
        """Register old style custom handler.

        :param func: The handler.
        :param version: The version of the handler.
        """
        tf_op.register_handler(func, version, self.names, self.domain, self.kwargs)

    @staticmethod
    def register_handler(func, version, names, domain, kwargs):
        """Register handler.

        :param func: The handler.
        :param version: (int) The opset of onnx (or other domain) required for the handler.
        :param names: List of names of the operators to convert.
        :param domain: The domain the handler requires, defaults to onnx.
        :param kwargs: Dictionary passed through to the handler.
        """
        opset = tf_op._OPSETS.get(domain)
        if not opset:
            opset = []
            tf_op._OPSETS[domain] = opset
        # Grow the per-domain list so index `version` exists; each slot is a
        # dict mapping op name -> (handler, kwargs).
        while version >= len(opset):
            opset.append({})
        opset_dict = opset[version]
        for name in names:
            opset_dict[name] = (func, kwargs)

    @staticmethod
    def get_opsets():
        return tf_op._OPSETS

    @staticmethod
    def create_mapping(max_onnx_opset_version, extra_opsets):
        """Create the final mapping dictionary by stacking domains and opset versions.

        :param max_onnx_opset_version: The highest onnx opset the resulting graph may use.
        :param extra_opsets: Extra opsets the resulting graph may use.
        """
        mapping = {constants.ONNX_DOMAIN: max_onnx_opset_version}
        if extra_opsets:
            for extra_opset in extra_opsets:
                mapping[extra_opset.domain] = extra_opset.version
        ops_mapping = {}
        domain_to_ops_mapping = collections.defaultdict(dict)
        # Walk versions in ascending order so later (higher-opset) handlers
        # overwrite earlier ones, up to the allowed opset per domain.
        for domain, opsets in tf_op.get_opsets().items():
            for target_opset, op_map in enumerate(opsets):
                m = mapping.get(domain)
                if m:
                    if target_opset <= m and op_map:
                        # NOTE(review): this merges the accumulated cross-domain
                        # `ops_mapping` into this domain's map rather than the
                        # per-version `op_map` — confirm `op_map` was not the
                        # intended argument here.
                        domain_to_ops_mapping[domain].update(ops_mapping)
                        ops_mapping.update(op_map)

        tf_op._MAPPING = ops_mapping
        tf_op._DOMAIN_MAPPING = domain_to_ops_mapping
        return ops_mapping

    @staticmethod
    def find_effective_op(name, domain=None):
        """Find the effective version of an op after create_mapping has run.

        This is used if we need to compose ops from other ops where we'd need to find the
        op that is going to be used in the final graph, for example there is a custom op
        that overrides a onnx op ...

        :param name: The operator name.
        :param domain: The domain to use (optional).
        """
        if domain is None:
            map_info = tf_op._MAPPING.get(name)
        else:
            map_info = tf_op._DOMAIN_MAPPING[domain].get(name)
        if map_info is None:
            return None
        return map_info


class tfl_op:
    """Class to implement the decorator to register handlers that map tflite to tf or onnx."""

    def __init__(self, name, domain=constants.ONNX_DOMAIN, **kwargs):
        """Called decorator from decorator.

        :param name: The name (or list of names) of the tflite operator.
        :param domain: The domain the operator belongs to, defaults to onnx. Use 'com.google.tensorflow' for tflite->tf
        :param kwargs: Dictionary that are passed to the handler. A key 'onnx_op' will change the operator name.
            'tf_op' will convert the op to tf during a tflite to tf conversion pass.
+ """ + if not isinstance(name, list): + name = [name] + self.names = name + self.domain = domain + self.kwargs = kwargs + + def __call__(self, func): + # Register any handlers of the form 'version_#' + tf_op(self.names, self.domain, **self.kwargs)(func) + # TFLite to TF handlers have the function name 'to_tf' which takes the optional 'tf_op' kwarg + if hasattr(func, 'to_tf'): + tf_op.register_handler(func.to_tf, 0, self.names, 'com.google.tensorflow', self.kwargs) + return func + + @staticmethod + def create_tfl_to_tf_mapping(): + return tf_op.get_opsets()['com.google.tensorflow'][0] diff --git a/lib/python3.10/site-packages/tf2onnx/rewriter/__init__.py b/lib/python3.10/site-packages/tf2onnx/rewriter/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ea340c7e2d92fae0fa9311374b0eba044fbcfeb5 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/rewriter/__init__.py @@ -0,0 +1,50 @@ +# SPDX-License-Identifier: Apache-2.0 + +"""tf2onnx.rewriter module.""" + +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +from tf2onnx.rewriter.cond_rewriter import rewrite_cond +from tf2onnx.rewriter.conv2d_with_pad_rewriter import rewrite_conv2d_with_pad +from tf2onnx.rewriter.dropout_rewriter import rewrite_dropout +from tf2onnx.rewriter.eye_rewriter import rewrite_eye +from tf2onnx.rewriter.flatten_rewriter import rewrite_flatten +from tf2onnx.rewriter.gemm_rewriter import rewrite_gemm +from tf2onnx.rewriter.leakyrelu_rewriter import rewrite_leakyrelu +from tf2onnx.rewriter.random_normal_rewriter import rewrite_random_normal +from tf2onnx.rewriter.random_uniform import rewrite_random_uniform, rewrite_random_uniform_fold_const +from tf2onnx.rewriter.rnn import rewrite_single_direction_lstm, rewrite_bi_direction_lstm, \ + rewrite_single_direction_gru, rewrite_bi_direction_gru, \ + rewrite_custom_rnn_cell, rewrite_generic_loop +from tf2onnx.rewriter.thresholded_relu_rewriter import 
rewrite_thresholded_relu +from tf2onnx.rewriter.transpose_rewriter import rewrite_transpose +from tf2onnx.rewriter.conv2d_with_add_rewriter import rewrite_biasadd_with_conv2d +from tf2onnx.rewriter.quantization_ops_rewriter import rewrite_quantize_and_dequantize +from tf2onnx.rewriter.layer_normalization_rewriter import rewrite_layer_normalization + + +__all__ = [ + "rewrite_cond", + "rewrite_conv2d_with_pad", + "rewrite_dropout", + "rewrite_eye", + "rewrite_flatten", + "rewrite_gemm", + "rewrite_leakyrelu", + "rewrite_random_normal", + "rewrite_random_uniform", + "rewrite_random_uniform_fold_const", + "rewrite_thresholded_relu", + "rewrite_transpose", + "rewrite_single_direction_lstm", + "rewrite_bi_direction_lstm", + "rewrite_single_direction_gru", + "rewrite_bi_direction_gru", + "rewrite_custom_rnn_cell", + "rewrite_generic_loop", + "rewrite_biasadd_with_conv2d", + "rewrite_quantize_and_dequantize", + "rewrite_layer_normalization" +] diff --git a/lib/python3.10/site-packages/tf2onnx/rewriter/bigru_rewriter.py b/lib/python3.10/site-packages/tf2onnx/rewriter/bigru_rewriter.py new file mode 100644 index 0000000000000000000000000000000000000000..fe721626d7fa7864c671ab8185f7c694aa1b1237 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/rewriter/bigru_rewriter.py @@ -0,0 +1,106 @@ +# SPDX-License-Identifier: Apache-2.0 + + +""" +tf2onnx.rewriter.bigru_rewriter - bigru support. +This rewriter depends on tf2onnx.rewriter.gru_rewriter's results. 
+""" + +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import logging +import numpy as np +from tf2onnx import utils +from tf2onnx.rewriter import rnn_utils + + +logger = logging.getLogger(__name__) + +# pylint: disable=invalid-name,unused-argument,missing-docstring + +def process_bigru(g, bi_grus): + for gru_fw, gru_bw in bi_grus: + logger.debug("=========================") + logger.debug("start handling potential bidirectional gru: %s, %s", gru_fw.name, gru_bw.name) + + w_fw = rnn_utils.get_np_val_for_const(g, gru_fw, 1) + w_bw = rnn_utils.get_np_val_for_const(g, gru_bw, 1) + r_fw = rnn_utils.get_np_val_for_const(g, gru_fw, 2) + r_bw = rnn_utils.get_np_val_for_const(g, gru_bw, 2) + b_fw = rnn_utils.get_np_val_for_const(g, gru_fw, 3) + b_bw = rnn_utils.get_np_val_for_const(g, gru_bw, 3) + W = np.concatenate((w_fw, w_bw), axis=0) + R = np.concatenate((r_fw, r_bw), axis=0) + B = np.concatenate((b_fw, b_bw), axis=0) + + all_nodes = g.get_nodes() + if len(gru_fw.inputs) == len(gru_bw.inputs): + if len(gru_fw.inputs) > 4: + initializer_node = process_init_nodes(g, gru_fw, gru_bw, all_nodes) + else: + logger.error("fw, bw gru inputs num is not consistent. 
stop") + continue + + # create node + w_name = utils.make_name("W") + w_node = g.make_const(w_name, W, skip_conversion=True) + all_nodes.append(w_node) + + r_name = utils.make_name("R") + r_node = g.make_const(r_name, R, skip_conversion=True) + all_nodes.append(r_node) + + b_name = utils.make_name("B") + b_node = g.make_const(b_name, B, skip_conversion=True) + all_nodes.append(b_node) + gru_inputs = [gru_fw.input[0], w_node.output[0], + r_node.output[0], b_node.output[0]] + if len(gru_fw.inputs) > 4: + gru_inputs.extend([gru_fw.input[4], initializer_node.output[0]]) + + direction = "bidirectional" + attr = {} + for name in rnn_utils.onnx_rnn_attr_mapping[rnn_utils.ONNX_RNN_TYPE.GRU]: + attr_val = gru_fw.get_attr_value(name) + if attr_val: + attr[name] = attr_val + # activation has to be took care, attr here is proto + activations = [act.decode("utf-8") + for act in gru_fw.get_attr_value("activations")] + activations += [act.decode("utf-8") + for act in gru_bw.get_attr_value("activations")] + attr.update({"direction": direction, "activations": activations}) + + bi_gru_node = g.make_node("GRU", gru_inputs, attr=attr, output_count=2) + all_nodes.append(bi_gru_node) + logger.debug("processing output nodes") + + to_remove = [gru_fw.name, gru_fw.input[1], gru_fw.input[2], gru_fw.input[3], + gru_bw.name, gru_bw.input[1], gru_bw.input[2], gru_bw.input[3]] + rnn_utils.slice_birnn_for_original_rnn_consumers( + g, gru_fw, gru_bw, bi_gru_node, 0, all_nodes, to_remove) + rnn_utils.slice_birnn_for_original_rnn_consumers( + g, gru_fw, gru_bw, bi_gru_node, 1, all_nodes, to_remove) + + gru_bw_old_x = gru_bw.input[0] + for n in to_remove: + g.remove_node(n) + + rnn_utils.remove_reverse_in_bw_input(g, gru_bw_old_x, rnn_utils.ONNX_RNN_TYPE.GRU) + + return g.get_nodes() + + +def process_init_nodes(g, gru_fw, gru_bw, to_append): + initializer_node = rnn_utils.process_single_init_node( + g, gru_fw.input[5], gru_bw.input[5], to_append) + + return initializer_node + + +def 
rewrite_bidirectional_grus(g, ops): + bi_grus = rnn_utils.find_bidirectional_rnns(g, ops, rnn_utils.ONNX_RNN_TYPE.GRU) + + return process_bigru(g, bi_grus) diff --git a/lib/python3.10/site-packages/tf2onnx/rewriter/bilstm_rewriter.py b/lib/python3.10/site-packages/tf2onnx/rewriter/bilstm_rewriter.py new file mode 100644 index 0000000000000000000000000000000000000000..95ca89f0fdf5b3dabc701864571b70e72d32844d --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/rewriter/bilstm_rewriter.py @@ -0,0 +1,102 @@ +# SPDX-License-Identifier: Apache-2.0 + + +""" +tf2onnx.rewriter.bilstm_rewriter - bilstm support. +This rewriter depends on tf2onnx.rewriter.lstm_rewriter's results. +""" + +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import logging +import numpy as np +from tf2onnx import utils +from tf2onnx.rewriter import rnn_utils + +logger = logging.getLogger(__name__) + +# pylint: disable=invalid-name,unused-argument,missing-docstring + +def process_bilstm(g, bi_lstms): + for lstm_fw, lstm_bw in bi_lstms: + logger.debug("=========================") + logger.debug("start handling potential bidirectional lstm: %s, %s", lstm_fw.name, lstm_bw.name) + + w_fw = rnn_utils.get_np_val_for_const(g, lstm_fw, 1) + w_bw = rnn_utils.get_np_val_for_const(g, lstm_bw, 1) + r_fw = rnn_utils.get_np_val_for_const(g, lstm_fw, 2) + r_bw = rnn_utils.get_np_val_for_const(g, lstm_bw, 2) + b_fw = rnn_utils.get_np_val_for_const(g, lstm_fw, 3) + b_bw = rnn_utils.get_np_val_for_const(g, lstm_bw, 3) + W = np.concatenate((w_fw, w_bw), axis=0) + R = np.concatenate((r_fw, r_bw), axis=0) + B = np.concatenate((b_fw, b_bw), axis=0) + + all_nodes = g.get_nodes() + if len(lstm_fw.inputs) == len(lstm_bw.inputs): + if len(lstm_fw.inputs) > 4: + h_node, c_node = process_ch_init_nodes(g, lstm_fw, lstm_bw, all_nodes) + else: + logger.error("fw, bw lstm inputs num is not consistent. 
stop") + continue + + # create node + w_name = utils.make_name("W") + w_node = g.make_const(w_name, W, skip_conversion=True) + all_nodes.append(w_node) + + r_name = utils.make_name("R") + r_node = g.make_const(r_name, R, skip_conversion=True) + all_nodes.append(r_node) + + b_name = utils.make_name("B") + b_node = g.make_const(b_name, B, skip_conversion=True) + all_nodes.append(b_node) + lstm_inputs = [lstm_fw.input[0], w_node.output[0], r_node.output[0], b_node.output[0]] + if len(lstm_fw.inputs) > 4: + lstm_inputs.extend([lstm_fw.input[4], h_node.output[0], c_node.output[0]]) + + attr = {"direction": "bidirectional"} + for name in rnn_utils.onnx_rnn_attr_mapping[rnn_utils.ONNX_RNN_TYPE.LSTM]: + attr_val = lstm_fw.get_attr_value(name) + if attr_val: + attr[name] = attr_val + + bi_lstm_node = g.make_node("LSTM", lstm_inputs, attr=attr, output_count=3) + all_nodes.append(bi_lstm_node) + logger.debug("processing output nodes") + + to_remove = [lstm_fw.name, lstm_fw.input[1], lstm_fw.input[2], lstm_fw.input[3], + lstm_bw.name, lstm_bw.input[1], lstm_bw.input[2], lstm_bw.input[3]] + rnn_utils.slice_birnn_for_original_rnn_consumers( + g, lstm_fw, lstm_bw, bi_lstm_node, 0, all_nodes, to_remove + ) + rnn_utils.slice_birnn_for_original_rnn_consumers( + g, lstm_fw, lstm_bw, bi_lstm_node, 1, all_nodes, to_remove + ) + rnn_utils.slice_birnn_for_original_rnn_consumers( + g, lstm_fw, lstm_bw, bi_lstm_node, 2, all_nodes, to_remove + ) + + lstm_bw_old_x = lstm_bw.input[0] + for n in to_remove: + g.remove_node(n) + + rnn_utils.remove_reverse_in_bw_input(g, lstm_bw_old_x, rnn_utils.ONNX_RNN_TYPE.LSTM) + + return g.get_nodes() + + +def process_ch_init_nodes(g, lstm_fw, lstm_bw, to_append): + h_node = rnn_utils.process_single_init_node(g, lstm_fw.input[5], lstm_bw.input[5], to_append) + c_node = rnn_utils.process_single_init_node(g, lstm_fw.input[6], lstm_bw.input[6], to_append) + + return h_node, c_node + + +def rewrite_bidirectional_lstms(g, ops): + bi_lstms = 
rnn_utils.find_bidirectional_rnns(g, ops, rnn_utils.ONNX_RNN_TYPE.LSTM) + + return process_bilstm(g, bi_lstms) diff --git a/lib/python3.10/site-packages/tf2onnx/rewriter/cond_rewriter.py b/lib/python3.10/site-packages/tf2onnx/rewriter/cond_rewriter.py new file mode 100644 index 0000000000000000000000000000000000000000..ae1c2145d9dc943725f8db64365365e9d91a55eb --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/rewriter/cond_rewriter.py @@ -0,0 +1,320 @@ +# SPDX-License-Identifier: Apache-2.0 + + +""" +tf2onnx.rewriter.cond_rewriter +""" + +from __future__ import division +from __future__ import print_function +import logging +import traceback +from collections import OrderedDict +from enum import Enum +from tf2onnx import utils + + +logger = logging.getLogger(__name__) + + +# pylint: disable=missing-docstring,unused-argument,broad-except + +class BranchType(Enum): + """Type of branch""" + TRUE = 1 + FALSE = 2 + # TODO: sometimes, the branch depends on control inputs, + # so we just set it unknown + UNKNOWN = 3 + + +class CondBranchContext: + """Context for each branch graph""" + + def __init__(self): + self.output = [] + self.nodes = set() + + +class CondContext: + def __init__(self, cond_scope, pred_input, true_branch_context, + false_branch_context, switchs, merges): + self.cond_scope = cond_scope # name scope for this tf.cond + self.pred_input = pred_input # condition input + self.true_branch_context = true_branch_context + self.false_branch_context = false_branch_context + self.switchs = set(switchs) + self.merges = merges # list of merges in order + + +class CondRewriter: + def __init__(self, g): + self.g = g + + def rewrite(self): + logger.debug("enter cond pre rewrite") + return self.run() + + def run(self): + """tf.cond rewriter""" + # parse tf.cond in topological sort order. + # NOTE: we assume the current graph is a DAG. 
+ name_scope_merges = OrderedDict() + self.g.topological_sort(self.g.get_nodes()) + all_nodes = self.g.get_nodes() + for n in all_nodes: + if self._is_merge(n): + name_scope = utils.tf_name_scope(n.name) + if name_scope not in name_scope_merges: + name_scope_merges[name_scope] = [] + name_scope_merges[name_scope].append(n) + # check if need rewrite + if not name_scope_merges.keys(): + return all_nodes + + for name_scope, merge_nodes in name_scope_merges.items(): + cond_context = None + try: + pred_input, true_branch_context, false_branch_context, switchs = \ + self._parse_cond(name_scope, merge_nodes) + cond_context = CondContext( + name_scope, + pred_input, + true_branch_context, + false_branch_context, + switchs, + merge_nodes + ) + except Exception as ex: + tb = traceback.format_exc() + logger.warning("tf.cond rewrite failed, due to exception: %s, details:%s", ex, tb) + continue + + self._cut_off_connection(cond_context) + self._create_if_node(cond_context) + # remove nodes in If branches explicitly + for n in list(cond_context.true_branch_context.nodes) + list(cond_context.false_branch_context.nodes): + self.g.remove_node(n.name) + logger.debug("cond pre rewrite done") + + return self.g.get_nodes() + + def _get_output_shape_dtype(self, cond_context): + output_shapes = [] + output_dtypes = [] + for i, _ in enumerate(cond_context.true_branch_context.output): + true_output = cond_context.true_branch_context.output[i] + false_output = cond_context.false_branch_context.output[i] + true_shape = self.g.get_shape(true_output) + utils.make_sure(true_shape is not None, "Shape of {} is None".format(true_output)) + true_rank = len(true_shape) + true_dtype = self.g.get_dtype(true_output) + false_shape = self.g.get_shape(false_output) + utils.make_sure(false_shape is not None, "Shape of {} is None".format(false_output)) + false_rank = len(false_shape) + false_dtype = self.g.get_dtype(false_output) + # just require rank is equal + if true_rank != false_rank: + raise 
RuntimeError( + "the rank of outputs {} and {} mismatch: {}, {}".format( + true_output, + false_output, + true_rank, + false_rank + ) + ) + if true_dtype != false_dtype: + raise RuntimeError( + "the dtype of outputs {} and {} mismatch: {}, {}".format( + true_output, + false_output, + true_dtype, + false_dtype + ) + ) + output_shapes.append(utils.create_vague_shape_like(true_shape)) + output_dtypes.append(true_dtype) + return output_shapes, output_dtypes + + def _create_if_node(self, cond_context): + output_shapes, output_dtypes = self._get_output_shape_dtype(cond_context) + true_graph = utils.construct_graph_from_nodes( + self.g, + list(cond_context.true_branch_context.nodes), + cond_context.true_branch_context.output, + output_shapes, + output_dtypes + ) + false_graph = utils.construct_graph_from_nodes( + self.g, + list(cond_context.false_branch_context.nodes), + cond_context.false_branch_context.output, + output_shapes, + output_dtypes + ) + branches = {"then_branch": true_graph, "else_branch": false_graph} + if_node = self.g.make_node( + "If", + [cond_context.pred_input], + op_name_scope=cond_context.cond_scope, + outputs=[m.output[0] for m in cond_context.merges], + shapes=output_shapes, + dtypes=output_dtypes, + skip_conversion=False, + branches=branches + ) + return if_node + + def _cut_off_connection(self, cond_context): + """Cut off switchs and merges, all changes are based on the origin graph""" + nodes_to_add = [] + logger.debug("cut off switch connection") + # replace switch with identity node + for switch in cond_context.switchs: + shapes = switch.output_shapes + dtypes = switch.output_dtypes + self.g.remove_node(switch.name) + false_switch_id = self.g.make_node( + "Identity", + [switch.input[0]], + outputs=[switch.output[0]], + op_name_scope=cond_context.cond_scope, + shapes=[shapes[0]], + dtypes=[dtypes[0]], + ) + cond_context.false_branch_context.nodes.add(false_switch_id) + true_switch_id = self.g.make_node( + "Identity", + [switch.input[0]], + 
outputs=[switch.output[1]], + op_name_scope=cond_context.cond_scope, + shapes=[shapes[1]], + dtypes=[dtypes[1]], + ) + cond_context.true_branch_context.nodes.add(true_switch_id) + nodes_to_add.extend([false_switch_id, true_switch_id]) + # replace merge with if node + logger.debug("cut off merge connection") + for n in cond_context.merges: + self.g.remove_node(n.name) + + def _is_merge(self, node): + return node.type == "Merge" + + def _is_switch(self, node): + return node.type == "Switch" + + def _parse_cond(self, name_scope, merge_nodes): + """Parse condition subgraph for these merge nodes""" + true_branch_context, false_branch_context, switchs = self._trace_back(name_scope, merge_nodes) + # find pred output from any switch + pred_input = list(switchs)[0].input[1] + return pred_input, true_branch_context, false_branch_context, switchs + + def _trace_back(self, name_scope, merge_nodes): + """ + Trace back to the switch from merge nodes and collect the nodes + in the true/false branchs of tf.cond respectively, some comments: + 1. According to tf.cond implementation, We make the hypothesis + that one tf.cond cannot comprise successive Switch nodes. + 2. Thank to construct_graph_from_nodes, in which Identity node + will be added to each output of subgraph, we needn't deal with the + branch with only one const node specially. + + TODO: This implement doesn't depend on control inputs. For a price, + in the case that true and false branch both only contain a + const node, we will throw a Exception. 
+ """ + logger.debug("trace back from [%s]", ",".join(n.name for n in merge_nodes)) + true_branch_context = CondBranchContext() + false_branch_context = CondBranchContext() + total_switchs = set() + for merge_node in merge_nodes: + true_branch_nodes, true_output, false_branch_nodes, false_output, switchs = \ + self._trace_back_from_one_merge(merge_node) + true_branch_context.nodes |= set(true_branch_nodes) + true_branch_context.output.append(true_output) + false_branch_context.nodes |= set(false_branch_nodes) + false_branch_context.output.append(false_output) + total_switchs |= switchs + return true_branch_context, false_branch_context, total_switchs + + def _trace_back_from_one_merge(self, merge_node): + """Parse the ingredients (nodes and outputs)of true and false branch""" + logger.debug("trace back from %s", merge_node.name) + true_branch_nodes = None + true_output = None + false_branch_nodes = None + false_output = None + merge_input_1 = merge_node.input[0] + merge_input_2 = merge_node.input[1] + switchs = set() + + def stop_at_switch(node): + if self._is_switch(node): + switchs.add(node) + return False + return True + + branch_nodes_1 = self.g.extract_sub_graph_nodes( + [merge_input_1], + stop_at_switch + ) + branch_nodes_2 = self.g.extract_sub_graph_nodes( + [merge_input_2], + stop_at_switch + ) + branch_type_1 = self._branch_type(merge_input_1, branch_nodes_1) + branch_type_2 = self._branch_type(merge_input_2, branch_nodes_2) + # all possible branch types: UU, UT, UF, TU, TF, FU, FT + if branch_type_1 == BranchType.UNKNOWN and branch_type_2 == BranchType.UNKNOWN: + raise ValueError("Cannot handle the case both true and false branchs only \ + contain const nodes for now.") + if branch_type_1 == branch_type_2: + raise ValueError("true graph and false graph are intersected") + if branch_type_1 == BranchType.TRUE or branch_type_2 == BranchType.FALSE: + true_branch_nodes = branch_nodes_1 + true_output = merge_input_1 + false_branch_nodes = branch_nodes_2 + 
false_output = merge_input_2 + else: + true_branch_nodes = branch_nodes_2 + true_output = merge_input_2 + false_branch_nodes = branch_nodes_1 + false_output = merge_input_1 + return true_branch_nodes, true_output, false_branch_nodes, false_output, switchs + + def _branch_type(self, branch_output, nodes): + """Infer the branch type (true, false or unknown)""" + branch = BranchType.UNKNOWN + # the branch is empty + if not nodes: + input_node = self.g.get_node_by_output(branch_output) + if self._is_switch(input_node): + if branch_output == input_node.output[0]: + branch = BranchType.FALSE + else: + branch = BranchType.TRUE + return branch + for node in nodes: + for inp in node.input: + input_node = self.g.get_node_by_output(inp) + if self._is_switch(input_node): + if inp == input_node.output[0]: + if branch == BranchType.TRUE: + raise ValueError("true and false graph intersect at {}".format(node.name)) + branch = BranchType.FALSE + else: + if branch == BranchType.FALSE: + raise ValueError("true and false graph intersect at {}".format(node.name)) + branch = BranchType.TRUE + if branch == BranchType.UNKNOWN: + logger.debug( + "branch only contains const node: [%s]", + ",".join(n.name for n in nodes) + ) + return branch + + +def rewrite_cond(g, ops): + return CondRewriter(g).rewrite() diff --git a/lib/python3.10/site-packages/tf2onnx/rewriter/conv2d_with_add_rewriter.py b/lib/python3.10/site-packages/tf2onnx/rewriter/conv2d_with_add_rewriter.py new file mode 100644 index 0000000000000000000000000000000000000000..d8ec1939a4cfe35cd03ba3c2a36a26eed51212ea --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/rewriter/conv2d_with_add_rewriter.py @@ -0,0 +1,41 @@ +# SPDX-License-Identifier: Apache-2.0 + + +""" +tf2onnx.rewriter - rewrite tensorflow subgraph to onnx conv2d op with BiasAdd +""" +from tf2onnx import logging +from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher + +logger = logging.getLogger(__name__) + + +# pylint: disable=missing-docstring + +def 
rewrite_biasadd_with_conv2d(g, ops): + pattern = \ + OpTypePattern('BiasAdd', name='biasadd', inputs=[ + OpTypePattern('Conv2D|Conv2DBackpropInput', name='conv', inputs=['*', '*']), '*']) + matcher = GraphMatcher(pattern) + match_results = list(matcher.match_ops(ops)) + for match in match_results: + biasadd = match.get_op('biasadd') + conv = match.get_op('conv') + + #backup the conv and biasadd values + conv_type = conv.type + conv_input = conv.input + conv_attr = conv.attr + dtype = g.get_dtype(conv.output[0]) + shape = g.get_shape(conv.output[0]) + conv_name = biasadd.name + conv_output = biasadd.output + conv_inputs = [conv_input[0], conv_input[1], biasadd.input[1]] + + # Remove the Conv and BiasAdd node + g.remove_node(conv.name) + g.remove_node(biasadd.name) + + g.make_node(conv_type, conv_inputs, attr=conv_attr, name=conv_name, outputs=conv_output, + shapes=[shape], dtypes=[dtype], skip_conversion=False) + return ops diff --git a/lib/python3.10/site-packages/tf2onnx/rewriter/conv2d_with_pad_rewriter.py b/lib/python3.10/site-packages/tf2onnx/rewriter/conv2d_with_pad_rewriter.py new file mode 100644 index 0000000000000000000000000000000000000000..d923ad2ff72042f8667b0e432c2b9da22d60bb5a --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/rewriter/conv2d_with_pad_rewriter.py @@ -0,0 +1,65 @@ +# SPDX-License-Identifier: Apache-2.0 + + +""" +tf2onnx.rewriter - rewrite tensorflow subgraph to onnx condv2 op with pad +""" + +import numpy as np + +from tf2onnx import handler, logging +from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher + +logger = logging.getLogger(__name__) + + +# pylint: disable=missing-docstring + + +def rewrite_conv2d_with_pad(g, ops): + pattern = \ + OpTypePattern("Conv2D", name="conv", inputs=[ + OpTypePattern("Pad", name="pad"), + OpTypePattern("*") + ]) + matcher = GraphMatcher(pattern) + match_results = list(matcher.match_ops(ops)) + for match in match_results: + conv = match.get_op("conv") + pad = match.get_op("pad") + 
paddings = pad.inputs[1] + + if not paddings.is_const(): + continue + mode = pad.get_attr("mode") + if mode: + mode = mode.s.decode("utf-8").lower() + if mode not in [None, "constant"] or len(pad.input) >= 3: + continue + # Conv2D already has a pad + if conv.get_attr("padding").s.decode("utf-8") == "SAME": + continue + + logger.debug("merge pad [%s] into conv [%s]", pad.name, conv.name) + paddings_val = np.array(paddings.get_tensor_value()) + # can't pad on batch or channel dimensions + data_format = conv.get_attr("data_format").s.decode("utf-8") + if data_format == "NHWC": + if np.any(paddings_val[0]) or np.any(paddings_val[3]): + continue + paddings_val = paddings_val[1:3] + else: + if np.any(paddings_val[0]) or np.any(paddings_val[1]): + continue + paddings_val = paddings_val[2:4] + + paddings_val = paddings_val.transpose().flatten() + g.replace_input(conv, conv.input[0], pad.input[0], 0) + # convert Conv2D + conv.type = "Conv2D" + func, _ = handler.tf_op.find_effective_op("Conv2D") + func(g, conv) + conv.skip_conversion = True + conv.set_attr("auto_pad", "NOTSET") + conv.set_attr("pads", paddings_val) + return ops diff --git a/lib/python3.10/site-packages/tf2onnx/rewriter/custom_rnn_rewriter.py b/lib/python3.10/site-packages/tf2onnx/rewriter/custom_rnn_rewriter.py new file mode 100644 index 0000000000000000000000000000000000000000..b3cc4a1f635d39fc40c8084f12d82bf2b6c9a5dc --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/rewriter/custom_rnn_rewriter.py @@ -0,0 +1,228 @@ +# SPDX-License-Identifier: Apache-2.0 + + +""" +tf2onnx.rewriter.custom_rnn_rewriter - custom rnn support +""" + +from __future__ import division +from __future__ import print_function + +import logging +import sys +import traceback + +from onnx import onnx_pb +import numpy as np + +from tf2onnx.graph_builder import GraphBuilder +from tf2onnx.rewriter.loop_rewriter_base import LoopRewriterBase, Context +from tf2onnx.rewriter.rnn_utils import REWRITER_RESULT, get_rnn_scope_name, 
from tf2onnx.rewriter.rnn_utils import REWRITER_RESULT, get_rnn_scope_name, parse_rnn_loop
from tf2onnx import utils

logger = logging.getLogger(__name__)


# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test,broad-except,protected-access


class CustomRnnContext(Context):
    def __init__(self):
        super(CustomRnnContext, self).__init__()
        # scope of the rnn loop this context describes
        self.rnn_scope = None
        # loop variables recovered by parse_rnn_loop
        self.time_var = None
        self.iteration_var = None


class CustomRnnRewriter(LoopRewriterBase):
    def create_context(self):
        return CustomRnnContext()

    def run(self):
        logger.debug("enter custom rnn rewriter")
        return self.run_internal()

    def need_rewrite(self, context):
        """Return True when the while-loop parses as an rnn loop we can turn into Scan."""
        context.rnn_scope = get_rnn_scope_name(context.while_context_scope)

        res = parse_rnn_loop(self.g, context.loop_properties, context.rnn_scope,
                             context.while_context_scope)
        if not res:
            logger.debug("skip the loop due to parse_rnn_loop failed")
            return False

        time_var, iteration_var = res
        context.time_var = time_var
        context.iteration_var = iteration_var
        logger.debug("time var %s - enter input id (%s) shape: %s, output (%s) shape: %s", time_var.enter_name,
                     time_var.enter_input_id, self.g.get_shape(time_var.enter_input_id),
                     time_var.switch_true_identity_output.id, time_var.switch_true_identity_output.shape)

        return True

    def rewrite(self, context):
        logger.debug("enter rewrite function")
        try:
            scan_props = context.loop_properties

            def _adapt_initial_values(values):
                # opset 8 Scan needs a fake batch dim prepended to every initial value
                adapted = []
                for value in values:
                    if self.g.opset == 8:
                        nodes = self._adapt_scan_sequence_input_or_output("input", value, False)
                        adapted.append(nodes[-1].output[0])
                    else:  # since opset 9
                        adapted.append(value)
                return adapted

            state_inputs_initial_values = _adapt_initial_values(scan_props.state_inputs_initial_values)
            scan_inputs_initial_values = _adapt_initial_values(scan_props.scan_inputs_initial_values)

            cell_g_info = context.cell_graph
            scan_body_g = LoopRewriterBase.construct_graph_from_nodes(self.g, cell_g_info.nodes, cell_g_info.outputs)
            # declare state inputs first, then scan inputs, as Scan expects
            for input_tensor_info in scan_props.state_inputs + scan_props.scan_inputs:
                scan_body_g.add_graph_input(input_tensor_info.id, input_tensor_info.dtype, input_tensor_info.shape)

            branches = {"body": scan_body_g}
            scan_node = self._create_scan_node(context, scan_props,
                                               state_inputs_initial_values + scan_inputs_initial_values,
                                               branches=branches)
            if not scan_node:
                logger.error("failed to create scan node during rewrite")
                return REWRITER_RESULT.FAIL

            self._connect_scan_with_output(context, scan_node)

            return REWRITER_RESULT.OK

        except Exception as ex:
            tb = traceback.format_exc()
            logger.error("custom rnn rewrite failed, due to exception: %s, details:%s", ex, tb)
            return REWRITER_RESULT.FAIL

    def _create_scan_node(self, context, scan_props, init_values, branches=None):
        logger.debug("create scan node")
        # reuse original output connection id (e.g. Exit_XXX), so we don't need set shape.
        loop_outputs_shapes = []
        loop_outputs_dtypes = []
        for tensor_value_info in scan_props.state_outputs_exits + scan_props.scan_outputs_exits:
            if tensor_value_info.id:
                # in opset 8, the first dim of scan output must be batch
                if self.g.opset == 8:
                    loop_outputs_shapes.append([1] + tensor_value_info.shape)
                else:
                    loop_outputs_shapes.append(tensor_value_info.shape)
                loop_outputs_dtypes.append(tensor_value_info.dtype)
                exit_node = self.g.get_node_by_output(tensor_value_info.id)
                self.g.remove_node(exit_node.name)
            else:
                loop_outputs_shapes.append([-1])
                loop_outputs_dtypes.append(None)

        if self.g.opset == 8:
            # here we did not give the sequence_length, because
            # current batch size is 1, not original batch size
            # original seq_length will be used by the loop body of Scan op.
            scan_inputs = [""] + init_values
        else:
            scan_inputs = init_values
        return self.g.make_node("Scan", scan_inputs, op_name_scope="custom_rnn_scan",
                                attr={"num_scan_inputs": len(scan_props.scan_inputs)},
                                output_count=len(scan_props.state_outputs + scan_props.scan_outputs),
                                shapes=loop_outputs_shapes, dtypes=loop_outputs_dtypes,
                                skip_conversion=False, branches=branches)

    def _connect_scan_with_output(self, context, scan_node):
        logger.debug("connect scan output with the graph")

        # state outputs occupy the first scan output slots, scan outputs follow
        exit_groups = (
            (context.loop_properties.state_outputs_exits, "state_output_reshape"),
            (context.loop_properties.scan_outputs_exits, "scan_output_reshape"),
        )
        index = 0
        for exits, reshape_scope in exit_groups:
            for out_tensor_value_info in exits:
                if out_tensor_value_info.id:
                    if self.g.opset == 8:
                        # strip the fake batch dim before handing the value back
                        nodes = self._adapt_scan_sequence_input_or_output(reshape_scope,
                                                                          scan_node.output[index], True)
                        self.g.replace_all_inputs(
                            out_tensor_value_info.id, nodes[-1].output[0])  # ops=self.g.get_nodes()
                    else:  # since opset 9
                        self.g.replace_all_inputs(
                            out_tensor_value_info.id, scan_node.output[index])  # ops=self.g.get_nodes()
                index += 1

    def _adapt_scan_sequence_input_or_output(self, target_name, input_id, handle_output=False):
        nodes_to_add = []
        shape_node = self.g.make_node("Shape", [input_id])
        nodes_to_add.append(shape_node)
        inferred_shape = self.g.get_shape(input_id)
        if handle_output is True:
            # handle output:
            # if required dim values don't contain more than one -1,
            # just use a const for Reshape's shape input.
            if inferred_shape is not None and inferred_shape[1:].count(-1) <= 1:
                new_shape_node = self.g.make_const(utils.make_name(target_name + "_target_shape"),
                                                   np.array(inferred_shape[1:], dtype=np.int64))
                nodes_to_add.append(new_shape_node)
            else:
                # otherwise, get the dim dynamically, e.g. remove the fake batch size (e.g.1)
                # from [1, time, real-batch, ...]
                origin_shape_node = self.g.make_node("Cast", [shape_node.output[0]],
                                                     {"to": onnx_pb.TensorProto.FLOAT})
                nodes_to_add.append(origin_shape_node)

                attr = {"axes": [0], "starts": [1], "ends": [sys.maxsize]}
                inputs_map = {"data": origin_shape_node.output[0], **attr}
                sliced_shape_node = GraphBuilder(self.g).make_slice(inputs_map)
                nodes_to_add.append(self.g.get_node_by_output(sliced_shape_node))

                new_shape_node = self.g.make_node("Cast", [sliced_shape_node],
                                                  {"to": onnx_pb.TensorProto.INT64})
                nodes_to_add.append(new_shape_node)

            # NOTE(review): inferred_shape may be None on the dynamic path above,
            # which would raise here — preserved from the original. TODO confirm.
            new_shape = inferred_shape[1:]
        else:
            # handle input:
            if inferred_shape is not None and inferred_shape.count(-1) <= 1:
                new_shape_node = self.g.make_const(utils.make_name(target_name + "_target_shape"),
                                                   np.array([1] + inferred_shape, dtype=np.int64))
                nodes_to_add.append(new_shape_node)
            else:
                # add a fake batch size : 1
                fake_batch_size_node = self.g.make_const(utils.make_name(target_name + "_target_shape"),
                                                         np.array([1], dtype=np.int64))
                nodes_to_add.append(fake_batch_size_node)
                new_shape_node = self.g.make_node("Concat",
                                                  [fake_batch_size_node.output[0], shape_node.output[0]],
                                                  attr={"axis": 0})
                nodes_to_add.append(new_shape_node)
            new_shape = [1] + inferred_shape

        reshape_node = self.g.make_node("Reshape", [input_id, new_shape_node.output[0]],
                                        shapes=[new_shape],
                                        dtypes=[self.g.get_dtype(input_id)],
                                        op_name_scope=target_name)
        nodes_to_add.append(reshape_node)
        logger.debug("create Reshape for scan output %s, with output shape %s",
                     reshape_node.output[0], new_shape)
        return nodes_to_add
+ +""" +tf2onnx.rewriter - rewrite tensorflow subgraph to onnx dropout op +""" + +import numpy as np +from tf2onnx import utils +from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher +from tf2onnx import logging + +logger = logging.getLogger(__name__) + + +# pylint: disable=missing-docstring + + +def rewrite_dropout(g, ops): + patterns = [ + OpTypePattern('Mul', name='outputs', inputs=[ + OpTypePattern('RealDiv', name="input2"), + OpTypePattern('Floor', inputs=[ + OpTypePattern('Add', inputs=[ + OpTypePattern("*", name="input3"), + OpTypePattern('RandomUniform|RandomUniformLike'), + ]) + ]), + ]), + OpTypePattern("Mul", name="outputs", inputs=[ + OpTypePattern("Mul", name="input2"), + OpTypePattern("Cast", inputs=[ + OpTypePattern("GreaterEqual", inputs=[ + OpTypePattern("RandomUniform|RandomUniformLike"), + OpTypePattern("*", name="input3") + ]) + ]) + ]), + # pattern for tf-2.0 tf.nn.dropout() + OpTypePattern("Mul", name="outputs", inputs=[ + OpTypePattern("Cast", inputs=[ + OpTypePattern("GreaterEqual", inputs=[ + OpTypePattern("RandomUniform|RandomUniformLike"), + OpTypePattern("*", name="input3") + ]) + ]), + OpTypePattern("Mul", name="input2"), + ]), + ] + for pattern in patterns: + matcher = GraphMatcher(pattern, allow_reorder=True) + match_results = list(matcher.match_ops(ops)) + for match in match_results: + input2 = match.get_op('input2') + input3 = match.get_op('input3') + outputs = match.get_op('outputs') + + if not input3.is_scalar(): + logger.warning("Dropout pattern rooted at %s does not have a " + "constant ratio and cannot be replaced.", outputs.name) + continue + ratio = input3.get_tensor_value() + + if input2.inputs[0].is_scalar(): + data = input2.inputs[1] + scaling_constant = input2.inputs[0].get_tensor_value() + elif input2.inputs[1].is_scalar(): + data = input2.inputs[0] + scaling_constant = input2.inputs[1].get_tensor_value() + else: + logger.warning("Could not find scaling constant for dropout pattern rooted at %s. 
# SPDX-License-Identifier: Apache-2.0


"""
tf2onnx.rewriter.eye_rewriter - supports tf.eye
"""

from onnx import onnx_pb
from tf2onnx.graph_builder import GraphBuilder
from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher

# pylint: disable=invalid-name,unused-argument,missing-docstring, unused-variable


def rewrite_eye(g, ops):
    """Replace the TF sub-graphs emitted for tf.eye(...) with an ONNX EyeLike node.

    Eight MatrixDiag/MatrixSetDiag(-V3) based layouts (produced by different TF
    versions) are matched; each match is rebuilt as
    Concat(rows, cols) -> Cast(int64) -> ConstantOfShape -> EyeLike.
    Returns the (possibly updated) node list of ``g``.
    """
    # schema of eye is eye(num_rows, num_columns=None), if num_columns not specified then it's equal to num_rows
    # tf.eye is implemented by a sub_graph which contains op "MatrixDiag" or "MatrixSetDiag" while
    # these two ops are un-supported directly in onnx
    # but onnx op EyeLike can be used to map the sub_graph
    # "rewrite_eye" supports tf.eye(non_const) and tf.eye(non_const1, non_const2).
    # tf.eye(const) and tf.eye(const1, const2) are not supported in this rewriter

    # ConstantOfShape in opset 9 is used, so if opset less than 9 then do nothing
    if g.opset < 9:
        return g.get_nodes()

    # Patterns 1/2: diagonal size computed via Pack(Minimum) feeding Fill's shape.
    pattern1 = \
        OpTypePattern("MatrixDiag", name="output_eye_matrix", inputs=[
            OpTypePattern("Fill", inputs=[
                OpTypePattern("Const", name="fill_value"),
                OpTypePattern("ConcatV2", inputs=[
                    "*",
                    "*",
                    OpTypePattern("Pack", inputs=[
                        OpTypePattern("Minimum|Cast", name="min_or_cast")
                    ])
                ])
            ])
        ])
    pattern2 = \
        OpTypePattern("MatrixSetDiag", name="output_eye_matrix", inputs=[
            OpTypePattern("Fill"),
            OpTypePattern("Fill", inputs=[
                OpTypePattern("Const", name="fill_value"),
                OpTypePattern("ConcatV2", inputs=[
                    "*",
                    "*",
                    OpTypePattern("Pack", inputs=[
                        OpTypePattern("Minimum|Cast", name="min_or_cast")
                    ])
                ])
            ])
        ])
    # Patterns 3-6: newer TF emits ExpandDims(Minimum) instead of Pack.
    pattern3 = \
        OpTypePattern("MatrixDiag", name="output_eye_matrix", inputs=[
            OpTypePattern("Fill", inputs=[
                OpTypePattern("ConcatV2", inputs=[
                    "*",
                    OpTypePattern("ExpandDims", inputs=[
                        OpTypePattern("Minimum|Cast", name="min_or_cast"),
                        "*"
                    ]),
                    "*",
                ]),
                OpTypePattern("Const", name="fill_value"),
            ])
        ])
    pattern4 = \
        OpTypePattern("MatrixSetDiag", name="output_eye_matrix", inputs=[
            OpTypePattern("Fill"),
            OpTypePattern("Fill", inputs=[
                OpTypePattern("ConcatV2", inputs=[
                    "*",
                    OpTypePattern("ExpandDims", inputs=[
                        OpTypePattern("Minimum|Cast", name="min_or_cast"),
                        "*"
                    ]),
                    "*",
                ]),
                OpTypePattern("Const", name="fill_value"),
            ]),
        ])
    pattern5 = \
        OpTypePattern("MatrixDiagV3", name="output_eye_matrix", inputs=[
            OpTypePattern("Fill", inputs=[
                OpTypePattern("ConcatV2", inputs=[
                    "*",
                    OpTypePattern("ExpandDims", inputs=[
                        OpTypePattern("Minimum|Cast", name="min_or_cast"),
                        "*"
                    ]),
                    "*",
                ]),
                OpTypePattern("Const", name="fill_value"),
            ]),
            "*", "*", "*", "*",
        ])
    pattern6 = \
        OpTypePattern("MatrixSetDiagV3", name="output_eye_matrix", inputs=[
            OpTypePattern("Fill"),
            OpTypePattern("Fill", inputs=[
                OpTypePattern("ConcatV2", inputs=[
                    "*",
                    OpTypePattern("ExpandDims", inputs=[
                        OpTypePattern("Minimum|Cast", name="min_or_cast"),
                        "*"
                    ]),
                    "*",
                ]),
                OpTypePattern("Const", name="fill_value"),
            ]), "*"
        ])
    # Patterns 7/8: the diagonal size reshaped via Reshape(Minimum).
    pattern7 = \
        OpTypePattern("MatrixDiag", name="output_eye_matrix", inputs=[
            OpTypePattern("Fill", inputs=[
                OpTypePattern("Reshape", inputs=[
                    OpTypePattern("Minimum|Cast", name="min_or_cast"),
                    "*",
                ]),
                OpTypePattern("Const", name="fill_value"),
            ])
        ])
    pattern8 = \
        OpTypePattern("MatrixSetDiag", name="output_eye_matrix", inputs=[
            OpTypePattern("Fill"),
            OpTypePattern("Fill", inputs=[
                OpTypePattern("Reshape", inputs=[
                    OpTypePattern("Minimum|Cast", name="min_or_cast"),
                    "*",
                ]),
                OpTypePattern("Const", name="fill_value"),
            ])
        ])

    for pattern in [pattern1, pattern2, pattern3, pattern4, pattern5, pattern6, pattern7, pattern8]:
        matcher = GraphMatcher(pattern, allow_reorder=True)
        match_results = list(matcher.match_ops(ops))
        for match_result in match_results:
            # tf.eye fills the diagonal with ones; anything else is not an eye.
            if match_result.get_op("fill_value").get_tensor_value() != 1:
                continue

            # min(num_rows, num_columns) determines the diagonal length; the
            # Minimum node may sit behind a Cast.
            min_or_cast = match_result.get_op("min_or_cast")
            if min_or_cast.type == "Minimum":
                min_node = min_or_cast
            elif min_or_cast.type == "Cast" and min_or_cast.inputs[0].type == "Minimum":
                min_node = min_or_cast.inputs[0]
            else:
                continue

            num_rows = min_node.inputs[0]
            num_columns = min_node.inputs[1]

            old_output = match_result.get_op("output_eye_matrix")
            output_dtypes = [g.get_dtype(old_output.output[0])]
            output_shapes = [g.get_shape(old_output.output[0])]
            g.remove_node(old_output.name)

            # onnx op "EyeLike" need a 2D tensor, so generate it

            num_rows = GraphBuilder(g).make_unsqueeze(
                {"axes": [0], "data": num_rows.output[0]}, return_node=True)
            num_columns = GraphBuilder(g).make_unsqueeze(
                {"axes": [0], "data": num_columns.output[0]}, return_node=True)
            matrix_shape = g.make_node("Concat", [num_rows.output[0], num_columns.output[0]], attr={"axis": 0})
            # cast nodes added for "ConstantOfShape" in ONNX only accepts int64 data.
            matrix_shape_int64 = g.make_node("Cast", matrix_shape.output, attr={"to": onnx_pb.TensorProto.INT64})
            zero_matrix = g.make_node("ConstantOfShape", matrix_shape_int64.output)

            g.make_node("EyeLike", zero_matrix.output, attr={"dtype": output_dtypes[0]},
                        name=old_output.name, shapes=output_shapes, dtypes=output_dtypes, outputs=old_output.output)

    return g.get_nodes()
# SPDX-License-Identifier: Apache-2.0


"""
tf2onnx.rewriter - rewrite tensorflow subgraph to onnx flatten op
"""

import numpy as np

from tf2onnx import utils
from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher


# pylint: disable=missing-docstring


def rewrite_flatten(g, ops):
    """Rewrite Reshape(x, Pack(StridedSlice(shape(x)), -1)) sub-graphs into ONNX Flatten.

    Two variants are matched: one where the first Pack element slices a constant
    (fixed input shape), and one where it slices a live Shape op. Returns the
    updated op list.
    """
    pattern_fixed_shape_input = \
        OpTypePattern('Reshape', name='reshape', inputs=[
            OpTypePattern("*", name="input"),
            OpTypePattern('Pack', name="pack", inputs=[
                OpTypePattern('StridedSlice', name="slice", inputs=[
                    "*", "*", "*", "*",
                ]),
                "*",
            ]),
        ])
    pattern_non_fixed_shape_input = \
        OpTypePattern('Reshape', name='reshape', inputs=[
            OpTypePattern("*", name="input"),
            OpTypePattern('Pack', name="pack", inputs=[
                OpTypePattern('StridedSlice', name="slice", inputs=[
                    OpTypePattern('Shape', inputs=[
                        OpTypePattern("*", name="input2")
                    ]),
                    "*", "*", "*",
                ]),
                "*",
            ]),
        ])
    matcher = GraphMatcher(pattern_fixed_shape_input)
    match_results_1 = list(matcher.match_ops(ops))

    matcher = GraphMatcher(pattern_non_fixed_shape_input)
    match_results_2 = list(matcher.match_ops(ops))

    # FIX: use a distinct name for the grouping list; the original reused
    # "match_results" as both the list and the loop variable, shadowing it.
    grouped_matches = [(match_results_1, True), (match_results_2, False)]
    for match_results, check_fixed_input_shape in grouped_matches:
        for match in match_results:
            input_node = match.get_op('input')
            reshape_node = match.get_op('reshape')
            pack_node = match.get_op('pack')
            slice_node = match.get_op('slice')
            # the second Pack element must be the literal -1 ("flatten the rest")
            need_rewrite = pack_node.inputs[1].is_const() and pack_node.inputs[1].get_tensor_value() == -1
            if not need_rewrite:
                continue

            input_shape = g.get_shape(reshape_node.input[0])
            need_rewrite = input_shape is not None
            if not need_rewrite:
                continue

            # FIX: Flatten collapses the last two dims, so rank >= 2 is
            # required; guards the input_shape[-2] access below.
            if len(input_shape) < 2:
                continue

            if check_fixed_input_shape:
                # the sliced constant must actually be the input's shape
                need_rewrite = slice_node.inputs[0].is_const() and \
                               np.array_equal(list(input_shape), list(slice_node.inputs[0].get_tensor_value()))
                if not need_rewrite:
                    continue

            # the slice must select exactly shape[0:1] with stride 1
            begin = slice_node.inputs[1].get_tensor_value(as_list=False)
            end = slice_node.inputs[2].get_tensor_value(as_list=False)
            strides = slice_node.inputs[3].get_tensor_value(as_list=False)
            need_rewrite = np.array_equal(begin, [0]) and len(end) == 1 and \
                           np.array_equal(strides, [1]) and end[0] - begin[0] == 1
            if not need_rewrite:
                continue

            to_remove = [n for n in match.get_nodes() if n != input_node]
            safe = g.safe_to_remove_nodes(to_remove)

            # Ok if reshape_node is not safe. Will make it safe later.
            if len(to_remove) - len(safe) > 1:
                continue

            op_name = utils.make_name("Flatten")
            out_name = utils.port_name(op_name)
            g.make_node("Flatten", [reshape_node.input[0]], outputs=[out_name], name=op_name)

            last_dim = input_shape[-1]
            sec_last_dim = input_shape[-2]
            # known dims fold into their product; any unknown dim propagates -1
            if last_dim > 0 and sec_last_dim > 0:
                new_dim = last_dim * sec_last_dim
            else:
                new_dim = -1

            g.set_shape(out_name, input_shape[:-2] + [new_dim])
            g.replace_all_inputs(reshape_node.output[0], out_name, ops=ops)
            for n in to_remove:
                g.remove_node(n.name)

    return ops
# SPDX-License-Identifier: Apache-2.0


"""
tf2onnx.rewrite - rewrite tensorflow subgraph to onnx gemm op
"""
import logging
from onnx import onnx_pb
from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher


# pylint: disable=missing-docstring

def rewrite_gemm(g, ops):
    """Fuse MatMul + Add (with optional alpha/beta Mul factors) into a single ONNX Gemm.

    Only 2-D float32 operands with fully-known, broadcast-compatible shapes are
    rewritten. Returns the updated op list.
    """
    if g.opset <= 6:
        return ops

    # pattern0: alpha*A*B + beta*C
    pattern0 = \
        OpTypePattern('Add|AddV2', name='add', inputs=[
            OpTypePattern('Mul', name='mul1', inputs=[
                OpTypePattern('Const', name='alpha'),
                OpTypePattern('MatMul', name='matmul')
            ]),
            OpTypePattern('Mul', name='mul2', inputs=[
                OpTypePattern('Const', name='beta'),
                OpTypePattern('*', name='C')
            ])
        ])

    # pattern1: alpha*A*B + C
    pattern1 = \
        OpTypePattern('Add|AddV2', name='add', inputs=[
            OpTypePattern('Mul', name='mul1', inputs=[
                OpTypePattern('MatMul', name='matmul'),
                OpTypePattern('Const', name='alpha')
            ]),
            OpTypePattern('*', name='C'),
        ])

    # pattern2: A*B + beta*C
    pattern2 = \
        OpTypePattern('Add|AddV2', name='add', inputs=[
            OpTypePattern('MatMul', name='matmul'),
            OpTypePattern('Mul', name='mul2', inputs=[
                OpTypePattern('Const', name='beta'),
                OpTypePattern('*', name='C')
            ])
        ])

    # pattern3: A*B + C
    pattern3 = \
        OpTypePattern('Add|AddV2', name='add', inputs=[
            OpTypePattern('MatMul', name='matmul'),
            OpTypePattern('*', name='C'),
        ])

    # pattern4: A*B + c
    pattern4 = \
        OpTypePattern('BiasAdd', name='add', inputs=[
            OpTypePattern('MatMul', name='matmul'),
            OpTypePattern('*', name='C'),
        ])

    pattern_list = [pattern0, pattern1, pattern2, pattern3, pattern4]

    for pattern in pattern_list:
        matcher = GraphMatcher(pattern, allow_reorder=True)
        match_results = list(matcher.match_ops(ops))
        if match_results:
            for match in match_results:
                matmul_node = match.get_op("matmul")

                if g.get_dtype(matmul_node.input[0]) != onnx_pb.TensorProto.FLOAT:
                    logging.warning(u"For now, onnxruntime only support float32 type for Gemm rewriter")
                    continue

                attr, is_valid = get_gemm_attr(match)
                if not is_valid:
                    continue

                add_node = match.get_op('add')
                input_c_node = match.get_op("C")
                a_edge_name = matmul_node.input[0]
                b_edge_name = matmul_node.input[1]
                c_edge_name = input_c_node.output[0]

                # all shapes must be static and 2-D; Gemm cannot express
                # unknown dims or higher-rank batched matmul
                a_mul_b_shape = g.get_shape(matmul_node.output[0])
                c_shape = g.get_shape(c_edge_name)
                if c_shape is None: continue
                if a_mul_b_shape is None: continue
                if -1 in c_shape + a_mul_b_shape: continue
                if g.get_rank(a_edge_name) != 2 or g.get_rank(b_edge_name) != 2: continue
                # C must be unidirectionally broadcastable to A*B (Gemm spec):
                # each trailing dim of C is either 1 or equal to A*B's dim
                compatible = True
                for i in range(1, len(c_shape) + 1):
                    if c_shape[-i] not in [1, a_mul_b_shape[-i]]:
                        compatible = False
                if not compatible: continue

                gemm = g.make_node("Gemm", inputs=[a_edge_name, b_edge_name, c_edge_name],
                                   attr=attr,
                                   shapes=[g.get_shape(add_node.output[0])],
                                   dtypes=[g.get_dtype(add_node.output[0])], op_name_scope=matmul_node.name)

                ops.append(gemm)
                g.replace_all_inputs(add_node.output[0], gemm.output[0], ops=ops)
                to_delete = [add_node, matmul_node]
                g.safe_remove_nodes(to_delete)
    return ops
g.safe_remove_nodes(to_delete) + return ops + + +def get_gemm_attr(match): + attr = {} + for arg in ["alpha", "beta"]: + arg_op = match.get_op(arg) + if arg_op is not None: + match_args = arg_op.get_tensor_value() + if isinstance(match_args, list): + if len(match_args) != 1: + return attr, False + match_args = match_args[0] + attr[arg] = match_args + for arg in ["matmul"]: + arg_op = match.get_op(arg) + if arg_op is not None: + match_args = arg_op.attr + if isinstance(match_args, dict): + keys = list(match_args.keys()) + if 'transpose_a' not in keys and 'transpose_b' not in keys: + return attr, False + match_args_a = match_args['transpose_a'].i + attr['transA'] = match_args_a + match_args_b = match_args['transpose_b'].i + attr['transB'] = match_args_b + return attr, True diff --git a/lib/python3.10/site-packages/tf2onnx/rewriter/gru_rewriter.py b/lib/python3.10/site-packages/tf2onnx/rewriter/gru_rewriter.py new file mode 100644 index 0000000000000000000000000000000000000000..ec04972304c86f2af5978f0d1a08549683d7d3b1 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/rewriter/gru_rewriter.py @@ -0,0 +1,260 @@ +# SPDX-License-Identifier: Apache-2.0 + + +""" +tf2onnx.rewriter.gru_rewriter +""" + +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import logging +import numpy as np +from tf2onnx import utils +from tf2onnx.graph_builder import GraphBuilder +from tf2onnx.rewriter.rnn_utils import RNNUnitType, get_weights_from_const_node + +from tf2onnx.rewriter.unit_rnn_rewriter_base import UnitRnnRewriterBase + +# pylint: disable=invalid-name,unused-argument,missing-docstring + + +logger = logging.getLogger(__name__) + + +class GRUUnitRewriter(UnitRnnRewriterBase): + def __init__(self, g): + super(GRUUnitRewriter, self).__init__(g) + self.gru_cell_type = None + self.state_variable_handlers = [ + {"state": (self._state_variable_finder, self._connect_gru_state_to_graph)} + ] + + def run(self): + 
    def run(self):
        """Entry point; delegates the rewrite loop to the base class."""
        logger.debug("enter gru rewriter")
        return super(GRUUnitRewriter, self).run()

    def find_cell(self, context):
        """Try each known GRU cell pattern; remember which one matched in self.gru_cell_type."""
        gru_cell_types = [RNNUnitType.GRUCell, RNNUnitType.GRUBlockCell, RNNUnitType.CudnnCompatibleGRUCell]
        for cell_type in gru_cell_types:
            cell_match = self._match_cell(context, cell_type)
            if cell_match:
                self.gru_cell_type = cell_type
                logger.debug("parsing unit is %s", cell_type)
                return cell_match
        logger.debug("cannot parse unit")
        return None

    def get_weight_and_bias(self, context):
        """Extract gate/hidden kernels and biases from the matched cell's Const nodes.

        Returns a dict of numpy weights, or None when any required constant is
        missing (caller skips the rewrite in that case).
        """
        match = context.cell_match

        gate_kernel = get_weights_from_const_node(self.g, match.get_op("gate_kernel"))
        gate_bias = get_weights_from_const_node(self.g, match.get_op("gate_bias"))
        res = {
            "gate_kernel": gate_kernel,
            "gate_bias": gate_bias
        }

        # differ on memory gate:
        # GRUCell: h'_t = tanh(concat(x_t, r_t .* h_t-1) * W + b)
        # CudnnCompatibleGRUCell: h'_t = tanh(x_t * W_x + b_x + r_t .* (h_t-1 * W_h + b_h))
        if self.gru_cell_type == RNNUnitType.CudnnCompatibleGRUCell:
            hidden_state_kernel = get_weights_from_const_node(
                self.g, match.get_op("hidden_state_kernel")
            )
            hidden_state_bias = get_weights_from_const_node(
                self.g, match.get_op("hidden_state_bias")
            )
            hidden_input_kernel = get_weights_from_const_node(
                self.g, match.get_op("hidden_input_kernel")
            )
            hidden_input_bias = get_weights_from_const_node(
                self.g, match.get_op("hidden_input_bias")
            )
            if not all(val is not None for val in [
                    hidden_state_kernel, hidden_state_bias,
                    hidden_input_kernel, hidden_input_bias
            ]):
                logger.debug("rnn weights check failed, skip")
                return None
            hidden_kernel = np.concatenate([hidden_input_kernel, hidden_state_kernel])
            # apply the linear transformation before multiplying by the output of reset gate
            context.attributes["linear_before_reset"] = 1
            res["hidden_kernel"] = hidden_kernel
            res["hidden_bias"] = hidden_input_bias
            # recurrence bias for hidden gate
            res["Rb_h"] = hidden_state_bias
        elif self.gru_cell_type in [RNNUnitType.GRUCell, RNNUnitType.GRUBlockCell]:
            hidden_kernel = get_weights_from_const_node(self.g, match.get_op("hidden_kernel"))
            hidden_bias = get_weights_from_const_node(self.g, match.get_op("hidden_bias"))
            res["hidden_kernel"] = hidden_kernel
            res["hidden_bias"] = hidden_bias

        if not all(val is not None for val in res.values()):
            logger.debug("rnn weights check failed, skip")
            return None

        logger.debug("find needed weights")
        return res

    def _state_variable_finder(self, context):
        """Locate the hidden-state loop variable for the matched cell type."""
        if self.gru_cell_type in [
                RNNUnitType.GRUCell,
                RNNUnitType.CudnnCompatibleGRUCell
        ]:
            gru_cell = context.cell_match
            return self._find_state_variable_with_select(
                context,
                gru_cell.get_op("cell_output").output[0],
                [gru_cell.get_op("cell_inputs")]
            )
        if self.gru_cell_type == RNNUnitType.GRUBlockCell:
            gru_block_cell = context.cell_match.get_op("gru_block_cell")
            return self._find_state_variable_with_select(
                context,
                gru_block_cell.output[3],
                [gru_block_cell]
            )
        return None

    def parse_attributes(self, context):
        """Record ONNX GRU activations; only the hidden-gate activation is configurable in TF."""
        # in tf, only activation of hidden gate is optional, input and update gate always use sigmoid
        match = context.cell_match
        activations = ["Sigmoid", "Tanh"]
        if self.gru_cell_type == RNNUnitType.GRUCell:
            activation_op = match.get_op("optional_activation")
            activations = ["Sigmoid", activation_op.type]
        context.attributes["activations"] = activations
        return True

    def is_valid(self, context):
        """Reject loops with too many extra state variables or more than one scan output."""
        # except for ct, ht or ct_ht, there are at most 2 state variables
        other_state_variables_num = len(context.loop_properties.state_variables) - \
                                    len(context.state_variables)
        if other_state_variables_num > 2:
            logger.debug("found %d other state variables", other_state_variables_num)
            return False

        # output should be no more than 1
        outputs = context.loop_properties.scan_outputs_exits
        if len(outputs) > 1:
            logger.debug("found %d outputs for gru: %s", len(outputs), outputs)
            return False
        return True
    def process_weights_and_bias(self, context):
        """
        why split the data in this way should refer to code of tensorflow GRU cell and official document of ONNX GRU

        Re-lays TF's fused (r,z) gate kernel/bias and the hidden (candidate)
        kernel/bias into the ONNX GRU order W[zrh], R[zrh], B = [Wb[zrh], Rb[zrh]]
        and registers them as Const nodes in context.onnx_input_ids.
        """
        weights = context.weights
        # from code of tensorflow GRU cell, it can be known that shape of hidden_kernel(or candidate_kernel)
        # is (input_size+hidden_unit, hidden_unit)
        hidden_size = weights["hidden_kernel"].shape[1]
        input_size = weights["hidden_kernel"].shape[0] - hidden_size
        weight_dtype = weights["hidden_kernel"].dtype
        bias_dtype = weights["hidden_bias"].dtype
        # below code will use same notation as ONNX document
        # z means update gate, r means reset gate, h means hidden gate;
        # at this time weights of gate include input and state, will split it next
        r_kernel, z_kernel = np.split(weights["gate_kernel"], [hidden_size], axis=1)
        h_kernel = weights["hidden_kernel"]
        r_bias, z_bias = np.split(weights["gate_bias"], [hidden_size], axis=0)
        h_bias = weights["hidden_bias"]
        # ONNX GRU split weights of input and state, so have to split *_kernel
        input_r_kernel, state_r_kernel = np.split(r_kernel, [input_size], axis=0)
        input_z_kernel, state_z_kernel = np.split(z_kernel, [input_size], axis=0)
        input_h_kernel, state_h_kernel = np.split(h_kernel, [input_size], axis=0)
        W_zrh = np.concatenate((input_z_kernel, input_r_kernel, input_h_kernel), axis=1)
        R_zrh = np.concatenate((state_z_kernel, state_r_kernel, state_h_kernel), axis=1)
        # transpose weight matrix
        W_zrh = np.transpose(np.expand_dims(W_zrh, axis=0), axes=(0, 2, 1))
        R_zrh = np.transpose(np.expand_dims(R_zrh, axis=0), axes=(0, 2, 1))
        W_zrh = W_zrh.astype(weight_dtype)
        R_zrh = R_zrh.astype(weight_dtype)
        assert W_zrh.shape == (1, 3*hidden_size, input_size)
        assert R_zrh.shape == (1, 3*hidden_size, hidden_size)
        Wb_zrh = np.concatenate((z_bias, r_bias, h_bias), axis=0)
        # if tf doesn't provide bias for state, use 0
        zero = np.zeros_like(z_bias)
        # Rb_h is set in CudnnCompatibleGRUCell
        Rb_h = weights["Rb_h"] if "Rb_h" in weights else zero
        Rb_zrh = np.concatenate((zero, zero, Rb_h), axis=0)
        B_zrh = np.concatenate((Wb_zrh, Rb_zrh), axis=0)
        B_zrh = np.expand_dims(B_zrh, axis=0)
        B_zrh = B_zrh.astype(bias_dtype)
        assert B_zrh.shape == (1, 6*hidden_size)
        # create const ONNX node
        w_name = utils.make_name("W")
        w_node = self.g.make_const(w_name, W_zrh, skip_conversion=True)

        r_name = utils.make_name("R")
        r_node = self.g.make_const(r_name, R_zrh, skip_conversion=True)

        b_name = utils.make_name("B")
        b_node = self.g.make_const(b_name, B_zrh, skip_conversion=True)

        context.input_size = input_size
        context.hidden_size = hidden_size
        context.onnx_input_ids["W"] = w_node.output[0]
        context.onnx_input_ids["R"] = r_node.output[0]
        context.onnx_input_ids["B"] = b_node.output[0]

    def process_var_init_nodes(self, context):
        """Prepare the initial hidden state: ONNX GRU wants shape [num_directions, batch, hidden].

        A constant initializer is expanded in-place; a dynamic one gets an
        Unsqueeze(axes=[0]) node spliced in front of all its consumers.
        """
        assert "state" in context.state_variables.keys()
        initializer_input_id = context.state_variables["state"].enter_input_id
        node = self.g.get_node_by_output(initializer_input_id)
        if node.is_const():
            val = node.get_tensor_value(as_list=False)
            initial_name = utils.make_name("Const")
            new_val = np.expand_dims(val, axis=0)
            const_node = self.g.make_const(initial_name, new_val)
            context.onnx_input_ids["initial_state"] = const_node.output[0]
            return
        # NOTE: despite the name, this is an Unsqueeze (adds the directions axis)
        squeeze_node = GraphBuilder(self.g).make_unsqueeze(
            {'data': initializer_input_id, 'axes': [0]}, return_node=True)
        to_replace = [n for n in self.g.get_nodes() if n != squeeze_node]
        self.g.replace_all_inputs(initializer_input_id, squeeze_node.output[0], ops=to_replace)
        context.onnx_input_ids["initial_state"] = squeeze_node.output[0]
+ # Here we won't mark bidirectional/reverse, we will have another rewriter running after this one, + # which will based on patterns to combine a forward GRU and a backward GRU into a bidirectional one. + num_direction = 1 + # todo: input_forget + context.attributes["direction"] = "forward" + context.attributes["hidden_size"] = context.hidden_size + inputs = context.onnx_input_ids + # sequence length is optional + seq_len_input = utils.ONNX_EMPTY_INPUT + if inputs["sequence_lens"]: + seq_len_input = inputs["sequence_lens"] + gru_inputs = [ + inputs["X"], inputs["W"], inputs["R"], inputs["B"], + seq_len_input, inputs["initial_state"]] + x_shape = self.g.get_shape(gru_inputs[0]) + x_seq_length = x_shape[0] + x_batch_size = x_shape[1] + out_dtype = self.g.get_dtype(gru_inputs[0]) + gru_node = self.g.make_node("GRU", gru_inputs, attr=context.attributes, output_count=2, + shapes=[[x_seq_length, num_direction, x_batch_size, context.hidden_size], + [num_direction, x_batch_size, context.hidden_size]], + dtypes=[out_dtype, out_dtype], op_name_scope=context.rnn_scope) + return gru_node + + def _connect_gru_state_to_graph(self, context): + # in tf, state output shape is: [batch, hidden] + # in onnx, output shape is: [number_directions, batch, hidden] + exit_output_id = context.state_variables["state"].exit_output.id + if not exit_output_id: + logger.debug("no one consume state variable") + return + output_id = context.rnn_node.output[1] + gru_state_shape = self.g.get_shape(output_id) + output_shape = [gru_state_shape[1], gru_state_shape[2]] + squeeze_node = GraphBuilder(self.g).make_squeeze( + {'data': output_id, "axes": [0]}, shapes=[output_shape], + dtypes=[self.g.get_dtype(output_id)], return_node=True) + self.g.replace_all_inputs(exit_output_id, squeeze_node.output[0]) # ops=self.g.get_nodes() diff --git a/lib/python3.10/site-packages/tf2onnx/rewriter/layer_normalization_rewriter.py b/lib/python3.10/site-packages/tf2onnx/rewriter/layer_normalization_rewriter.py new file 
# SPDX-License-Identifier: Apache-2.0


"""
tf2onnx.rewrite - Rewrites a pattern from the tf layer_norm contrib op.
Converts a mean/variance normalization pattern (using ReduceMean, RSqrt, Sub, Mul, etc.) into InstanceNormalization
"""
from onnx import TensorProto, helper
from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher
from tf2onnx.graph_builder import GraphBuilder


# pylint: disable=missing-docstring

def rewrite_layer_normalization(g, ops):
    """Rewrite (x - mean) * rsqrt(var + eps) * scale + bias into InstanceNormalization * scale + bias.

    Only rank-3 inputs normalized over the last axis are handled. Returns the
    updated op list.
    """
    # Needs ConstantOfShape
    if g.opset <= 9:
        return ops

    # rsqrt(mean((x - mean(x))^2) + epsilon) — the 1/stddev factor
    inner_pattern = \
        OpTypePattern('Rsqrt', inputs=[
            OpTypePattern('Add', inputs=[
                OpTypePattern('Mean', allow_reorder=False, inputs=[
                    OpTypePattern('Square', inputs=[
                        OpTypePattern('Sub', allow_reorder=False, inputs=[
                            OpTypePattern('*', name='input'),
                            OpTypePattern('Mean', name='mean', allow_reorder=False, inputs=[
                                OpTypePattern('*', name='input_r2'),
                                OpTypePattern('Const|ConstV2', name='mean_axes')
                            ])
                        ])
                    ]),
                    OpTypePattern('Const|ConstV2', name='variance_axes')
                ]),
                OpTypePattern('Const|ConstV2', name='epsilon')
            ])
        ])

    # three associativity variants of (x - mean) * inner * scale + bias
    pattern0 = \
        OpTypePattern('Add', name='bias_add', inputs=[
            OpTypePattern('Mul', name='scale_mul', inputs=[
                OpTypePattern('Mul', inputs=[
                    inner_pattern,
                    OpTypePattern('*', name='scale')
                ]),
                OpTypePattern('Sub', inputs=[
                    OpTypePattern('*', name='input_r3'),
                    OpTypePattern('Mean', name='mean_r2')
                ])
            ]),
            OpTypePattern('*', name='bias')
        ])
    pattern1 = \
        OpTypePattern('Add', name='bias_add', inputs=[
            OpTypePattern('Mul', name='scale_mul', inputs=[
                OpTypePattern('Mul', inputs=[
                    inner_pattern,
                    OpTypePattern('Sub', inputs=[
                        OpTypePattern('*', name='input_r3'),
                        OpTypePattern('Mean', name='mean_r2')
                    ])
                ]),
                OpTypePattern('*', name='scale')
            ]),
            OpTypePattern('*', name='bias'),
        ])
    pattern2 = \
        OpTypePattern('Add', name='bias_add', inputs=[
            OpTypePattern('Mul', name='scale_mul', inputs=[
                OpTypePattern('Mul', inputs=[
                    OpTypePattern('*', name='scale'),
                    OpTypePattern('Sub', inputs=[
                        OpTypePattern('*', name='input_r3'),
                        OpTypePattern('Mean', name='mean_r2')
                    ])
                ]),
                inner_pattern
            ]),
            OpTypePattern('*', name='bias'),
        ])

    pattern_list = [pattern0, pattern1, pattern2]

    for pattern in pattern_list:
        matcher = GraphMatcher(pattern, allow_reorder=True)
        match_results = list(matcher.match_ops(ops))
        if match_results:
            for match in match_results:
                inp_node = match.get_op('input')
                rank = g.get_rank(inp_node.output[0])
                node = match.get_op('bias_add')
                # all three "input" captures and both "mean" captures must be
                # the same nodes, otherwise this is not one normalization
                if inp_node.name != match.get_op('input_r2').name or inp_node.name != match.get_op('input_r3').name:
                    continue
                if match.get_op('mean').name != match.get_op('mean_r2').name:
                    continue
                inp = match.get_op('mean').input[0]
                if rank != 3:
                    continue
                mean_axes = match.get_op('mean_axes').get_tensor_value(as_list=True)
                variance_axes = match.get_op('variance_axes').get_tensor_value(as_list=True)
                mean_axes = [a % rank for a in mean_axes]
                variance_axes = [a % rank for a in variance_axes]
                if mean_axes != [2] or variance_axes != [2]:
                    continue
                epsilon = match.get_op('epsilon').get_tensor_value(as_list=False).flatten().tolist()
                if len(epsilon) != 1:
                    continue
                scale = match.get_op('scale').output[0]
                bias = match.get_op('bias').output[0]
                # InstanceNormalization needs per-channel scale/bias tensors,
                # so build ones/zeros of the channel dimension's size
                shape = g.make_node("Shape", [inp]).output[0]
                dim_2_shape = GraphBuilder(g).make_slice(
                    {"data": shape, "ends": [2], "starts": [1], "axes": [0]})
                zero_tensor = helper.make_tensor("value", TensorProto.FLOAT, dims=[1], vals=[0])
                one_tensor = helper.make_tensor("value", TensorProto.FLOAT, dims=[1], vals=[1])
                zeros_of_shape = g.make_node("ConstantOfShape", [dim_2_shape], attr={'value': zero_tensor}).output[0]
                ones_of_shape = g.make_node("ConstantOfShape", [dim_2_shape], attr={'value': one_tensor}).output[0]
                norm = g.make_node("InstanceNormalization", [inp, ones_of_shape, zeros_of_shape],
                                   attr={'epsilon': epsilon[0]}, op_name_scope=node.name).output[0]
                mul = g.make_node("Mul", [norm, scale]).output[0]
                add = g.make_node("Add", [mul, bias]).output[0]
                g.replace_all_inputs(node.output[0], add)
                g.remove_node(node.name)
    return ops
attr={'value': one_tensor}).output[0] + norm = g.make_node("InstanceNormalization", [inp, ones_of_shape, zeros_of_shape], + attr={'epsilon': epsilon[0]}, op_name_scope=node.name).output[0] + mul = g.make_node("Mul", [norm, scale]).output[0] + add = g.make_node("Add", [mul, bias]).output[0] + g.replace_all_inputs(node.output[0], add) + g.remove_node(node.name) + return ops diff --git a/lib/python3.10/site-packages/tf2onnx/rewriter/leakyrelu_rewriter.py b/lib/python3.10/site-packages/tf2onnx/rewriter/leakyrelu_rewriter.py new file mode 100644 index 0000000000000000000000000000000000000000..ad18cc7f2c17278e6dd1d7b1fbc2dc6b0d3fad3e --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/rewriter/leakyrelu_rewriter.py @@ -0,0 +1,57 @@ +# SPDX-License-Identifier: Apache-2.0 + + +""" +tf2onnx.rewriter - rewrite tensorflow subgraph to onnx leakyrelu op +""" + +from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher + + +# pylint: disable=missing-docstring + + +def rewrite_leakyrelu(g, ops): + if g.opset < 6: + return ops + + pattern = \ + OpTypePattern('Maximum', name='max', inputs=[ + OpTypePattern('Mul', name='mul', inputs=[ + OpTypePattern('Const', name='alpha'), + OpTypePattern('*', name='mul_input'), + ]), + OpTypePattern('*', name='max_input'), + ]) + + matcher = GraphMatcher(pattern, allow_reorder=True) + match_results = list(matcher.match_ops(ops)) + for match in match_results: + max_node = match.get_op('max') + max_input_node = match.get_op('max_input') + mul_node = match.get_op("mul") + mul_input_node = match.get_op('mul_input') + + max_input_edge_name = _find_edge_name_between_nodes(max_input_node, max_node) + mul_input_edge_name = _find_edge_name_between_nodes(mul_input_node, mul_node) + if max_input_edge_name == mul_input_edge_name: + alpha = match.get_op("alpha").get_tensor_value() + if alpha >= 1: + continue + leakyrelu = g.make_node("LeakyRelu", inputs=[max_input_edge_name], attr={"alpha": alpha}, + shapes=[g.get_shape(max_node.output[0])], 
dtypes=[g.get_dtype(max_node.output[0])]) + ops.append(leakyrelu) + g.replace_all_inputs(max_node.output[0], leakyrelu.output[0], ops=ops) + to_delete = [max_node, mul_node] + g.safe_remove_nodes(to_delete) + + return ops + + +def _find_edge_name_between_nodes(src_node, consumer_node): + # find the first edge connection between two nodes. + for consumer_end in consumer_node.input: + for src_end in src_node.output: + if consumer_end == src_end: + return consumer_end + return None diff --git a/lib/python3.10/site-packages/tf2onnx/rewriter/loop_rewriter.py b/lib/python3.10/site-packages/tf2onnx/rewriter/loop_rewriter.py new file mode 100644 index 0000000000000000000000000000000000000000..6fcaa06eed2cd284ae6d4054791e487d826e0c2d --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/rewriter/loop_rewriter.py @@ -0,0 +1,171 @@ +# SPDX-License-Identifier: Apache-2.0 + + +""" +tf2onnx.rewriter.loop_rewriter - generic loop support +""" + +from __future__ import division +from __future__ import print_function + +import logging +import sys +import traceback + +from onnx import TensorProto +import numpy as np + +from tf2onnx.rewriter.loop_rewriter_base import LoopRewriterBase, Context +from tf2onnx.rewriter.rnn_utils import REWRITER_RESULT +from tf2onnx import utils +from tf2onnx.graph_builder import GraphBuilder + +logger = logging.getLogger(__name__) + + +# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test,broad-except,protected-access + + +class LoopRewriter(LoopRewriterBase): + + def create_context(self): + return Context() + + def run(self): + logger.debug("enter loop rewriter") + return self.run_internal() + + def need_rewrite(self, context): + return True + + def rewrite(self, context): + logger.debug("enter rewrite function") + loop_node = None + try: + loop_props = context.loop_properties + cell_g_info = context.cell_graph + cond_g_info = context.cond_graph + + # create a dummy loop to calculate the init condition + init_cond_output 
= self._create_subgraph_initial_cond(cond_g_info) + + ## create Loop body graph with existing nodes + + body_nodes = set(cell_g_info.nodes + cond_g_info.nodes) + body_outputs = cond_g_info.outputs + cell_g_info.outputs + for out_tensor_value_info in body_outputs: + shape = out_tensor_value_info.shape + utils.make_sure( + shape is not None, + "Conversion of Loop requries output shape [{}] exists".format(out_tensor_value_info.id) + ) + out_tensor_value_info.shape = utils.create_vague_shape_like(shape) + + loop_body_g = LoopRewriterBase.construct_graph_from_nodes(self.g, body_nodes, body_outputs) + + # create loop body graph inputs + loop_body_g.add_graph_input(utils.make_name("i"), TensorProto.INT64, ()) + loop_body_g.add_graph_input(utils.make_name("cond"), TensorProto.BOOL, ()) + for i, tensor_value_info in enumerate(loop_props.state_inputs): + input_name = tensor_value_info.id + if input_name is None: + # if the variable is not used in the body graph, then we created a fake one, + # the same type and shape as its corresponding output. + out_tensor_value_info = loop_props.state_outputs[i] + dtype = out_tensor_value_info.dtype + shape = out_tensor_value_info.shape + input_name = utils.make_name("unused_state_input_") + else: + dtype = tensor_value_info.dtype + shape = tensor_value_info.shape + + loop_body_g.add_graph_input(input_name, dtype, utils.create_vague_shape_like(shape)) + + for input_ta in loop_props.tensor_array_inputs: + # Loop does not have scan inputs, so we use Gather to get data for each iteration. 
    def _create_subgraph_initial_cond(self, cond_graph):
        """Create subgraph to calculate initial cond.

        Clones the condition subgraph into the parent graph under a fresh name
        scope, rewires its loop-variable inputs to their initial (Enter)
        values, and returns the output id that yields the initial condition.
        """
        # copy condition subgraph to parent graph
        copied_nodes = []
        name_scope = utils.make_name("copy")
        for node in cond_graph.nodes:
            new_name = "{}/{}".format(name_scope, node.name)
            new_outputs = ["{}/{}".format(name_scope, out) for out in node.output]
            # some inputs are out of cond_graph.nodes, keep them intact
            new_inputs = []
            for inp in node.input:
                if self.g.get_node_by_output(inp) in cond_graph.nodes:
                    new_inputs.append("{}/{}".format(name_scope, inp))
                else:
                    new_inputs.append(inp)

            new_node = self.g.make_node(
                node.type, new_inputs, outputs=new_outputs,
                attr=node.attr, name=new_name,
                shapes=node.output_shapes, dtypes=node.output_dtypes,
                skip_conversion=node.skip_conversion, infer_shape_dtype=False
            )
            # carry over any nested body graphs (e.g. nested control flow)
            body_graphs = node.graph.contained_graphs.pop(node.name, None)
            if body_graphs:
                for attr_name, body_graph in body_graphs.items():
                    body_graph.parent_graph = self.g
                    new_node.set_body_graph_as_attr(attr_name, body_graph)
            copied_nodes.append(new_node)

        # replace all inputs of condition graph by initializer (enter_input)
        for loop_var in cond_graph.dependent_vars:
            self.g.replace_all_inputs(
                loop_var.next_iteration_input.id,
                loop_var.enter_input_id, ops=copied_nodes)
        init_cond_output = "{}/{}".format(name_scope, cond_graph.outputs[0].id)
        self.g.set_dtype(init_cond_output, cond_graph.outputs[0].dtype)
        self.g.set_shape(init_cond_output, cond_graph.outputs[0].shape)
        return init_cond_output
+ trip_cnt = self.g.make_const(utils.make_name("trip_count"), np.array(sys.maxsize, dtype=np.int64)) + loop_node = self.g.make_node("Loop", [trip_cnt.output[0]] + [init_cond_output] + + loop_props.state_inputs_initial_values, # ONNX Loop support state inputs only + outputs=loop_outputs, op_name_scope="generic_loop", + shapes=loop_output_shapes, dtypes=loop_output_dtypes, + skip_conversion=False, branches=branches) + + return loop_node diff --git a/lib/python3.10/site-packages/tf2onnx/rewriter/loop_rewriter_base.py b/lib/python3.10/site-packages/tf2onnx/rewriter/loop_rewriter_base.py new file mode 100644 index 0000000000000000000000000000000000000000..3d7c03b01f1edc7e0b71cee682a841c9806a1949 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/rewriter/loop_rewriter_base.py @@ -0,0 +1,451 @@ +# SPDX-License-Identifier: Apache-2.0 + + +""" +tf2onnx.rewriter.loop_rewriter_base +""" + +from __future__ import division +from __future__ import print_function + +import logging +from collections import OrderedDict +from tf2onnx import utils +from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher +from tf2onnx.utils import is_tf_loopcond_op, is_tf_tensor_array_op +from tf2onnx.utils import is_tf_tensor_array_gather_op, is_tf_tensor_array_write_op +from tf2onnx.rewriter.rnn_utils import REWRITER_RESULT +from tf2onnx.utils import TensorValueInfo + + +logger = logging.getLogger(__name__) +INVALID_INPUT_ID = utils.make_name("invalid_input_id") + +# todo(pengwa) remove protected-access with changes to Graph/Node later. 
# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test,protected-access


class Context(object):
    """Per-loop scratch state shared between the parse and rewrite phases."""
    def __init__(self):
        self.while_context_scope = None
        self.loop_properties = LoopProperties()
        self.loop_cond = None

        self.cell_graph = None  # GraphInfo of cell graph
        self.cond_graph = None  # GraphInfo of condition graph


class GraphInfo(object):
    """A cropped subgraph: its nodes plus ordered input/output tensor infos."""
    def __init__(self, ops, inputs, outputs):
        self.nodes = ops
        self.inputs = inputs  # list of TensorValueInfo in order
        self.outputs = outputs  # list of TensorValueInfo in order
        self.dependent_vars = None

class LoopProperties(object):
    """Registry of a loop's state variables and scan (tensor-array) variables."""
    def __init__(self):
        # use enter name as key, they are initial inputs.
        # we don't use enter_input_id because it might be
        # used as initial input for more than one Enter nodes.
        self.state_variables = OrderedDict()
        self.scan_variables = OrderedDict()

        self.tensor_array_inputs = []  # list of type InputTensorArray

    def add_variable(self, var):
        # a variable must be registered in exactly one of the two maps
        utils.make_sure(var.enter_name not in self.scan_variables,
                        "variable %s already exists as scan variable.", var.enter_name)
        utils.make_sure(var.enter_name not in self.state_variables,
                        "variable %s already exists as state variable.", var.enter_name)
        if not var.is_tensor_array:
            self.state_variables[var.enter_name] = var
        else:
            self.scan_variables[var.enter_name] = var

    def get_variables(self, checker):
        # checker: optional predicate used to filter; None/falsy returns all
        if not checker:
            return self.all_variables.values()
        return [v for v in self.all_variables.values() if checker(v)]

    @property
    def all_variables(self):
        # state variables first, then scan variables (insertion order preserved)
        items = self.state_variables.copy()
        items.update(self.scan_variables)
        return items

    # state inputs and outputs are in pairs; even though some outputs do not depend
    # on the corresponding input, we leave the input id as None.
    @property
    def state_inputs(self):
        return [v.switch_true_identity_output for v in self.state_variables.values()]

    @property
    def state_inputs_initial_values(self):
        return [v.enter_input_id for v in self.state_variables.values()]

    @property
    def state_outputs(self):
        return [v.next_iteration_input for v in self.state_variables.values()]

    @property
    def state_outputs_exits(self):
        return [v.exit_output for v in self.state_variables.values()]

    # scan output (e.g. tensor array) won't be used by next iteration calculation
    @property
    def scan_outputs(self):
        return [v.next_iteration_input for v in self.scan_variables.values()]

    @property
    def scan_outputs_exits(self):
        return [v.exit_output for v in self.scan_variables.values()]

    # treat input tensor array as scan inputs
    def add_scan_input(self, input_tensor_array):
        self.tensor_array_inputs.append(input_tensor_array)

    # usually it is called TensorArrayReadV3
    @property
    def scan_inputs(self):
        return [i.consumer for i in self.tensor_array_inputs]

    @property
    def scan_inputs_initial_values(self):
        return [i.data_input_id for i in self.tensor_array_inputs]

class LoopVariable(object):
    """In TensorFlow loop, all loop variables are listed both in iteration body graph's inputs, and outputs.
    Loop (state variable 1, state variable 2) {
        # do the calculation
        # updated state variable 1 not necessarily only depends on state variable 1, it might depend
        # on 0, 1 or more state variables.
        # So if it depends on 0 state variable, then switch_true_identity_output.id is None. For this case,
        # during conversion, a fake input for ONNX Loop body graph is created, but not consumed by any node.
        return (updated) state variable 1, (updated) state variable 2, scan variable 1, scan variable 2
    }

    Here we take the perspective of body graph's outputs:
    1. start from the iteration body graph's output (e.g. next_iteration_input.id)
    2. find body graph generating it (those node between NextIteration and Switch)
    3. find the variable initial value (e.g. enter_input_id)
    4. check whether it is a tensor array
    5. the body graph output might go to next iteration as corresponding input
       (e.g. switch_true_identity_output.id).
    """
    def __init__(self, enter_name, enter_input_id, next_iteration_input_id,
                 switch_true_identity_output_id, exit_output_id, is_tensor_array, ta_index_id, g):
        self.enter_name = enter_name
        self.enter_input_id = enter_input_id

        # the output of iteration body graph for this variable
        # should not be None
        utils.make_sure(next_iteration_input_id, "next_iteration_input_id should not be None")
        self.next_iteration_input = TensorValueInfo(next_iteration_input_id, g)

        # the starting point of iteration body graph,
        # might be None when this variable value (either initial value or last iteration output value)
        # is not consumed by iteration body graph nodes.
        self.switch_true_identity_output = TensorValueInfo(switch_true_identity_output_id, g)

        # the switch_false branch is ended with Exit, which is a boundary for the loop,
        # might be None when no consumers for the variable output.
        self.exit_output = TensorValueInfo(exit_output_id, g)

        # only applicable for tensor array variable
        self.is_tensor_array = is_tensor_array
        # todo: need check ta's index variable is a scalar starting from 1, and increase by 1 each iteration.
        # then we can be sure this is equivalent to scan output behavior.
        self.ta_index_id = ta_index_id


class InputTensorArray(object):
    """A tensor array consumed by the loop body, modeled as a scan input."""
    def __init__(self, data_input_id, index_input_id, consumer_id, g):
        self.index_input_id = index_input_id
        self.data_input_id = data_input_id

        # tensor array is unstacked before being used in loop, consumer_id is the node
        # (in the iteration body graph) consuming one of the element of tensor array.
        self.consumer = TensorValueInfo(consumer_id, g)


class LoopRewriterBase(object):
    """Shared machinery for converting TF while-loops (LoopCond/Switch/Merge/
    Enter/Exit/NextIteration) into ONNX Loop; subclasses implement
    need_rewrite() and rewrite()."""
    def __init__(self, g):
        self.g = g
        # pattern matching an unstacked tensor-array read that feeds the loop body
        self.ta_read_input_pattern = \
            OpTypePattern("TensorArrayReadV3", name="ta_read", inputs=[
                OpTypePattern("Enter", name="ta_enter", inputs=[
                    OpTypePattern("TensorArrayV3")
                ]),
                OpTypePattern("Identity", name="ta_index"),
                OpTypePattern("Enter", name="ta_scatter_enter", inputs=[
                    OpTypePattern("TensorArrayScatterV3", name="ta_input_scatter")
                ]),
            ])

    def create_context(self):
        # subclass hook: override to provide an extended Context
        return Context()

    def need_rewrite(self, context):
        # subclass hook: return True when this rewriter handles the loop
        return False

    def rewrite(self, context):
        # subclass hook: perform the actual rewrite
        return REWRITER_RESULT.FAIL

    def run_internal(self):
        """Find every LoopCond in the graph and run the rewrite pipeline on it."""
        loopcond_ops = []
        for op in self.g.get_nodes():
            if is_tf_loopcond_op(op):
                loopcond_ops.append(op)

        # self.g.get_nodes may change inside this loop so that we parse all LoopCond first
        for op in loopcond_ops:
            logger.debug("======================\n handling loop cond node called %s", op.name)
            context = self.create_context()
            context.loop_cond = op

            self._check_in_read_only_mode(context)

            if self.need_rewrite(context):
                # cut off connection between cell/cond graphs and useless nodes like Merge, NextIteration.
                self._cut_off_connection_for_cell(context)
                context.cell_graph = self._crop_loop_body_sub_graph(context)
                context.cond_graph = self._crop_loop_condition_sub_graph(context)

                _result = self.rewrite(context)
                if _result == REWRITER_RESULT.OK:
                    logger.debug("rewrite successfully")
                elif _result == REWRITER_RESULT.SKIP:
                    logger.debug("rewrite skipped for LoopCond called %s", op.name)
                    continue
                elif _result == REWRITER_RESULT.FAIL:
                    raise ValueError("rewrite failed, so just fast fail it")

        if self.g.outputs:
            # clean the graph based on output names.
            self.g.delete_unused_nodes(self.g.outputs)
        return self.g.get_nodes()

    def _check_in_read_only_mode(self, context):
        # parse phase only: fills context without mutating the graph structure
        self._parse_loop_variables(context)
        self._parse_input_ta(context)

    def _parse_loop_variables(self, context):
        """Derive the while-loop name scope and one LoopVariable per Switch."""
        loop_cond_op = context.loop_cond
        parts = loop_cond_op.name.split('/')
        context.while_context_scope = '/'.join(parts[0:-1]) + "/"
        logger.debug("found while loop scope %s", context.while_context_scope)

        switch_nodes = self.g.find_output_consumers(loop_cond_op.output[0])
        for s in switch_nodes:
            if s.type != 'Switch':
                raise ValueError("LoopCond's output node should be followed with a Switch node")

            loop_var = self._get_loop_var_from_switch(s)
            context.loop_properties.add_variable(loop_var)

    def _parse_input_ta(self, context):
        """Register tensor arrays that are read inside the body as scan inputs."""
        graph_inputs = [v.switch_true_identity_output.id for v in context.loop_properties.all_variables.values()
                        if v.switch_true_identity_output.id]
        matcher = GraphMatcher(self.ta_read_input_pattern, allow_reorder=False)
        match_results = matcher.match_ops(self.g.get_nodes())
        # keep only reads whose index is a loop body input (i.e. driven by the loop)
        match_results = [r for r in match_results if r.get_op("ta_index").output[0] in graph_inputs]
        for match in match_results:
            ta_input_scatter = match.get_op("ta_input_scatter")
            # the 3rd input of scatter is the value
            data_input_id = ta_input_scatter.input[2]
            ta_read_node = match.get_op("ta_read")

            # todo: need check ta's index variable is a scalar starting from 1, and increase by 1 each iteration.
            # then we can be sure this is equivalent to scan input behavior.
            index_input_id = ta_read_node.input[1]
            unstacked_ta_consumer = match.get_op("ta_read").output[0]
            ta = InputTensorArray(data_input_id, index_input_id, unstacked_ta_consumer, self.g)
            context.loop_properties.add_scan_input(ta)

    def _crop_loop_body_sub_graph(self, context):
        # according to input and output, find the body graph
        loop_props = context.loop_properties
        inputs = loop_props.state_inputs + loop_props.scan_inputs
        input_ids = [input_tensor_value_info.id for input_tensor_value_info in inputs]

        outputs = loop_props.state_outputs + loop_props.scan_outputs
        output_ids = [out_tensor_value_info.id for out_tensor_value_info in outputs]
        ops, enter_nodes, _ = self.find_subgraph(set(input_ids), set(output_ids), self.g, merge_as_end=False)

        for enter_node in enter_nodes:
            # connect Enter's output to Enter's input (bypass the Enter node)
            self.g.replace_all_inputs(enter_node.output[0], enter_node.input[0], ops=ops)

        return GraphInfo(ops, inputs, outputs)

    def _crop_loop_condition_sub_graph(self, context):
        """Crop the condition subgraph, rewiring it to consume cell-graph outputs."""
        input_ids = []
        output_ids = [context.loop_cond.input[0]]
        outputs = [TensorValueInfo(o, self.g) for o in output_ids]
        ops, enter_nodes, merge_nodes = self.find_subgraph(set(input_ids), set(output_ids), self.g, merge_as_end=True)

        for enter_node in enter_nodes:
            # connect Enter's output to Enter's input (bypass the Enter node)
            self.g.replace_all_inputs(enter_node.output[0], enter_node.input[0], ops=ops)

        dependent_vars = []
        for merge_node in merge_nodes:
            enter_node = [n for n in merge_node.inputs if n.type == "Enter"][0]
            loop_var = context.loop_properties.all_variables[enter_node.name]

            # cut off connection between condition graph and Merge node.
            # replace condition graph's inputs to be cell graph's outputs, because we want condition graph
            # to consume cell graph outputs.
            non_switch_consumers = [n for n in self.g.find_output_consumers(merge_node.output[0]) if n.type != "Switch"]
            self.g.replace_all_inputs(merge_node.output[0], loop_var.next_iteration_input.id,
                                      ops=non_switch_consumers)
            dependent_vars.append(loop_var)

        # cut off connection between condition graph and LoopCond node.
        self.g.replace_all_inputs(context.loop_cond.output[0], INVALID_INPUT_ID, ops=[context.loop_cond])

        graph_info = GraphInfo(ops, [], outputs)
        graph_info.dependent_vars = dependent_vars
        return graph_info

    def _cut_off_connection_for_cell(self, context):
        """Detach the cell (loop body) graph from the TF control-flow plumbing."""
        for val in context.loop_properties.all_variables.values():
            if val.switch_true_identity_output.id:
                # remove the node to cut off a starting node of the cell (e.g. loop body).
                n = self.g.get_node_by_output(val.switch_true_identity_output.id)
                self.g.remove_node(n.name)

            if val.is_tensor_array:
                # connect NextIteration to an invalid node, to cut off an ending node of the cell.
                ta_write_nodes = [n for n in self.g.get_nodes() if is_tf_tensor_array_write_op(n)]
                self.g.replace_all_inputs(val.next_iteration_input.id, INVALID_INPUT_ID, ops=ta_write_nodes)
            else:
                # connect NextIteration to an invalid node, to cut off an ending node of the cell.
                next_iter_nodes = [n for n in self.g.get_nodes() if n.type == "NextIteration"]
                self.g.replace_all_inputs(val.next_iteration_input.id, INVALID_INPUT_ID, ops=next_iter_nodes)

        for scan_input in context.loop_properties.scan_inputs:
            # remove the node to cut off connection between scan_input and the cell.
            self.g.remove_node(self.g.get_node_by_output(scan_input.id).name)

    def _get_loop_var_from_switch(self, switch_node):
        """Walk Switch -> Merge -> Enter/NextIteration/Exit to build a LoopVariable."""
        if switch_node.type != 'Switch':
            logger.error("not a switch node, skip")
            return None

        # the first input is data
        merge_node = switch_node.inputs[0]
        if merge_node.type != "Merge":
            logger.error("switch node does not has Merge as its first input")
            return None

        # find the output_true consumers (output[1] is the "enter loop body" branch)
        switch_consumers = self.g.find_output_consumers(switch_node.output[1])
        switch_true_consumer_cnt = len(switch_consumers)
        if switch_true_consumer_cnt == 0:
            switch_true_identity_output = None
        elif switch_true_consumer_cnt == 1:
            if switch_consumers[0].type == "Identity":
                switch_true_identity_output = switch_consumers[0].output[0]
            else:
                # using grappler there is not necessarily an identity behind switch
                switch_true_identity_output = switch_node.output[1]
        else:
            # insert identity if there are 2 or more consumers. This can happen on tf-1.15.
            switch_true_identity_output = self.g.make_node("Identity", [switch_node.output[1]],
                                                           shapes=[switch_node.output_shapes[1]],
                                                           dtypes=[switch_node.output_dtypes[1]])
            switch_true_identity_output = switch_true_identity_output.output[0]
            for n in switch_consumers:
                for i, nn in enumerate(n.input):
                    if nn == switch_node.output[1]:
                        n.input[i] = switch_true_identity_output

        target_node_input_id = None
        enter_node = [n for n in merge_node.inputs if n.type == 'Enter'][0]
        target_node_input_id = enter_node.input[0]
        logger.debug("a Switch >> Merge >> Enter is found called %s", enter_node.inputs[0].name)

        next_iteration_node = [n for n in merge_node.inputs if n.type == 'NextIteration'][0]
        last_iteration_output_id = next_iteration_node.input[0]

        # find the output_false consumers to see whether there is consumer for this var
        switch_false_consumers = self.g.find_output_consumers(switch_node.output[0])
        false_consumer_count = len(switch_false_consumers)
        exit_output_id = None
        if false_consumer_count == 1:
            exit_node = switch_false_consumers[0]
            if exit_node.type != "Exit":
                raise ValueError("switch false branch is followed by non-Exit")
            exit_output_id = exit_node.output[0]
        elif false_consumer_count == 0:
            # sometimes the variable output won't be used in the new iteration as input.
            exit_output_id = None
        else:
            raise ValueError("unexpected number of switch false consumers")

        is_ta = False
        ta_index_id = None
        if is_tf_tensor_array_op(self.g.get_node_by_output(target_node_input_id)):
            is_ta = True

            ta_write_node = self.g.get_node_by_output(last_iteration_output_id)
            utils.make_sure(is_tf_tensor_array_write_op(ta_write_node), "ta nextiteration is not following ta write op")
            last_iteration_output_id = ta_write_node.input[2]
            ta_index_id = ta_write_node.input[1]

            # here we parse patterns generated by
            # ta.write(), then ta.stack(), because this is the most frequent usage pattern.
            if exit_output_id:
                exit_consumers = self.g.find_output_consumers(exit_output_id)
                ta_gather_node = [n for n in exit_consumers if is_tf_tensor_array_gather_op(n)][0]

                # update exit output id, treat the gather output as ta's output
                exit_output_id = ta_gather_node.output[0]

        loop_var = LoopVariable(enter_node.name, target_node_input_id, last_iteration_output_id,
                                switch_true_identity_output, exit_output_id, is_ta, ta_index_id, self.g)

        return loop_var

    @staticmethod
    def find_subgraph(input_ids, output_ids, g, merge_as_end=False):
        """Collect the nodes between output_ids and the loop boundary.

        The search stops at Enter nodes, (optionally) Merge nodes, constants,
        and any node producing one of input_ids; the boundary Enter/Merge
        nodes are returned separately.
        """
        logger.debug("input ids %s ", input_ids)
        logger.debug("output ids %s ", output_ids)

        enter_nodes = set()
        merge_nodes = set()

        def find_input_boundary(node):
            # returns False to stop the upstream traversal at this node
            if node.type == "Enter":
                enter_nodes.add(node)
                logger.debug("terminate the input search at %s", node.name)
                return False

            if merge_as_end is True and node.type == "Merge":
                merge_nodes.add(node)
                logger.debug("terminate the input search at %s", node.name)
                return False

            if node.is_const():
                logger.debug("terminate search at const node %s", node.name)
                return False

            for o in node.output:
                if o in input_ids:
                    return False
            return True

        nodes = g.extract_sub_graph_nodes(output_ids, input_checker=find_input_boundary)
        return nodes, enter_nodes, merge_nodes

    @staticmethod
    def construct_graph_from_nodes(parent_g, nodes, outputs):
        """Build a standalone Graph from `nodes`, keyed by output ids/shapes/dtypes."""
        return utils.construct_graph_from_nodes(
            parent_g,
            nodes,
            [out.id for out in outputs],
            [out.shape for out in outputs],
            [out.dtype for out in outputs]
        )
diff --git a/lib/python3.10/site-packages/tf2onnx/rewriter/lstm_rewriter.py b/lib/python3.10/site-packages/tf2onnx/rewriter/lstm_rewriter.py
new file mode 100644
index 0000000000000000000000000000000000000000..b2bdc4302689246ef50991f426770f9266f655b9
--- /dev/null
+++ b/lib/python3.10/site-packages/tf2onnx/rewriter/lstm_rewriter.py
@@ -0,0 +1,433 @@
# SPDX-License-Identifier: Apache-2.0


"""
tf2onnx.rewriter.lstm_rewriter
"""

from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import logging
import numpy as np
from tf2onnx import utils
from tf2onnx.graph_builder import GraphBuilder
from tf2onnx.rewriter.rnn_utils import RNNUnitType, get_weights_from_const_node
from tf2onnx.utils import is_tf_concat_op, is_tf_slice_op

from tf2onnx.rewriter.lstm_rewriter_base import LSTMRewriterBase

# pylint: disable=invalid-name,unused-argument,missing-docstring


logger = logging.getLogger(__name__)


class LSTMRewriter(LSTMRewriterBase):
    """Rewrites TF LSTMCell / LSTMBlockCell while-loops into ONNX LSTM nodes."""
    def __init__(self, g):
        super(LSTMRewriter, self).__init__(g)
        self.lstm_cell_type = None
        self.num_lstm_layers = 0

    def run(self):
        logger.debug("enter lstm rewriter")
        return super(LSTMRewriter, self).run()

    def find_cell(self, context):
        """Match the loop body against known LSTM cell patterns; on success,
        register per-layer state-variable handlers and return the matches."""
        lstm_cell_types = [RNNUnitType.LSTMCell, RNNUnitType.LSTMBlockCell]
        for cell_type in lstm_cell_types:
            cell_match = self._match_cell(context, cell_type)
            if cell_match and len(cell_match) >=
1:
                self.num_lstm_layers = len(cell_match)
                logger.debug("number of LSTM layers: %s", self.num_lstm_layers)
                for i in range(self.num_lstm_layers):
                    # per-layer handlers: separate ct/ht variables, and the
                    # shared ct_ht variant (non-tuple LSTM state)
                    self.state_variable_handlers.append({
                        "ct" + str(i): (self._ct_variable_finder, self._connect_lstm_yc_to_graph, i),
                        "ht" + str(i): (self._ht_variable_finder, self._connect_lstm_yh_to_graph, i)
                    })
                    self.state_variable_handlers.append({
                        "ct_ht" + str(i): (self._ct_ht_shared_variable_finder, self._connect_lstm_ych_to_graph, i)
                    })
                logger.debug("parsing unit is %s, num layers is %d", cell_type, self.num_lstm_layers)
            if cell_match:
                self.lstm_cell_type = cell_type
                logger.debug("parsing unit is %s", cell_type)
                return cell_match
        logger.debug("cannot parse unit")
        return None

    def get_weight_and_bias(self, context):
        """Collect per-layer weight/bias dicts for the matched cell type."""
        weight_and_bias = list()
        for i in range(self.num_lstm_layers):
            if self.lstm_cell_type == RNNUnitType.LSTMCell:
                weight_and_bias.append(self._get_weight_and_bias_for_lstm_cell(context, i))
            if self.lstm_cell_type == RNNUnitType.LSTMBlockCell:
                weight_and_bias.append(self._get_weight_and_bias_for_lstmblock_cell(context, i))
        return weight_and_bias

    def _get_weight_and_bias_for_lstmblock_cell(self, context, i):
        """Extract kernel, bias and forget-gate bias from an LSTMBlockCell match.

        Returns None (caller treats as SKIP) when the constants are missing or
        inconsistent.
        """
        cell_match = context.cell_match[i]

        w_node = cell_match.get_op("cell_kernel")
        w = get_weights_from_const_node(self.g, w_node)
        if w is None:
            logger.warning("Cannot find weight, SKIP")
            return None

        b_node = cell_match.get_op("cell_bias")
        b = get_weights_from_const_node(self.g, b_node)
        if b is None or b.shape[0] != w.shape[1]:
            logger.warning("cell_kernel and cell_bias's dimension doesn't match, SKIP")
            return None

        lstm_block_cell = cell_match.get_op("lstm_block_cell")
        # forget_bias is an attribute (not a tensor) on LSTMBlockCell
        ft_bias_val = np.array(
            lstm_block_cell.get_attr("forget_bias").f,
            dtype=b.dtype
        )

        return {
            "weight": w,
            "bias": b,
            "ft_bias": ft_bias_val
        }

    def _get_weight_and_bias_for_lstm_cell(self, context, i):
        """Extract kernel, bias and forget bias from an LSTMCell match.

        Returns None (caller treats as SKIP) on any inconsistency.
        """
        match = context.cell_match[i]

        w_e = match.get_op("cell_kernel")
        w = get_weights_from_const_node(self.g, w_e)
        if w is None or w.size == 0:
            return None

        # check https://www.tensorflow.org/versions/r1.8/api_docs/cc/class/tensorflow/ops/bias-add
        # for bias_add data format
        bias_add = match.get_op("bias_add")
        if bias_add is not None and bias_add.data_format != "NHWC":
            logger.debug("BiasAdd data_format is not NHWC, SKIP")
            return None

        b_e = match.get_op("cell_bias")
        if b_e is None:
            # no bias in the graph: use zeros of matching width/dtype
            b = np.array([0 for i in range(len(w[0]))]).astype(w.dtype)
        else:
            b = get_weights_from_const_node(self.g, b_e)
            if b is None or b.shape[0] != w.shape[1]:
                logger.warning("cell_kernel and cell_bias's dimensions does not match, skip")
                return None

        ft_bias_node = match.get_op("ft_bias")
        ft_bias = get_weights_from_const_node(self.g, ft_bias_node)
        if ft_bias is None:
            return None

        if not b.dtype == ft_bias.dtype:
            return None

        return {
            "weight": w,
            "bias": b,
            "ft_bias": ft_bias
        }

    def parse_attributes(self, context):
        """Reject LSTMBlockCell configurations ONNX LSTM cannot express."""
        if self.lstm_cell_type == RNNUnitType.LSTMBlockCell:
            lstm_block_cell = context.cell_match[0].get_op("lstm_block_cell")
            clip = float(lstm_block_cell.get_attr("cell_clip").f)
            # current LSTM op cannot handle clip
            if clip > 0:
                return False

            use_peephole = lstm_block_cell.get_attr_value("use_peephole")
            if use_peephole:
                return False
        return True

    def _ct_variable_finder(self, context, i):
        # locate the cell-state (c) loop variable for layer i
        if self.lstm_cell_type == RNNUnitType.LSTMCell:
            lstm_cell = context.cell_match[i]
            return self._find_state_variable_with_select(
                context,
                lstm_cell.get_op("ct").output[0],
                [lstm_cell.get_op("ct_identity_consumer")]
            )
        if self.lstm_cell_type == RNNUnitType.LSTMBlockCell:
            lstm_block_cell = context.cell_match[i].get_op("lstm_block_cell")
            # LSTMBlockCell output[1] is the cell state
            return self._find_state_variable_with_select(
                context,
                lstm_block_cell.output[1],
                [lstm_block_cell]
            )
        return None

    def _ht_variable_finder(self, context, i):
        # locate the hidden-state (h) loop variable for layer i
        if self.lstm_cell_type == RNNUnitType.LSTMCell:
            lstm_cell = context.cell_match[i]
            return self._find_state_variable_with_select(
                context,
                lstm_cell.get_op("ht").output[0],
                [lstm_cell.get_op("xh")]
            )
        if self.lstm_cell_type == RNNUnitType.LSTMBlockCell:
            lstm_block_cell = context.cell_match[i].get_op("lstm_block_cell")
            # LSTMBlockCell output[6] is the hidden state
            return self._find_state_variable_with_select(
                context,
                lstm_block_cell.output[6],
                [lstm_block_cell]
            )
        return None

    def _ct_ht_shared_variable_finder(self, context, i):
        """Handle the non-tuple state layout where c and h share one concatenated
        loop variable (only produced by LSTMCell graphs)."""
        if self.lstm_cell_type == RNNUnitType.LSTMBlockCell:
            return None

        lstm_cell = context.cell_match[i]
        ct = lstm_cell.get_op("ct").output[0]
        ht = lstm_cell.get_op("ht").output[0]
        # c and h must feed the same Concat to qualify as a shared variable
        ct_concat = [c for c in self.g.find_output_consumers(ct) if is_tf_concat_op(c)]
        ht_concat = [c for c in self.g.find_output_consumers(ht) if is_tf_concat_op(c)]
        if len(ct_concat) != 1 or len(ht_concat) != 1 or ct_concat[0] != ht_concat[0]:
            logger.debug("failed to find ct-ht concat")
            return None
        ct_ht_shared_output = ct_concat[0].output[0]

        consumers = []
        ct_identity_consumer = lstm_cell.get_op("ct_identity_consumer")
        ht_identity_consumer = lstm_cell.get_op("xh")
        ct_slice = [c for c in ct_identity_consumer.inputs if is_tf_slice_op(c)]
        ht_slice = [c for c in ht_identity_consumer.inputs if is_tf_slice_op(c)]
        if len(ct_slice) != 1 or len(ht_slice) != 1:
            logger.debug("failed to find slice op before identity consumers")
            return None
        consumers.extend([ct_slice[0], ht_slice[0]])

        return self._find_state_variable_with_select(
            context,
            ct_ht_shared_output,
            consumers
        )

    def is_valid(self, context):
        # except for ct, ht or ct_ht, there are at most 2 state variables
        if len(context.loop_properties.state_variables) - \
                len(context.state_variables) > 2:
            return False

        # output is no more than 1
        outputs = context.loop_properties.scan_outputs_exits
        if len(outputs) > 1:
            logger.debug("found %d outputs for lstm: %s", len(outputs), outputs)
            return False
        return
True

    def process_weights_and_bias_per_layer(self, context, i):
        """Convert TF's [i,c,f,o]-ordered kernel/bias for layer i into ONNX
        LSTM's [i,o,f,c] W/R/B constants and record them in the context."""
        weights = context.weights[i]
        w_r_icfo = weights["weight"]
        w_dtype = weights["weight"].dtype
        b_r_icfo = weights["bias"]
        b_dtype = weights["bias"].dtype
        ft_bias_scalar = weights["ft_bias"]

        # split bias for each hidden unit
        # b_r_icfo: (4 * num_units,)
        bias_dim = b_r_icfo.shape[0]
        hidden_size = int(bias_dim / 4)
        b_r_icfo = np.reshape(b_r_icfo, (1, bias_dim))
        bias_gates = np.split(b_r_icfo, 4, axis=1)
        # fold TF's forget_bias into the forget gate's bias
        ft_bias = np.add(bias_gates[2], ft_bias_scalar)
        wb_bias_iofc = np.concatenate((bias_gates[0], bias_gates[3], ft_bias, bias_gates[1]), axis=1)

        # fill Rb with empty since in TF, we have only one bias.
        rb_bias_iofc = np.zeros((1, bias_dim), dtype=b_dtype)
        B = np.concatenate((wb_bias_iofc, rb_bias_iofc), axis=1)
        assert B.shape == (1, 2 * bias_dim)

        # TF stacks input and recurrent kernels: top rows -> x, last hidden_size rows -> h
        [wx, wh] = np.split(w_r_icfo, [-1 * hidden_size])
        input_size = wx.shape[0]
        assert wx.shape[0] == input_size
        assert int(wx.shape[1] / 4) == hidden_size

        # split weight for gates and reorder i,c,f,o -> i,o,f,c
        w_gates = np.split(wx, 4, axis=1)
        new_wx = np.concatenate((w_gates[0], w_gates[3], w_gates[2], w_gates[1]), axis=1)

        h_gates = np.split(wh, 4, axis=1)
        new_wh = np.concatenate((h_gates[0], h_gates[3], h_gates[2], h_gates[1]), axis=1)
        W_iofc = np.transpose(new_wx)
        R_iofc = np.transpose(new_wh)

        # ONNX expects a leading num_directions axis
        W = np.array([W_iofc], w_dtype)
        R = np.array([R_iofc], w_dtype)

        # create node
        w_name = utils.make_name("W" + str(i))
        w_node = self.g.make_const(w_name, W, skip_conversion=True)

        r_name = utils.make_name("R" + str(i))
        r_node = self.g.make_const(r_name, R, skip_conversion=True)

        b_name = utils.make_name("B" + str(i))
        b_node = self.g.make_const(b_name, B, skip_conversion=True)

        context.input_size[i] = input_size
        context.hidden_size[i] = hidden_size
        context.onnx_input_ids[i]["W"] = w_node.output[0]
        context.onnx_input_ids[i]["R"] = r_node.output[0]
        context.onnx_input_ids[i]["B"] = b_node.output[0]

    def process_weights_and_bias(self, context):
        for i in range(self.num_lstm_layers):
            self.process_weights_and_bias_per_layer(context, i)

    def process_var_init_nodes(self, context):
        for i in range(self.num_lstm_layers):
            self.process_var_init_nodes_per_layer(context, i)

    def process_var_init_nodes_per_layer(self, context, i):
        """Resolve layer i's initial_h/initial_c inputs from either the shared
        ct_ht variable or the separate ct/ht variables."""
        init_h_id = None
        init_c_id = None
        if "ct_ht" + str(i) in context.state_variables:
            init_h_id, init_c_id = self._process_non_tuple_ch_init_nodes(context, i)
        elif "ct" + str(i) in context.state_variables and ("ht" + str(i)) in context.state_variables:
            init_h_id, init_c_id = self._process_tuple_ch_init_nodes(context, i)
        else:
            raise ValueError("no initializers, unexpected")
        assert init_h_id and init_c_id
        context.onnx_input_ids[i]["initial_h"] = init_h_id
        context.onnx_input_ids[i]["initial_c"] = init_c_id

    def _process_non_tuple_ch_init_nodes(self, context, i):
        """Slice the concatenated [c, h] initial value into per-state tensors
        and add the num_directions axis; returns (initial_h_id, initial_c_id)."""
        gb = GraphBuilder(self.g)
        input_id = context.state_variables["ct_ht" + str(i)].enter_input_id
        hidden_size = context.hidden_size[i]

        # first hidden_size columns -> c
        attr = {"axes": [1], "starts": [0], "ends": [hidden_size]}
        inputs_map = {"data": input_id, **attr}
        slice_node1 = GraphBuilder(self.g).make_slice(inputs_map)
        unsqueeze_node_1 = gb.make_unsqueeze({'data': slice_node1, "axes": [0]}, return_node=True)

        # second hidden_size columns -> h
        attr = {"axes": [1], "starts": [hidden_size], "ends": [hidden_size * 2]}
        inputs_map = {"data": input_id, **attr}
        slice_node2 = GraphBuilder(self.g).make_slice(inputs_map)
        unsqueeze_node_2 = gb.make_unsqueeze({'data': slice_node2, "axes": [0]}, return_node=True)

        return unsqueeze_node_1.output[0], unsqueeze_node_2.output[0]

    def _process_tuple_ch_init_nodes(self, context, i):
        h_init_input_id = context.state_variables["ht" + str(i)].enter_input_id
        c_init_input_id = context.state_variables["ct" + str(i)].enter_input_id
        h_node_output = self._process_c_or_h_init_nodes(h_init_input_id, context)
        c_node_output = self._process_c_or_h_init_nodes(c_init_input_id, context)
        return h_node_output, c_node_output

    def _process_c_or_h_init_nodes(self, initializer_input_id, context):
        """Prepend the num_directions axis to an initial c/h value, folding it
        into a new const when the initializer is constant."""
        node = self.g.get_node_by_output(initializer_input_id)
        if node.is_const():
            val = node.get_tensor_value(as_list=False)
            initial_name = utils.make_name("Const")
            new_val = np.expand_dims(val, axis=0)
            const_node = self.g.make_const(initial_name, new_val)
            return const_node.output[0]

        gb = GraphBuilder(self.g)
        # NOTE(review): variable is named squeeze_node but this is an Unsqueeze
        # (adds the leading num_directions axis, mirroring expand_dims above).
        squeeze_node = gb.make_unsqueeze({'data': initializer_input_id, "axes": [0]}, return_node=True)
        to_replace = [n for n in self.g.get_nodes() if n != squeeze_node]
        self.g.replace_all_inputs(initializer_input_id, squeeze_node.output[0], ops=to_replace)
        return squeeze_node.output[0]

    def create_single_rnn_node(self, context, i):
        """Emit the ONNX LSTM node for layer i using the inputs gathered in context."""
        # specify if the RNN is forward, reverse, or bidirectional.
        # Must be one of forward (default), reverse, or bidirectional.
        # Here we won't mark bidirectional/reverse, we will have another rewriter running
        # after this one, which will, based on patterns, combine a forward LSTM and a
        # backward LSTM into a bidirectional one.
        num_direction = 1
        # todo: input_forget
        context.attributes[i]["direction"] = "forward"
        context.attributes[i]["hidden_size"] = context.hidden_size[i]
        inputs = context.onnx_input_ids[i]
        lstm_inputs = [
            inputs["X"], inputs["W"], inputs["R"], inputs["B"],
            inputs["sequence_lens"], inputs["initial_h"], inputs["initial_c"]]

        # assumes X shape is (seq_length, batch, input_size) — TODO confirm upstream
        x_shape = self.g.get_shape(lstm_inputs[0])
        x_seq_length = x_shape[0]
        x_batch_size = x_shape[1]
        out_dtype = self.g.get_dtype(lstm_inputs[0])

        lstm_node = self.g.make_node("LSTM", lstm_inputs, attr=context.attributes[i], output_count=3,
                                     shapes=[[x_seq_length, num_direction, x_batch_size, context.hidden_size[i]],
                                             [num_direction, x_batch_size, context.hidden_size[i]],
                                             [num_direction, x_batch_size, context.hidden_size[i]]],
                                     dtypes=[out_dtype, out_dtype, out_dtype], op_name_scope=context.rnn_scope)
        return lstm_node

    def create_rnn_node(self, context):
        """Create stacked LSTM nodes; each layer's Y output (squeezed over the
        num_directions axis) feeds the next layer's X."""
        gb = GraphBuilder(self.g)
        rnn_nodes = list()
        outputs = context.loop_properties.scan_outputs_exits
        logger.debug("number of rnn node outputs: %s", len(outputs))

        for i in range(self.num_lstm_layers):
            logger.debug("creating rnn node for layer: %s", i)
            rnn_nodes.append(self.create_single_rnn_node(context, i))
            output_id = rnn_nodes[i].output[0]
            rnn_output_shape = self.g.get_shape(output_id)
            # drop the num_directions axis: (seq, 1, batch, hidden) -> (seq, batch, hidden)
            squeeze_output_shape = [rnn_output_shape[0], rnn_output_shape[2], rnn_output_shape[3]]
            squeeze_node = gb.make_squeeze({"data": output_id, "axes": [1]},
                                           shapes=[squeeze_output_shape],
                                           dtypes=[self.g.get_dtype(output_id)],
                                           return_node=True)
            if i + 1 < self.num_lstm_layers:
                logger.debug("setting input for layer: %s", i + 1)
                context.onnx_input_ids[i + 1]["X"] = squeeze_node.output[0]
        return rnn_nodes

    def _connect_lstm_yh_to_graph(self, context, i):
        # in tf, y_h output shape is: [batch, hidden]
        # in onnx, output shape is: [number_directions, batch, hidden]
        gb = GraphBuilder(self.g)
        exit_output = context.state_variables["ht" + str(i)].exit_output
+ output_id = context.rnn_node[i].output[1] + lstm_yh_shape = self.g.get_shape(output_id) + squeeze_node = gb.make_squeeze({"data": output_id, "axes": [0]}, + shapes=[[lstm_yh_shape[1], lstm_yh_shape[2]]], + dtypes=[self.g.get_dtype(output_id)], + return_node=True) + + self.g.replace_all_inputs(exit_output.id, squeeze_node.output[0]) # ops=self.g.get_nodes() + + def _connect_lstm_yc_to_graph(self, context, i): + # in tf, y_c output shape is: [batch, hidden] + # in onnx, output shape is: [number_directions, batch, hidden] + gb = GraphBuilder(self.g) + exit_output = context.state_variables["ct" + str(i)].exit_output + output_id = context.rnn_node[i].output[2] + lstm_yc_shape = self.g.get_shape(output_id) + squeeze_node = gb.make_squeeze({"data": output_id, "axes": [0]}, + shapes=[[lstm_yc_shape[1], lstm_yc_shape[2]]], + dtypes=[self.g.get_dtype(output_id)], + return_node=True) + + self.g.replace_all_inputs(exit_output.id, squeeze_node.output[0]) # ops=self.g.get_nodes() + + def _connect_lstm_ych_to_graph(self, context, i): + # in tf, concat of y_c and y_h output shape is: [batch, hidden *2] + # in onnx, y_c/y_h output shape is: [number_directions, batch, hidden] + gb = GraphBuilder(self.g) + exit_output = context.state_variables["ct_ht" + str(i)].exit_output + lstm_node = context.rnn_node[i] + yc_shape = self.g.get_shape(lstm_node.output[2]) + concat_output_shape = [yc_shape[0], yc_shape[1], yc_shape[2] * 2] + concat = self.g.make_node("Concat", [lstm_node.output[2], lstm_node.output[1]], + attr={"axis": 2}, shapes=[concat_output_shape], + dtypes=[self.g.get_dtype(lstm_node.output[2])]) + + squeeze_output_shape = [concat_output_shape[1], concat_output_shape[2]] + squeeze_node = gb.make_squeeze({'data': concat.output[0], "axes": [0]}, + shapes=[squeeze_output_shape], + dtypes=[self.g.get_dtype(concat.output[0])], + return_node=True) + + self.g.replace_all_inputs(exit_output.id, squeeze_node.output[0]) # ops=self.g.get_nodes() diff --git 
a/lib/python3.10/site-packages/tf2onnx/rewriter/lstm_rewriter_base.py b/lib/python3.10/site-packages/tf2onnx/rewriter/lstm_rewriter_base.py new file mode 100644 index 0000000000000000000000000000000000000000..b04b2cfd73c8e57045374845d30de83870fb1e16 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/rewriter/lstm_rewriter_base.py @@ -0,0 +1,190 @@ +# SPDX-License-Identifier: Apache-2.0 + + +# Temporary base class exclusive for LSTMs for stacked LSTM layer support. +# Once GRU, BiLSTM, BiGRU re-writers will also be enhanced for stacked layer support +# this will be combined with unit rnn base class. + +""" +tf2onnx.rewriter.lstm_rewriter_base +""" + +from __future__ import division +from __future__ import print_function +import logging + +from tf2onnx import utils +from tf2onnx.graph_builder import GraphBuilder +from tf2onnx.rewriter.loop_rewriter_base import LoopRewriterBase +from tf2onnx.rewriter.rnn_utils import get_pattern +from tf2onnx.graph_matcher import GraphMatcher +from tf2onnx.rewriter.unit_rnn_rewriter_base import UnitRnnRewriterBase, UnitRnnContext + +logger = logging.getLogger(__name__) + + +# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test,broad-except,protected-access,W0223 + +class LSTMContext(UnitRnnContext): + def __init__(self): + super(LSTMContext, self).__init__() + self.cell_match = list() # matched cell + + self.weights = list({}) + self.input_size = list() + self.hidden_size = list() + + self.attributes = list({}) # onnx attributes + # onnx inputs: List of [X, W, R, B, sequence_lens, initial_h, initial_c, P] + self.onnx_input_ids = list({}) + + +class LSTMRewriterBase(UnitRnnRewriterBase): + """ + main procedures: + 1 check whether extracted loop is a unit LSTM, fall back in necessity: + 1 parse LSTM + 2 find needed info from tensorflow graph + 3 process found info according to ONNX requirement + """ + + def create_context(self): + return LSTMContext() + + def parse_unit_rnn(self, context): + """ + 
parse needed info from tensorflow graph: + 1 weight + 2 state variables used in rnn unit, such as c_t, h_t + 3 sequence node + 4 input_x + 5 attributes, e.g., activation_alpha, activation_beta... optional + """ + logger.debug("parse unit rnn") + self.state_variable_handler = list() + self.state_variable_handlers = list() + + logger.debug("match unit cell against loop body graph") + cell_match = self.find_cell(context) + if not cell_match: + logger.debug('failed to match cell pattern') + return False + cell_match.sort(key=lambda cmt: cmt.get_op("cell_kernel").name) + context.cell_match = cell_match + + logger.debug("get_weight_and_bias starts") + weights = self.get_weight_and_bias(context) + if not weights: + logger.debug("rnn weights check failed, SKIP") + return False + context.weights = weights + + if not self.get_state_variables(context): + logger.debug("no cell variable initializers found, SKIP") + return False + + seq_len_node = self.find_sequence_length_node(context) + if seq_len_node: + logger.debug("find sequence node: %s", seq_len_node.name) + + # require exact one input + inputs = context.loop_properties.scan_inputs_initial_values + if len(inputs) != 1: + logger.debug("found %d inputs for the unit rnn: %s", + len(inputs), inputs) + return False + + for i in range(len(context.cell_match)): + context.onnx_input_ids.append({}) + context.input_size.append(None) + context.hidden_size.append(None) + context.attributes.append({}) + context.onnx_input_ids[i]["sequence_lens"] = \ + seq_len_node.output[0] if seq_len_node else utils.ONNX_EMPTY_INPUT + + context.onnx_input_ids[0]["X"] = inputs[0] + if not self.parse_attributes(context): + logger.debug("wrong attributes found") + return False + + return True + + def _match_cell(self, context, unittype): + """match unit cell""" + for cell_pattern in get_pattern(unittype): + matcher = GraphMatcher(cell_pattern, allow_reorder=True) + + loop_props = context.loop_properties + inputs = loop_props.state_inputs + 
loop_props.scan_inputs + input_ids = [input_tensor_value_info.id for input_tensor_value_info in inputs] + outputs = loop_props.state_outputs + loop_props.scan_outputs + output_ids = [out_tensor_value_info.id for out_tensor_value_info in outputs] + body_graph_ops, _, _ = LoopRewriterBase.find_subgraph( + set(input_ids), + set(output_ids), + self.g, merge_as_end=True + ) + + match_results = list(matcher.match_ops(body_graph_ops)) + logger.debug("number of match results: %s", len(match_results)) + if len(match_results) > 0: + return match_results + return None + + def get_state_variables(self, context): + """ + Get state variables by provided handlers. There maybe several handlers corresponding to + different patterns of state variables. + The commone method is to find state variables from loop property according to its + next_iteration_input and switch_true_identity_output, see lstm_rewriter_v2 + """ + contains_handler = False + for handler in self.state_variable_handlers: + can_handle = True + for var_name, funcs in handler.items(): + finder = funcs[0] + state_variable = finder(context, funcs[2]) + if state_variable: + logger.debug("found state variable %s", var_name) + context.state_variables[var_name] = state_variable + else: + logger.debug("failed to get state variable %s", var_name) + can_handle = False + break + if can_handle: + self.state_variable_handler.append(handler) + contains_handler = True + return contains_handler + + def process_outputs(self, context): + for handler in self.state_variable_handler: + for var_name, funcs in handler.items(): + output_connector = funcs[1] + output_connector(context, funcs[2]) + logger.debug("connect output of %s to graph", var_name) + logger.debug("done handling all state variables, now focusing on final output") + self.connect_unit_rnn_output_to_graph(context) + + def connect_unit_rnn_output_to_graph(self, context): + outputs = context.loop_properties.scan_outputs_exits + if not outputs: + logger.debug("no one consume 
output") + return + + gb = GraphBuilder(self.g) + gather_output_id = outputs[0].id + logger.debug("found output for rnn: %s", gather_output_id) + + # in tf batch major mode, output shape is : [batch, time, hidden] + # in time major mode, output shape is: [time, batch, hidden] + # in onnx, output shape is : [time, num_directions, batch, hidden] + + rnn_node = context.rnn_node[len(context.rnn_node) - 1] + output_id = rnn_node.output[0] + rnn_output_shape = self.g.get_shape(output_id) + squeeze_output_shape = [rnn_output_shape[0], rnn_output_shape[2], rnn_output_shape[3]] + squeeze_node = gb.make_squeeze({'data': output_id, "axes": [1]}, + shapes=[squeeze_output_shape], + dtypes=[self.g.get_dtype(output_id)], + return_node=True) + self.g.replace_all_inputs(gather_output_id, squeeze_node.output[0]) # ops=self.g.get_nodes() diff --git a/lib/python3.10/site-packages/tf2onnx/rewriter/quantization_ops_rewriter.py b/lib/python3.10/site-packages/tf2onnx/rewriter/quantization_ops_rewriter.py new file mode 100644 index 0000000000000000000000000000000000000000..45d7e5431b9929cdacdc5e13b74f2da0d254ab42 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/rewriter/quantization_ops_rewriter.py @@ -0,0 +1,125 @@ +# SPDX-License-Identifier: Apache-2.0 + + +""" +tf2onnx.rewriter - rewrite tensorflow QuantizeAndDequantizeV2|QuantizeAndDequantizeV3 op +""" + +import numpy as np +from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher +from tf2onnx import utils + +# pylint: disable=missing-docstring + +def extract_numpy_array(node): + return np.frombuffer(node.attr["value"].t.raw_data, dtype="float32") + +def create_qdq_nodes(g, match_results): + + for match in match_results: + qdq_node = match.get_op('output') + qdq_node_output_dtype = g.get_dtype(qdq_node.output[0]) + qdq_node_output_shape = g.get_shape(qdq_node.output[0]) + + # Get the attributes of qdq node + narrow_range = qdq_node.attr['narrow_range'].i + signed_input = qdq_node.attr['signed_input'].i + + min_quantized, 
max_quantized = [-127, 127]
        # narrow_range restricts signed int8 to [-127, 127]; otherwise the full
        # signed range [-128, 127] is used.
        if not narrow_range and signed_input:
            min_quantized = -128

        # Unsigned quantization always uses the full uint8 range.
        if not signed_input:
            min_quantized, max_quantized = [0, 255]

        # Get axis attribute for per channel implementation.
        # NOTE(review): `axis` stays unbound when the attribute is absent; the
        # per-channel branch below would then raise NameError — confirm every
        # per-channel QDQ node actually carries an 'axis' attribute.
        if 'axis' in qdq_node.attr:
            axis = qdq_node.attr['axis'].i

        # Get the min and max value of the inputs to QDQ op
        min_value = extract_numpy_array(qdq_node.inputs[1])
        max_value = extract_numpy_array(qdq_node.inputs[2])

        # One scale/zero-point entry per channel; a single entry means per-layer.
        num_channels = min_value.shape[0]
        scales = np.zeros(num_channels, dtype=np.float32)
        zero_point_dtype = np.int8 if signed_input else np.uint8
        zero_point = np.zeros(num_channels, dtype=zero_point_dtype)

        for i in range(num_channels):
            # Calculate scales from the min and max values
            # (these are quantized/real ratios; the ONNX scale is their inverse,
            # taken below as `inverse_scale`).
            scale_from_min_side = min_quantized/min_value[i] if min_quantized*min_value[i] > 0 else max_quantized
            scale_from_max_side = max_quantized/max_value[i] if max_quantized*max_value[i] > 0 else max_quantized

            # Pick the smaller ratio so both the min and max extremes stay
            # representable inside the quantized range.
            if scale_from_min_side < scale_from_max_side:
                scale = scale_from_min_side
            else:
                scale = scale_from_max_side

            utils.make_sure(scale > 0, "Quantize/Dequantize scale must be greater than zero")
            scales[i] = np.float32(scale)

        # Set scalars for scale and zero point for per layer quantization
        if num_channels == 1:
            scales = scales[0]
            zero_point = zero_point[0]
            attrs = {}
        else:
            utils.make_sure(axis and axis != -1, "Axis must be specified for per channel quantization")
            utils.make_sure(g.opset >= 13, "Opset >= 13 is required for per channel quantization")
            attrs = {'axis': axis}

        # Split it into QuantizeLinear and DequantizeLinear and remove the QDQ node reference
        inverse_scale = (1/scales).astype(np.float32)
        y_quant_scale = g.make_const(name=utils.make_name("y_quant_scale"), np_val=inverse_scale)
        y_zero_point = g.make_const(name=utils.make_name("y_zero_point"), np_val=zero_point)
        quant_node = g.make_node(op_type="QuantizeLinear",
                                 inputs=[qdq_node.input[0], y_quant_scale.output[0],
y_zero_point.output[0]], + shapes=[qdq_node_output_shape], + attr=attrs, + dtypes=[qdq_node_output_dtype], + name=utils.make_name("QuantLinearNode")) + + g.set_shape(quant_node.output[0], qdq_node_output_shape) + + g.remove_node(qdq_node.name) + + y_dequant_scale = g.make_const(name=utils.make_name("y_dequant_scale"), np_val=inverse_scale) + y_inv_zero_point = g.make_const(name=utils.make_name("y_inv_zero_point"), np_val=zero_point) + dequant_node = g.make_node(op_type="DequantizeLinear", + inputs=[quant_node.output[0], y_dequant_scale.output[0], + y_inv_zero_point.output[0]], + outputs=[qdq_node.output[0]], + shapes=[qdq_node_output_shape], + attr=attrs, + dtypes=[qdq_node_output_dtype], + name=utils.make_name("DequantLinearNode")) + g.set_shape(dequant_node.output[0], qdq_node_output_shape) + + return g.get_nodes() + +def rewrite_quantize_and_dequantize(g, ops): + + pattern_for_qdq_v2 = \ + OpTypePattern('QuantizeAndDequantizeV2', name='output', inputs=[ + OpTypePattern("*"), + OpTypePattern(None), + OpTypePattern(None), + ]) + pattern_for_qdq_v3 = \ + OpTypePattern('QuantizeAndDequantizeV3', name='output', inputs=[ + OpTypePattern("*"), + OpTypePattern(None), + OpTypePattern(None), + OpTypePattern(None), + ]) + + # Match all the patterns for QDQ ops + patterns = [pattern_for_qdq_v3, pattern_for_qdq_v2] + match_results = [] + for pattern in patterns: + matcher = GraphMatcher(pattern) + results = list(matcher.match_ops(ops)) + match_results.extend(results) + + return create_qdq_nodes(g, match_results) diff --git a/lib/python3.10/site-packages/tf2onnx/rewriter/random_normal_rewriter.py b/lib/python3.10/site-packages/tf2onnx/rewriter/random_normal_rewriter.py new file mode 100644 index 0000000000000000000000000000000000000000..7194956b9664c60d72d73caffe9e6db1116fea9a --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/rewriter/random_normal_rewriter.py @@ -0,0 +1,60 @@ +# SPDX-License-Identifier: Apache-2.0 + + +""" +tf2onnx.rewriter - rewrite tensorflow 
subgraph to onnx random normal op +""" + +from tf2onnx import utils +from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher + + +# pylint: disable=missing-docstring + + +def rewrite_random_normal(g, ops): + pattern1 = \ + OpTypePattern('Add', name='output', inputs=[ + OpTypePattern('Mul', name='input2', inputs=[ + OpTypePattern('RandomStandardNormal', name='input1', inputs=["*"]), "*" + ]), "*" + ]) + + pattern2 = \ + OpTypePattern('Identity', name='output', inputs=[ + OpTypePattern('Identity', name='input2', inputs=[ + OpTypePattern('RandomStandardNormal', name='input1', inputs=["*"]) + ]) + ]) + + pattern_list = [pattern1, pattern2] + for pattern in pattern_list: + matcher = GraphMatcher(pattern) + match_results = list(matcher.match_ops(ops)) + for match in match_results: + output = match.get_op('output') + if output.type == 'Add': + # pattern 1 + mean = output.inputs[1].get_tensor_value() + else: + # pattern 2 + mean = 0.0 + dtype = g.get_dtype(output.output[0]) + op_name = utils.make_name("RandomNormal") + out_name = utils.port_name(op_name) + + rn_op = match.get_op('input1') + seed = rn_op.get_attr('seed2').i + + if rn_op.inputs[0].type == "Shape": + shape_node = rn_op.inputs[0] + new_node = g.make_node("RandomNormalLike", [shape_node.input[0]], outputs=[out_name], name=op_name, + attr={"mean": mean, "scale": 1.0, "dtype": dtype, "seed": float(seed)}) + else: + shape = g.get_shape(output.output[0]) + new_node = g.make_node("RandomNormal", [], outputs=[out_name], name=op_name, + attr={"shape": shape, "mean": mean, "scale": 1.0, "dtype": dtype, "seed": seed}) + + g.replace_all_inputs(output.output[0], new_node.output[0], ops=ops) + g.safe_remove_nodes(match.get_nodes()) + return ops diff --git a/lib/python3.10/site-packages/tf2onnx/rewriter/random_uniform.py b/lib/python3.10/site-packages/tf2onnx/rewriter/random_uniform.py new file mode 100644 index 0000000000000000000000000000000000000000..e8fcb1f2fd41e76ad973770001c4b6e6b2d0ad92 --- /dev/null +++ 
b/lib/python3.10/site-packages/tf2onnx/rewriter/random_uniform.py
@@ -0,0 +1,107 @@
# SPDX-License-Identifier: Apache-2.0


"""
tf2onnx.rewriter - rewrite tensorflow subgraph to onnx random_uniform op
"""
import numpy as np
from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher
from tf2onnx import utils, handler


# pylint: disable=missing-docstring


def rewrite_random_uniform(g, ops):
    """Rewrite TF ``RandomUniform * (max - min) + min`` into one ONNX node.

    The Sub node carries the bounds (max on input 0, min on input 1); the
    matched subgraph is replaced via create_onnx_random_uniform_op and the
    original nodes are removed. Returns the (mutated) ``ops`` list.
    """
    pattern = \
        OpTypePattern('Add', name='output', inputs=[
            OpTypePattern('Mul', inputs=[
                OpTypePattern('RandomUniform', name='input1', inputs=["*"]),
                OpTypePattern('Sub', name='input2', inputs=["*", "*"]),
            ]), None
        ])

    matcher = GraphMatcher(pattern)
    match_results = list(matcher.match_ops(ops))
    for match in match_results:
        input2 = match.get_op('input2')
        output = match.get_op('output')
        ru_op = match.get_op('input1')
        # max is on input 0
        tmax = input2.inputs[0].get_tensor_value()
        tmin = input2.inputs[1].get_tensor_value()
        to_delete = list(set(match.get_nodes()))
        new_node = create_onnx_random_uniform_op(g, tmax, tmin, ru_op, output, to_delete)
        g.replace_all_inputs(output.output[0], new_node.output[0], ops=ops)
        g.safe_remove_nodes(to_delete)

    return ops


# rewriter function when fold_const is enabled
def rewrite_random_uniform_fold_const(g, ops):
    """Variant of rewrite_random_uniform for constant-folded graphs.

    After folding, the Sub node is gone: the Mul's second input is the
    constant span (max - min) and the Add's second input is the constant
    min, so max is recovered as ``min + span``.
    """
    pattern = \
        OpTypePattern('Add', name='output', inputs=[
            OpTypePattern('Mul', name='mul', inputs=[
                OpTypePattern('RandomUniform', name='input1', inputs=["*"]),
                None,
            ]),
            None,
        ])

    matcher = GraphMatcher(pattern)
    match_results = list(matcher.match_ops(ops))
    for match in match_results:
        output = match.get_op('output')
        mul = match.get_op('mul')
        ru_op = match.get_op('input1')

        tmax_minus_tmin = mul.inputs[1].get_tensor_value()
        tmin = output.inputs[1].get_tensor_value()
        tmax = tmin + tmax_minus_tmin
        to_delete = list(set(match.get_nodes()))
        new_node = create_onnx_random_uniform_op(g, tmax, tmin, ru_op, output, to_delete)
g.replace_all_inputs(output.output[0], new_node.output[0], ops=ops) + g.safe_remove_nodes(to_delete) + + return ops + + +def create_onnx_random_uniform_op(g, tmax, tmin, ru_op, output, to_delete): + dtype = g.get_dtype(output.output[0]) + op_name = utils.make_name("RandomUniform") + shape_node = ru_op.inputs[0] + shape = g.get_shape(output.output[0]) + if shape_node.is_const(): + # if the tensorflow input (aka the shape) is const we can use the RandomUniform op + new_node = g.make_node("RandomUniform", [], name=op_name, + attr={"low": tmin, "high": tmax, "dtype": dtype, "shape": shape}, + shapes=[shape], dtypes=[dtype]) + else: + if shape_node.type == "Shape": + # if shape is dynamic - in tensorflow shape comes as tensor VALUE, + # in onnx RandomUniformLike finds takes the shape from the tensor itself. + # In many cases there is a shape op in tensorflow before RandomUniform and + # to make that work for onnx we just need to remove the shape op. + new_node = g.make_node("RandomUniformLike", inputs=[shape_node.input[0]], name=op_name, + attr={"low": tmin, "high": tmax, "dtype": dtype}, + shapes=[shape], dtypes=[dtype]) + else: + # if the shape is calculated we need to create a tensor so RandomUniformLike + # can take the shape from there. Pre opset9 this is somewhat hacky because there is + # no real fill op in onnx. In general this is not going to help performance but the tensors + # created are expected to be small. 
+ + # tell the caller to not delete the shape node + to_delete.remove(shape_node) + # create a fill op with the shape of the value of the input tensor + zero = g.make_const(utils.make_name("zero"), np.zeros((), dtype=np.float32)) + fill_node = g.make_node("Fill", inputs=[shape_node.output[0], zero.name], + shapes=[shape], dtypes=[dtype]) + func, _ = handler.tf_op.find_effective_op("Fill") + func(g, fill_node) + # and use RandomUniformLike to create the random tensor + new_node = g.make_node("RandomUniformLike", inputs=[fill_node.output[0]], name=op_name, + attr={"low": tmin, "high": tmax, "dtype": dtype}, + shapes=[shape], dtypes=[dtype]) + return new_node diff --git a/lib/python3.10/site-packages/tf2onnx/rewriter/rnn.py b/lib/python3.10/site-packages/tf2onnx/rewriter/rnn.py new file mode 100644 index 0000000000000000000000000000000000000000..92ed5c35782754ed060a2f7c2dadaf67857f2104 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/rewriter/rnn.py @@ -0,0 +1,50 @@ +# SPDX-License-Identifier: Apache-2.0 + + +""" +tf2onnx.rewriter.rnn - lstm support +""" + +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import logging + +from tf2onnx.rewriter.bilstm_rewriter import rewrite_bidirectional_lstms +from tf2onnx.rewriter.bigru_rewriter import rewrite_bidirectional_grus +from tf2onnx.rewriter.custom_rnn_rewriter import CustomRnnRewriter +from tf2onnx.rewriter.loop_rewriter import LoopRewriter +from tf2onnx.rewriter.lstm_rewriter import LSTMRewriter +from tf2onnx.rewriter.gru_rewriter import GRUUnitRewriter + +# pylint: disable=invalid-name,unused-argument,missing-docstring + + +logger = logging.getLogger(__name__) + + +def rewrite_single_direction_lstm(g, ops): + r = LSTMRewriter(g) + return r.run() + + +def rewrite_bi_direction_lstm(g, ops): + return rewrite_bidirectional_lstms(g, ops) + + +def rewrite_single_direction_gru(g, ops): + r = GRUUnitRewriter(g) + return r.run() + + +def 
rewrite_bi_direction_gru(g, ops):
    # Merges matching forward/backward GRU pairs into bidirectional GRUs.
    return rewrite_bidirectional_grus(g, ops)


def rewrite_custom_rnn_cell(g, ops):
    # Delegates to CustomRnnRewriter for cell loops no specialised rewriter handled.
    return CustomRnnRewriter(g).run()


def rewrite_generic_loop(g, ops):
    # Delegates remaining while-loops to the generic LoopRewriter.
    return LoopRewriter(g).run()
diff --git a/lib/python3.10/site-packages/tf2onnx/rewriter/rnn_utils.py b/lib/python3.10/site-packages/tf2onnx/rewriter/rnn_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..0753abbcd9bf3e97cadbdee53bbb8031dce7e289
--- /dev/null
+++ b/lib/python3.10/site-packages/tf2onnx/rewriter/rnn_utils.py
# SPDX-License-Identifier: Apache-2.0


"""
tf2onnx.rewriter.rnn_utils - rnn support
"""

from __future__ import unicode_literals
from collections import defaultdict
from enum import Enum

import logging
import numpy as np
from tf2onnx import utils
from tf2onnx.graph_builder import GraphBuilder
from tf2onnx.graph_matcher import OpTypePattern  # pylint: disable=unused-import


# pylint: disable=invalid-name,unused-argument,missing-docstring



logger = logging.getLogger(__name__)


class REWRITER_RESULT(Enum):
    # Outcome of one rewriter pass over a candidate subgraph.
    SKIP = 1
    OK = 2
    FAIL = 3


# TensorFlow LSTMCell/BasicLSTMCell computation graph matching

# Shared sub-pattern: concat(x, h) -> MatMul(cell_kernel) -> BiasAdd -> Split,
# i.e. the fused gate pre-activations of the LSTM cell. The kernel and bias
# reach the loop body through Enter nodes (loop-invariant values).
xc_pattern = \
    OpTypePattern('Split', inputs=[
        OpTypePattern("Const"),  # axis for split
        OpTypePattern("BiasAdd", name="bias_add", inputs=[
            OpTypePattern("MatMul", inputs=[
                OpTypePattern("ConcatV2|Concat", name="xh"),
                OpTypePattern("Enter", inputs=[
                    OpTypePattern("*", name="cell_kernel"),
                ]),
            ]),
            OpTypePattern("Enter", inputs=[
                OpTypePattern("*", name="cell_bias"),
            ]),
        ]),
    ])

# Full LSTM cell output: h_t = sigmoid(o) * tanh(c_t), where
# c_t = sigmoid(f) * c_{t-1} + sigmoid(i) * tanh(g) — see the named
# sub-patterns ot/ft/it/gt below.
lstmcell_pattern = \
    OpTypePattern('Mul', name='ht', inputs=[
        OpTypePattern("Sigmoid", name="ot", inputs=[xc_pattern]),
        OpTypePattern('Tanh', inputs=[
            OpTypePattern("Add|AddV2", name="ct", inputs=[
                OpTypePattern("Mul", name="ct_identity_consumer", inputs=[
                    OpTypePattern("Sigmoid", name="ft", inputs=[
                        OpTypePattern("Add|AddV2", inputs=[
                            xc_pattern,
OpTypePattern("*", name="ft_bias"), + ]), + ]), + OpTypePattern("*"), + ]), + OpTypePattern("Mul", inputs=[ + OpTypePattern("Sigmoid", name="it", inputs=[xc_pattern]), + OpTypePattern("Tanh", name="gt", inputs=[xc_pattern]), + ]), + ]), + ]), + ]) + +xc_pattern_optimized = \ + OpTypePattern('Split', inputs=[ + OpTypePattern("Const"), + OpTypePattern("Identity", inputs=[ + OpTypePattern("MatMul", inputs=[ + OpTypePattern("ConcatV2|Concat", name="xh"), + OpTypePattern("Const", name="cell_kernel"), + ]), + ]), + ]) + +lstmcell_pattern_optimized = \ + OpTypePattern('Mul', name='ht', inputs=[ + OpTypePattern("Sigmoid", name="ot", inputs=[xc_pattern_optimized]), + OpTypePattern('Tanh', inputs=[ + OpTypePattern("Add|AddV2", name="ct", inputs=[ + OpTypePattern("Mul", name="ct_identity_consumer", inputs=[ + OpTypePattern("Sigmoid", name="ft", inputs=[ + OpTypePattern("Add|AddV2", inputs=[ + xc_pattern_optimized, + OpTypePattern("*", name="ft_bias"), + ]), + ]), + OpTypePattern("*"), + ]), + OpTypePattern("Mul", inputs=[ + OpTypePattern("Sigmoid", name="it", inputs=[xc_pattern_optimized]), + OpTypePattern("Tanh", name="gt", inputs=[xc_pattern_optimized]), + ]), + ]), + ]), + ]) + +# input sequence: top to down, left to right +# split into update gate and reset gate +gru_split_pattern = \ + OpTypePattern("Split", inputs=[ + OpTypePattern("Const"), # split dim, a constant + OpTypePattern("Sigmoid", inputs=[ + OpTypePattern("BiasAdd", inputs=[ + OpTypePattern("Enter", inputs=[ + OpTypePattern("*", name="gate_bias") + ]), + OpTypePattern("MatMul", name="update_reset_gate", inputs=[ + OpTypePattern("Enter", inputs=[ + OpTypePattern("*", name="gate_kernel") + ]), + OpTypePattern("ConcatV2|Concat", name="cell_inputs") + ]) + ]) + ]) + ]) + + +grucell_pattern = \ + OpTypePattern("Add", name="cell_output", inputs=[ + OpTypePattern("Mul", inputs=[ + gru_split_pattern, + OpTypePattern("Identity") + ]), + OpTypePattern("Mul", inputs=[ + OpTypePattern("Sub", inputs=[ + 
OpTypePattern("Const"), # 1-u + gru_split_pattern + ]), + OpTypePattern("*", name="optional_activation", inputs=[ + OpTypePattern("BiasAdd", inputs=[ + OpTypePattern("Enter", inputs=[ + OpTypePattern("*", name="hidden_bias") + ]), + OpTypePattern("MatMul", inputs=[ + OpTypePattern("Enter", inputs=[ + OpTypePattern("*", name="hidden_kernel") + ]), + OpTypePattern("ConcatV2|Concat") + ]) + ]) + ]) + ]) + ]) + + +cudnn_compatible_grucell_pattern = \ + OpTypePattern("Add", name="cell_output", inputs=[ + OpTypePattern("Mul", inputs=[ + OpTypePattern("Sub", inputs=[ + OpTypePattern("Const"), # 1-u + gru_split_pattern + ]), + OpTypePattern("*", name="optional_activation", inputs=[ + OpTypePattern("Add", inputs=[ + OpTypePattern("Mul", inputs=[ + gru_split_pattern, + OpTypePattern("BiasAdd", inputs=[ + OpTypePattern("Enter", inputs=[ + OpTypePattern("*", name="hidden_state_bias") + ]), + OpTypePattern("MatMul", inputs=[ + OpTypePattern("Enter", inputs=[ + OpTypePattern("*", name="hidden_state_kernel"), + ]), + OpTypePattern("Identity") + ]) + ]) + ]), + OpTypePattern("BiasAdd", inputs=[ + OpTypePattern("Enter", inputs=[ + OpTypePattern("*", name="hidden_input_bias") + ]), + OpTypePattern("MatMul", inputs=[ + OpTypePattern("Enter", inputs=[ + OpTypePattern("*", name="hidden_input_kernel"), + ]), + OpTypePattern("*") + ]) + ]) + ]) + ]) + ]), + OpTypePattern("Mul", inputs=[ + gru_split_pattern, + OpTypePattern("Identity") + ]) + ]) + + +grublockcell_pattern0 = OpTypePattern("GRUBlockCell", name="gru_block_cell", inputs=[ + OpTypePattern("*"), + OpTypePattern("*"), + OpTypePattern("Enter", inputs=[ + OpTypePattern("*", name="gate_kernel") + ]), + OpTypePattern("Enter", inputs=[ + OpTypePattern("*", name="hidden_kernel") + ]), + OpTypePattern("Enter", inputs=[ + OpTypePattern("*", name="gate_bias") + ]), + OpTypePattern("Enter", inputs=[ + OpTypePattern("*", name="hidden_bias") + ]) +]) + + +grublockcell_pattern1 = OpTypePattern("GRUBlockCell", name="gru_block_cell", inputs=[ 
OpTypePattern("*"),
    OpTypePattern("*"),
    OpTypePattern("Const", name="gate_kernel"),
    OpTypePattern("Const", name="hidden_kernel"),
    OpTypePattern("Const", name="gate_bias"),
    OpTypePattern("Const", name="hidden_bias")
])


# Fused TF LSTMBlockCell op; kernel and bias arrive through Enter nodes.
# Pi/Pf/Po are the peephole inputs of the block cell.
lstmblockcell_pattern = \
    OpTypePattern("LSTMBlockCell", name="lstm_block_cell", inputs=[
        OpTypePattern("*"),
        OpTypePattern("*"),
        OpTypePattern("*"),
        OpTypePattern("Enter", inputs=[
            OpTypePattern("*", name="cell_kernel")
        ]),
        OpTypePattern("*", name="Pi"),
        OpTypePattern("*", name="Pf"),
        OpTypePattern("*", name="Po"),
        OpTypePattern("Enter", inputs=[
            OpTypePattern("*", name="cell_bias")
        ])
    ])


# Select(GreaterEqual(*, seq_len), ...) shape used to locate the sequence
# length node; variant 0 reads it through an Enter (loop-invariant) node.
seq_len_pattern0 = OpTypePattern("Select|SelectV2", inputs=[
    OpTypePattern("GreaterEqual", inputs=[
        OpTypePattern("*"),
        OpTypePattern("Enter", inputs=[
            OpTypePattern("*", name="seq_len_node")
        ])
    ]),
    OpTypePattern("*"),
    OpTypePattern("*")
])


# Same as seq_len_pattern0 but the sequence length is a plain Const.
seq_len_pattern1 = OpTypePattern("Select|SelectV2", inputs=[
    OpTypePattern("GreaterEqual", inputs=[
        OpTypePattern("*"),
        OpTypePattern("Const", name="seq_len_node")
    ]),
    OpTypePattern("*"),
    OpTypePattern("*")
])


class RNNUnitType(Enum):
    LSTMCell = 0  # TF LSTMCell and BasicLSTMCell share the same pattern
    LSTMBlockCell = 1
    GRUCell = 2
    GRUBlockCell = 3
    CudnnCompatibleGRUCell = 4


# Each cell type maps to the list of graph patterns that can match it.
rnn_cell_patterns = {
    RNNUnitType.LSTMCell: [lstmcell_pattern, lstmcell_pattern_optimized],
    RNNUnitType.LSTMBlockCell: [lstmblockcell_pattern],
    RNNUnitType.GRUCell: [grucell_pattern],
    RNNUnitType.GRUBlockCell: [grublockcell_pattern0, grublockcell_pattern1],
    RNNUnitType.CudnnCompatibleGRUCell: [cudnn_compatible_grucell_pattern]
}


def get_pattern(cell_type_name):
    """Return the list of graph patterns registered for an RNNUnitType."""
    return rnn_cell_patterns[cell_type_name]


def get_rnn_scope_name(while_scope_name):
    """Drop the last two '/'-separated components of a while-loop scope name.

    e.g. "a/b/while/x" -> "a/b/"; the result always ends with a slash.
    """
    parts = while_scope_name.split('/')
    rnn_scope = '/'.join(parts[0:-2]) + "/"
    return rnn_scope


def parse_rnn_loop(graph, loop_properties, rnn_scope, while_context_scope):
    """check if
the while loop is generated by dynamic_rnn or bidirectional_rnn

    Args:
        loop_properties: LoopProperties
        rnn_scope: rnn scope name
        while_context_scope: while loop scope name

    Returns:
        (time_var, iteration_var) on success, None if the loop does not look
        like a dynamic_rnn loop.

    check a while loop is generated by dynamic_rnn or bidirectional_rnn by

    1. some patterns in _time_step in dynamic_rnn: tensor array read, tensor array write
    2. some patterns in control_flow_ops.while_loop in dynamic_rnn:
        cond: time < loop_bound
        loop_vars: (time, output_ta, state)
        time has name called "time"
        iteration_cnt is added by control flow.

    be noted:
    1. iteration counter does not exist in tf1.4 or earlier versions
    2. if dynamic_rnn's first input is not consumed, output ta does not exist.
    """
    # Expected names of the loop's bookkeeping variables within this scope.
    time_name = rnn_scope + "time"
    ta_array_name_prefix = rnn_scope + "dynamic_rnn/output_"
    iteration_counter_name = while_context_scope + "iteration_counter"

    found_time = False
    # Tri-state: None = no foreign tensor array seen; False = at least one
    # tensor array does not belong to this rnn scope (disqualifies the loop).
    is_rnn_out_ta = None
    time_var = None
    iteration_var = None
    for val in loop_properties.all_variables.values():
        enter_input_node = graph.get_node_by_output(val.enter_input_id)
        if val.is_tensor_array:
            ta_name = enter_input_node.get_attr("tensor_array_name").s.decode("utf-8")
            if not ta_name.startswith(ta_array_name_prefix):
                is_rnn_out_ta = False
        elif enter_input_node.name == time_name:
            found_time = True
            time_var = val
        elif enter_input_node.name == iteration_counter_name:
            iteration_var = val

    if not found_time or is_rnn_out_ta is False:
        logger.debug("this should not be a dynamic_rnn loop, found_time: %s, is_rnn_out_ta: %s",
                     found_time, is_rnn_out_ta)
        return None

    if not loop_properties.tensor_array_inputs:
        logger.debug("this should not be a dynamic_rnn loop, no ta input is found")
        return None

    return time_var, iteration_var


def get_weights_from_const_node(g, node):
    temp = node
    val = None
    # this would help ignore Identity in non-const_folded graph.
+ while temp.type == 'Identity': + temp = temp.inputs[0] + + if temp and temp.type == 'Const': + val = temp.get_tensor_value(as_list=False) + dtype = utils.map_onnx_to_numpy_type(g.get_dtype(temp.output[0])) + val = val.astype(dtype) + logger.debug("found weights %s", temp.name) + else: + logger.debug("weight node seems not to be Const, skip, node name is %s", temp.name) + return None + + return val + + +###################################################### +#### Utilities for bidirectional rnn ####### +###################################################### +class ONNX_RNN_TYPE(Enum): + GRU = 0 + LSTM = 1 + + +onnx_rnn_type_mapping = { + ONNX_RNN_TYPE.GRU: "GRU", + ONNX_RNN_TYPE.LSTM: "LSTM" +} + +onnx_rnn_attr_mapping = { + ONNX_RNN_TYPE.LSTM: [ + "clip", + "hidden_size", + "input_forget" + ], + ONNX_RNN_TYPE.GRU: { + "clip", + "hidden_size", + "linear_before_reset" + } +} +onnx_rnn_seq_len_index_mapping = { + ONNX_RNN_TYPE.LSTM: 4, + ONNX_RNN_TYPE.GRU: 4 +} + + +def find_bidirectional_rnns(g, ops, rnn_type): + """ + Find possible bidirectional rnns, return: list of tuple, + Format of tuple is (fw onnx rnn node, bw onnx rnn node). + """ + fw_rnns = defaultdict(list) + bw_rnns = defaultdict(list) + for n in g.get_nodes(): + if n.type != onnx_rnn_type_mapping[rnn_type]: + continue + + input_id = n.input[0] + temp = n.inputs[0] + is_bw = False + if temp.type == "Transpose": + input_id = temp.input[0] + temp = temp.inputs[0] + + if utils.is_tf_reverse_op(temp): + input_id = temp.input[0] + is_bw = True + + if is_bw: + # if output 0 is consumed and there is no reverse after the 1st output. + # it's not backward rnn. 
def find_bidirectional_rnns(g, ops, rnn_type):
    """
    Find possible bidirectional rnns, return: list of tuple,
    Format of tuple is (fw onnx rnn node, bw onnx rnn node).
    """
    fw_rnns = defaultdict(list)
    bw_rnns = defaultdict(list)
    for n in g.get_nodes():
        if n.type != onnx_rnn_type_mapping[rnn_type]:
            continue

        input_id = n.input[0]
        temp = n.inputs[0]
        is_bw = False
        # look through an optional Transpose to find the real feed
        if temp.type == "Transpose":
            input_id = temp.input[0]
            temp = temp.inputs[0]

        # a Reverse feeding the rnn marks it as the backward direction
        if utils.is_tf_reverse_op(temp):
            input_id = temp.input[0]
            is_bw = True

        if is_bw:
            # if output 0 is consumed and there is no reverse after the 1st output.
            # it's not backward rnn.
            if g.find_output_consumers(n.output[0]) and not get_reverse_nodes_after_y_output(g, n):
                logger.warning("rnn %s following Reverse op isn't the part of bi-rnn.", n.name)
                continue

            logger.debug("find bw rnn %s", input_id)
            bw_rnns[input_id].append(n)
        else:
            logger.debug("find fw rnn %s", input_id)
            fw_rnns[input_id].append(n)

    # fw_rnn and bw_rnn must share the same input
    birnn_input = list(set(fw_rnns.keys()).intersection(bw_rnns.keys()))
    bi_rnns = []
    matched_rnn = []
    for inp in birnn_input:
        fw_rnn = fw_rnns[inp]
        bw_rnn = bw_rnns[inp]
        # it's possible several bi-rnns share the same input
        for fw_n in fw_rnn:
            for bw_n in bw_rnn:
                if belong_to_birnn(g, fw_n, bw_n, rnn_type) and \
                        fw_n not in matched_rnn and bw_n not in matched_rnn:
                    logger.debug("found birnn comprising %s and %s", fw_n.name, bw_n.name)
                    bi_rnns.append((fw_n, bw_n))
                    matched_rnn.extend([fw_n, bw_n])
    return bi_rnns


def belong_to_birnn(g, fw_rnn, bw_rnn, rnn_type):
    """
    Check whether fw_rnn and bw_rnn are part of the same birnn.
    If fw_rnn and bw_rnn have the same attributes except those related to activation
    and share the same seq_len, they are able to be merged into a bi-rnn.
    """
    logger.debug("check whether %s and %s are part of birnn", fw_rnn.name, bw_rnn.name)
    for name in onnx_rnn_attr_mapping[rnn_type]:
        fw_attr_value = fw_rnn.get_attr_value(name)
        bw_attr_value = bw_rnn.get_attr_value(name)
        if fw_attr_value != bw_attr_value:
            logger.debug(
                "fw_rnn and bw_rnn mismatch at attr %s: %s, %s",
                name, fw_attr_value, bw_attr_value
            )
            return False

    seq_len_index = onnx_rnn_seq_len_index_mapping[rnn_type]
    fw_seq_len = fw_rnn.input[seq_len_index]
    bw_seq_len = bw_rnn.input[seq_len_index]
    if not utils.have_same_inference_value(g, fw_seq_len, bw_seq_len):
        logger.debug(
            "fw_rnn and bw_rnn have different seq_len input: %s, %s",
            fw_seq_len, bw_seq_len
        )
        return False

    return True


def get_reverse_nodes_after_y_output(g, rnn_bw):
    """Return the Reverse nodes that undo the bw rnn's time reversal, or [] if the
    consumer chain does not look like Squeeze -> (Transpose ->) Reverse."""
    bw_consumers = g.find_output_consumers(rnn_bw.output[0])

    # todo: figure out a better way to remove reverse op
    squeeze_nodes = [c for c in bw_consumers if c.type == "Squeeze"]
    s_cnt = len(squeeze_nodes)
    if s_cnt == 1:
        s = squeeze_nodes[0]
        trans_nodes = g.find_output_consumers(s.output[0])
        if len(trans_nodes) == 1:
            if trans_nodes[0].type == "Transpose":
                reverse_nodes = g.find_output_consumers(trans_nodes[0].output[0])
            elif utils.is_tf_reverse_op(trans_nodes[0]):
                reverse_nodes = trans_nodes
            else:
                logger.debug("not found reverse op, unexpected")
                return []

            are_all_reverse = all(utils.is_tf_reverse_op(r_op) for r_op in reverse_nodes)
            if are_all_reverse:
                return reverse_nodes

            logger.debug("bw y output is used followed by reverse node")
            return []

        logger.debug("unexpected number of transpose after RNN 1st output:%s", s_cnt)
        return []

    logger.debug("unexpected number of squeeze following RNN 1st output:%s", s_cnt)
    return []


def get_np_val_for_const(g, node, input_index):
    """Return the tensor value (numpy array) of the node's input_index-th input."""
    return node.inputs[input_index].get_tensor_value(as_list=False)


def check_const(g, input_id):
    """Return (True, value) when input_id is produced by a Const node, else (None, None)."""
    node = g.get_node_by_output(input_id)
    if node and node.is_const():
        return (True, node.get_tensor_value(as_list=False))
    return (None, None)


def process_single_init_node(g, fw_init_input_id, bw_init_input_id, to_append):
    """Concatenate fw/bw initial-state inputs along axis 0, folding to a Const
    when both inputs are constant; the new node is appended to to_append."""
    fw_init_is_const, init_fw_val = check_const(g, fw_init_input_id)
    bw_init_is_const, init_bw_val = check_const(g, bw_init_input_id)
    if fw_init_is_const and bw_init_is_const:
        initial_val = np.concatenate((init_fw_val, init_bw_val), axis=0)
        init_name = utils.make_name("initial")
        init_node = g.make_const(init_name, initial_val, skip_conversion=True)
    else:
        init_node = g.make_node("Concat", [fw_init_input_id, bw_init_input_id], attr={"axis": 0})

    to_append.append(init_node)
    return init_node


def slice_birnn_for_original_rnn_consumers(g, rnn_fw, rnn_bw, bi_rnn, rnn_output_index, all_nodes, to_remove):
    """Reconnect consumers of the original fw/bw rnn outputs to slices of the
    merged bidirectional node's corresponding output."""
    fw_consumers = g.find_output_consumers(rnn_fw.output[rnn_output_index])
    bw_consumers = g.find_output_consumers(rnn_bw.output[rnn_output_index])
    if not fw_consumers and not bw_consumers:
        return

    if rnn_output_index == 0:
        # Y output: num_directions is axis 1
        axis = 1
        # remove reverse op for rnn_bw
        reverse_nodes = get_reverse_nodes_after_y_output(g, rnn_bw)

        for r_op in reverse_nodes:
            logger.debug("remove reverse op %s", r_op.name)
            g.replace_all_inputs(r_op.output[0], r_op.input[0], ops=all_nodes)
            to_remove.append(r_op.name)
    elif rnn_output_index in [1, 2]:
        # Y_h / Y_c outputs: num_directions is axis 0
        axis = 0
    else:
        raise ValueError("rnn only should has 3 outputs.")

    if fw_consumers:
        # forward direction is slice [0:1] along the num_directions axis
        attr = {"axes": [axis], "starts": [0], "ends": [1]}
        inputs_map = {"data": bi_rnn.output[rnn_output_index], **attr}
        slice_node_fw = GraphBuilder(g).make_slice(inputs_map)
        all_nodes.append(g.get_node_by_output(slice_node_fw))
        g.replace_all_inputs(rnn_fw.output[rnn_output_index], slice_node_fw, ops=fw_consumers)

    if bw_consumers:
        # backward direction is slice [1:2]
        attr = {"axes": [axis], "starts": [1], "ends": [2]}
        inputs_map = {"data": bi_rnn.output[rnn_output_index], **attr}
        slice_node_bw = GraphBuilder(g).make_slice(inputs_map)
        all_nodes.append(g.get_node_by_output(slice_node_bw))
        g.replace_all_inputs(rnn_bw.output[rnn_output_index], slice_node_bw, ops=bw_consumers)


def remove_reverse_in_bw_input(g, bw_rnn_input_x, rnn_type):
    """Drop the Reverse (and optional Transpose) feeding the bw rnn input once no
    rnn node consumes it anymore; raise if it is still in use."""
    old_x_consumers = g.find_output_consumers(bw_rnn_input_x)
    # the transpose/reverse here must be followed by RNN if it is still useful.
    # this is guaranteed by dynamic_rnn logic.
    old_x_has_rnn_as_consumer = [n for n in old_x_consumers if n.type == onnx_rnn_type_mapping[rnn_type]]
    if not old_x_has_rnn_as_consumer:
        logger.debug("plan to remove useless reverse op in bw")
        reverse_node = g.get_node_by_output(bw_rnn_input_x)

        if reverse_node.type == "Transpose":
            reverse_node = reverse_node.inputs[0]

        g.replace_all_inputs(reverse_node.output[0], reverse_node.input[0])  # ops=g.get_nodes()
        g.remove_node(reverse_node.name)
    else:
        raise ValueError("Reverse is still used by RNN as input, cannot remove")
name='mul_input') + ]) + matcher = GraphMatcher(pattern, allow_reorder=True) + match_results = list(matcher.match_ops(ops)) + + for match in match_results: + greater_node = match.get_op('greater') + greater_input_node = match.get_op('greater_input') + mul_node = match.get_op("mul") + mul_input_node = match.get_op('mul_input') + cast_node = match.get_op('cast') + + greater_input_edge_name = _find_edge_name_between_nodes(greater_input_node, greater_node) + mul_input_edge_name = _find_edge_name_between_nodes(mul_input_node, mul_node) + if greater_input_edge_name == mul_input_edge_name: + theta = match.get_op('theta').get_tensor_value() + thresholded_relu = g.make_node("ThresholdedRelu", inputs=[mul_input_edge_name], attr={"alpha": theta}, + shapes=[g.get_shape(mul_node.output[0])], + dtypes=[g.get_dtype(mul_node.output[0])]) + g.replace_all_inputs(mul_node.output[0], thresholded_relu.output[0], ops=ops) + to_delete = [cast_node, mul_node] + g.safe_remove_nodes(to_delete) + return ops diff --git a/lib/python3.10/site-packages/tf2onnx/rewriter/transpose_rewriter.py b/lib/python3.10/site-packages/tf2onnx/rewriter/transpose_rewriter.py new file mode 100644 index 0000000000000000000000000000000000000000..277e68b2708a195d04a9881828baec720d9d74bc --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/rewriter/transpose_rewriter.py @@ -0,0 +1,34 @@ +# SPDX-License-Identifier: Apache-2.0 + + +""" +tf2onnx.rewriter - rewrite tensorflow transpose op +""" + +from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher + + +# pylint: disable=missing-docstring + + +def rewrite_transpose(g, ops): + pattern = \ + OpTypePattern('Transpose', name='output', inputs=[ + OpTypePattern(None), + OpTypePattern('Sub', inputs=[ + OpTypePattern('Sub', inputs=["*", "*"]), + OpTypePattern('Range', inputs=["*", "*", "*"]), + ]), + ]) + + matcher = GraphMatcher(pattern) + match_results = list(matcher.match_ops(ops)) + for match in match_results: + output = match.get_op('output') + shape = 
g.get_shape(output.input[0]) + dims = range(len(shape) - 1, -1, -1) + output.set_attr("perm", dims) + g.remove_input(output, output.input[1], 1) + to_delete = [n for n in match.get_nodes() if n != output] + g.safe_remove_nodes(to_delete) + return ops diff --git a/lib/python3.10/site-packages/tf2onnx/rewriter/unit_rnn_rewriter_base.py b/lib/python3.10/site-packages/tf2onnx/rewriter/unit_rnn_rewriter_base.py new file mode 100644 index 0000000000000000000000000000000000000000..e7ca886b83f2e9e449ac11fb1ff8f111ce84f2b9 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/rewriter/unit_rnn_rewriter_base.py @@ -0,0 +1,306 @@ +# SPDX-License-Identifier: Apache-2.0 + + +""" +tf2onnx.rewriter.unit_rnn_rewriter_base +""" + +from __future__ import division +from __future__ import print_function +import logging + +from tf2onnx.rewriter.loop_rewriter_base import LoopRewriterBase, Context +from tf2onnx.rewriter.rnn_utils import REWRITER_RESULT, get_pattern, \ + get_rnn_scope_name, parse_rnn_loop, seq_len_pattern0, seq_len_pattern1 +from tf2onnx.utils import is_tf_select_op, is_tf_tensor_array_write_op +from tf2onnx.graph_matcher import GraphMatcher +from tf2onnx.graph_builder import GraphBuilder + + +logger = logging.getLogger(__name__) + + +# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test,broad-except,protected-access + +class UnitRnnContext(Context): + def __init__(self): + super(UnitRnnContext, self).__init__() + self.rnn_scope = None + self.cell_match = None # matched cell + + self.weights = {} + self.seq_len_node = None + self.state_variables = {} + self.input_size = None + self.hidden_size = None + + self.attributes = {} # onnx attributes + # onnx inputs: [X, W, R, B, sequence_lens, initial_h, initial_c, P], + # sequence_lens is optional, i.e., None + self.onnx_input_ids = {} + + +class UnitRnnRewriterBase(LoopRewriterBase): + """ + main procedures: + 1 extract info of while_loop based on loop_rewriter_base + 2 check whether extracted 
loop is a unit rnn, fall back in necessity: + 1 parse rnn scope name + 2 check if it's a dynamic_rnn + 3 find needed info from tensorflow graph + 3 process found info according to ONNX requirement + """ + def __init__(self, g): + super(UnitRnnRewriterBase, self).__init__(g) + # {var_name: (finder, connector)} + self.state_variable_handler = None + self.state_variable_handlers = None + + def create_context(self): + return UnitRnnContext() + + def run(self): + return self.run_internal() + + def need_rewrite(self, context): + context.rnn_scope = get_rnn_scope_name(context.while_context_scope) + + if not parse_rnn_loop(self.g, context.loop_properties, context.rnn_scope, + context.while_context_scope): + logger.debug("parse_rnn_loop failed, SKIP") + return False + + if not self.parse_unit_rnn(context): + logger.debug("failed to parse unit rnn, SKIP") + return False + + if not self.is_valid(context): + logger.debug("parsed rnn is not valid, SKIP") + return False + return True + + def is_valid(self, context): + return True + + def parse_unit_rnn(self, context): + """ + parse needed info from tensorflow graph: + 1 weight + 2 state variables used in rnn unit, such as c_t, h_t + 3 sequence node + 4 input_x + 5 attributes, e.g., activation_alpha, activation_beta... 
optional + """ + logger.debug("parse unit rnn") + + logger.debug("match unit cell against loop body graph") + cell_match = self.find_cell(context) + if not cell_match: + logger.debug('failed to match cell pattern') + return False + context.cell_match = cell_match + + logger.debug("get_weight_and_bias starts") + weights = self.get_weight_and_bias(context) + if not weights: + logger.debug("rnn weights check failed, SKIP") + return False + context.weights = weights + + if not self.get_state_variables(context): + logger.debug("no cell variable initializers found, SKIP") + return False + + seq_len_node = self.find_sequence_length_node(context) + if seq_len_node: + logger.debug("find sequence node: %s", seq_len_node.name) + context.onnx_input_ids["sequence_lens"] = seq_len_node.output[0] + else: + context.onnx_input_ids["sequence_lens"] = None + + # require exact one input + inputs = context.loop_properties.scan_inputs_initial_values + if len(inputs) != 1: + logger.debug("found %d inputs for the unit rnn: %s", + len(inputs), inputs) + return False + context.onnx_input_ids["X"] = inputs[0] + + if not self.parse_attributes(context): + logger.debug("wrong attributes found") + return False + + return True + + def find_cell(self, context): + raise NotImplementedError() + + def _match_cell(self, context, unittype): + """match unit cell""" + for cell_pattern in get_pattern(unittype): + matcher = GraphMatcher(cell_pattern, allow_reorder=True) + + loop_props = context.loop_properties + inputs = loop_props.state_inputs + loop_props.scan_inputs + input_ids = [input_tensor_value_info.id for input_tensor_value_info in inputs] + outputs = loop_props.state_outputs + loop_props.scan_outputs + output_ids = [out_tensor_value_info.id for out_tensor_value_info in outputs] + body_graph_ops, _, _ = LoopRewriterBase.find_subgraph( + set(input_ids), + set(output_ids), + self.g, merge_as_end=True + ) + + match_results = list(matcher.match_ops(body_graph_ops)) + if len(match_results) == 1: + 
return match_results[0] + return None + + def get_weight_and_bias(self, context): + raise NotImplementedError() + + def parse_attributes(self, context): + return True + + def rewrite(self, context): + logger.debug("enter unit rnn rewrite function") + + logger.debug("process the weights/bias/ft_bias, to fit onnx weights/bias requirements") + self.process_weights_and_bias(context) + + self.process_var_init_nodes(context) + + logger.debug("start to build new rnn node") + + rnn_node = self.create_rnn_node(context) + context.rnn_node = rnn_node + + logger.debug("start to handle outputs") + # format of ONNX output is different with tf + self.process_outputs(context) + + logger.debug("rewrite successfully") + return REWRITER_RESULT.OK + + def get_state_variables(self, context): + """ + Get state variables by provided handlers. There maybe several handlers corresponding to + different patterns of state variables. + The commone method is to find state variables from loop property according to its + next_iteration_input and switch_true_identity_output, see lstm_rewriter_v2 + """ + for handler in self.state_variable_handlers: + can_handle = True + for var_name, funcs in handler.items(): + finder = funcs[0] + state_variable = finder(context) + if state_variable: + logger.debug("found state variable %s", var_name) + context.state_variables[var_name] = state_variable + else: + logger.debug("failed to get state variable %s", var_name) + can_handle = False + break + if can_handle: + self.state_variable_handler = handler + return True + return False + + def find_sequence_length_node(self, context): + # get any state variable + state_variable = list(context.state_variables.values())[0] + next_iter_input_node = self.g.get_node_by_output(state_variable.next_iteration_input.id) + if not is_tf_select_op(next_iter_input_node): + logger.debug("no sequence length node is given") + return None + matcher = GraphMatcher(seq_len_pattern0) + match_result = matcher.match_op(next_iter_input_node) 
+ if not match_result: + matcher = GraphMatcher(seq_len_pattern1) + match_result = matcher.match_op(next_iter_input_node) + if not match_result: + raise RuntimeError("failed to find sequence length.") + return match_result.get_op("seq_len_node") + + def process_weights_and_bias(self, context): + raise NotImplementedError() + + def process_var_init_nodes(self, context): + raise NotImplementedError() + + def create_rnn_node(self, context): + raise NotImplementedError() + + def process_outputs(self, context): + for var_name, funcs in self.state_variable_handler.items(): + output_connector = funcs[1] + output_connector(context) + logger.debug("connect output of %s to graph", var_name) + + self.connect_unit_rnn_output_to_graph(context) + + def connect_unit_rnn_output_to_graph(self, context): + outputs = context.loop_properties.scan_outputs_exits + if not outputs: + logger.debug("no one consume output") + return + + gather_output_id = outputs[0].id + logger.debug("found output for rnn: %s", gather_output_id) + + # in tf batch major mode, output shape is : [batch, time, hidden] + # in time major mode, output shape is: [time, batch, hidden] + # in onnx, output shape is : [time, num_directions, batch, hidden] + + rnn_node = context.rnn_node + output_id = rnn_node.output[0] + rnn_output_shape = self.g.get_shape(output_id) + squeeze_output_shape = [rnn_output_shape[0], rnn_output_shape[2], rnn_output_shape[3]] + gb = GraphBuilder(self.g) + squeeze_node = gb.make_squeeze({'data': output_id, "axes": [1]}, + shapes=[squeeze_output_shape], + dtypes=[self.g.get_dtype(output_id)], + return_node=True) + self.g.replace_all_inputs(gather_output_id, squeeze_node.output[0]) # ops=self.g.get_nodes() + + def _find_state_variable_with_select(self, context, + next_iteration_input, + switch_true_identity_consumers): + """ + Find state variables from switch_true_identity_consumers to next_iteration_input. + Select maybe added after next_iteration_input. 
+ """ + # find all select not followed by TensorArrayWrite + select = [] + for c in self.g.find_output_consumers(next_iteration_input): + if not is_tf_select_op(c): + continue + out_ta_writer = [ + o for o in self.g.find_output_consumers(c.output[0]) if is_tf_tensor_array_write_op(o) + ] + if out_ta_writer: + continue + select.append(c) + if len(select) == 1: + next_iteration_input = select[0].output[0] + switch_true_identity_consumers.append(select[0]) + + logger.debug( + "try to find state variable from [%s, %s]", + next_iteration_input, + switch_true_identity_consumers + ) + + def checker(state_variable): + if state_variable.next_iteration_input.id != next_iteration_input: + return False + for consumer in switch_true_identity_consumers: + if state_variable.switch_true_identity_output.id not in consumer.input: + return False + return True + + state_variables = context.loop_properties.get_variables(checker) + if len(state_variables) != 1: + logger.debug("found %d state variables", len(state_variables)) + return None + return state_variables[0] diff --git a/lib/python3.10/site-packages/tf2onnx/schemas.py b/lib/python3.10/site-packages/tf2onnx/schemas.py new file mode 100644 index 0000000000000000000000000000000000000000..800d39f0f0a2e3d1d31c6921ddea8314faa3727d --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/schemas.py @@ -0,0 +1,191 @@ +# SPDX-License-Identifier: Apache-2.0 + + +""" +tf2onnx.schema +""" + +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import logging +import copy +from collections import defaultdict, OrderedDict +from onnx import defs, helper, TensorProto, OperatorSetIdProto, shape_inference + +from . import constants +from . 
logger = logging.getLogger(__name__)


class OnnxOpSchema(object):
    """Wrapper for Onnx schema."""

    def __init__(self, name, domain, since_version, attributes):
        """Create a Onnx schema
        Args:
            name (str): op name
            attributes (List[str]): valid attributes
            domain (str): default value "" means it's Onnx domain
            since_version (int): opset version, default is 1
        """
        self._name = name
        self._domain = domain
        self._attributes = attributes
        self._since_version = since_version

    @property
    def attributes(self):
        return self._attributes

    @property
    def domain(self):
        return self._domain

    @property
    def name(self):
        return self._name

    @property
    def since_version(self):
        return self._since_version

    @staticmethod
    def from_onnx_schema(onnx_schema):
        """Build an OnnxOpSchema from an onnx.defs schema object."""
        name = onnx_schema.name
        domain = onnx_schema.domain
        since_version = int(onnx_schema.since_version)
        attributes = onnx_schema.attributes
        return OnnxOpSchema(name, domain, since_version, attributes)

    def has_attribute(self, attr):
        return attr in self.attributes


def _register_all_schemas_with_history():
    """Register all schemas with history"""
    onnx_schemas = defs.get_all_schemas_with_history()
    name_domain_version_schema_map = defaultdict(lambda: defaultdict(dict))
    for s in onnx_schemas:
        schema = OnnxOpSchema.from_onnx_schema(s)
        name_domain_version_schema_map[schema.name][schema.domain][schema.since_version] = schema

    ordered_map = defaultdict(lambda: defaultdict(OrderedDict))
    for name, domain_version_schema_map in name_domain_version_schema_map.items():
        for domain, version_schema_map in domain_version_schema_map.items():
            ordered_map[name][domain] = OrderedDict(
                sorted(version_schema_map.items(), key=lambda x: -x[0])
            )
    return ordered_map


def _parse_domain_opset_versions(schemas):
    """ Get max opset version among all schemas within each domain. """
    domain_opset_versions = dict()
    for domain_version_schema_map in schemas.values():
        for domain, version_schema_map in domain_version_schema_map.items():
            # version_schema_map is sorted by since_version in descend order
            max_version = next(iter(version_schema_map))
            if domain not in domain_opset_versions:
                domain_opset_versions[domain] = int(max_version)
            else:
                domain_opset_versions[domain] = max(domain_opset_versions[domain], int(max_version))
    return domain_opset_versions


# format is {op_name: {domain: OrderedDict({since_version: schema})}};
# SinceVersion is sorted from high to low
_schemas = _register_all_schemas_with_history()

_domain_opset_versions = _parse_domain_opset_versions(_schemas)


def get_schema(name, max_inclusive_opset_version, domain=None):
    """Get schema by name within specific version."""
    domain = domain or constants.ONNX_DOMAIN
    domain_version_schema_map = _schemas[name]
    version_schema_map = domain_version_schema_map[domain]
    # versions are ordered high -> low, so the first one not exceeding the
    # requested opset is the best match
    for version, schema in version_schema_map.items():
        if version <= max_inclusive_opset_version:
            return schema
    return None


def get_max_supported_opset_version(domain=None):
    """Get max supported opset version by current onnx package given a domain."""
    domain = domain or constants.ONNX_DOMAIN
    return _domain_opset_versions.get(domain, None)
def infer_onnx_shape_dtype(node, opset_version, input_shapes, input_dtypes, initializers=None):
    """
    Infer shapes and dtypes for outputs of the node.
    Sometimes, shape inference needs the values of node's inputs, so initializers are used.
    """

    def build_onnx_op(node):
        """Build onnx op"""
        onnx_node = helper.make_node(node.type, node.input, node.output, name=node.name)
        # deal with attributes
        attr = []
        attr_graphs = node.get_body_graphs()
        if attr_graphs:
            for attr_name, sub_graph in attr_graphs.items():
                copied_sub_graph = copy.deepcopy(sub_graph)
                graph_proto = copied_sub_graph.make_graph("graph for " + node.name + " " + attr_name)
                attr.append(helper.make_attribute(attr_name, graph_proto))
        attr.extend(node.get_onnx_attrs().values())
        if attr:
            onnx_node.attribute.extend(attr)
        return onnx_node

    # wrap the single node in a minimal model so onnx shape inference can run
    inputs = []
    outputs = []
    for inp, shape, dtype in zip(node.input, input_shapes, input_dtypes):
        inputs.append(utils.make_onnx_inputs_outputs(inp, dtype, shape))
    for output in node.output:
        outputs.append(utils.make_onnx_inputs_outputs(output, TensorProto.UNDEFINED, None))
    graph_proto = helper.make_graph([build_onnx_op(node)], "infer-graph", inputs, outputs, initializer=initializers)
    imp = OperatorSetIdProto()
    imp.version = opset_version
    model_proto = helper.make_model(graph_proto, opset_imports=[imp])

    inferred_model = None
    try:
        inferred_model = shape_inference.infer_shapes(model_proto)
    except Exception:  # pylint: disable=broad-except
        logger.warning(
            "ONNX Failed to infer shapes and dtypes for [%s, type: %s]",
            node.name, node.type, exc_info=1
        )
        return None, None

    shapes = {}
    dtypes = {}
    for output in inferred_model.graph.output:
        tensor_type = output.type.tensor_type
        if tensor_type.HasField("elem_type"):
            dtypes[output.name] = tensor_type.elem_type
        else:
            dtypes[output.name] = TensorProto.UNDEFINED
        # 0 in shapes of onnx means unknown which is -1 in our convertor
        if tensor_type.HasField("shape"):
            shapes[output.name] = [
                dim.dim_value if dim.dim_value != 0 else utils.ONNX_UNKNOWN_DIMENSION for dim in tensor_type.shape.dim
            ]
        else:
            shapes[output.name] = None

    # map the inferred values back onto the node's outputs, filling None/UNDEFINED
    # for outputs inference could not resolve
    output_shapes = []
    output_dtypes = []
    for output in node.output:
        if output in shapes:
            output_shapes.append(shapes[output])
        else:
            output_shapes.append(None)
        if output in dtypes:
            output_dtypes.append(dtypes[output])
        else:
            output_dtypes.append(TensorProto.UNDEFINED)
    return output_shapes, output_dtypes
tf_graph + + +def check_shape_for_tf_graph(tf_graph): + """ + Check whether TF graph misses any shape, + and return all ops with None shape outputs for TF graph. + """ + skip_list = {'FusedBatchNormV3': 5} + op_outputs_mapping_none_shape = defaultdict(list) + for op in tf_graph.get_operations(): + for i, out in enumerate(op.outputs): + if op.type in skip_list: + if skip_list[op.type] == i: + continue + if get_tf_tensor_shape(out) is None: + op_outputs_mapping_none_shape[op.name].append(out.name) + return op_outputs_mapping_none_shape + + +def infer_shape_for_graph(tf_graph): + """ + Infer shape for Tensorflow ops. + Tensorflow explicitly sets shape for some ops in python code, such as Switch, Merge and TensorArrayGather. + These shapes may be lost after freezing TF graph to graph_def without add_shapes=True. + To bring these shapes back, we implement our own shape inference for these control flow ops based on one assumption: + **outputs of Merge op have the same shape (at least the same rank) of its inputs**. + With this assumption, our shape inference can handle: + 1. in tf.cond, outputs of two branches have the same rank. + 2. in tf.while_loop, loop variables don't change their rank. + """ + shape_updated = True + while shape_updated: + shape_updated = False + for o in tf_graph.get_operations(): + updated = infer_shape_for_op(o) + if updated: + shape_updated = True + if shape_updated: + tf_graph = tf_reload_graph(tf_graph) + return tf_graph + + +def infer_shape_for_op(op): + has_unknown_output_shape = any(get_tf_tensor_shape(out) is None for out in op.outputs) + + if not has_unknown_output_shape: + return False + + if op.type == "Placeholder": + # if placeholder shape is not found, try to get it from "shape" attribute. 
        attr_shape = get_tf_shape_attr(op)
        if attr_shape is not None:
            new_shape = list(attr_shape)
            op.outputs[0].set_shape(new_shape)
            logger.debug("set placeholder op [%s] with new shape %s", op.outputs[0].name, new_shape)
            return True
        # No shape information anywhere: fall back to a scalar so conversion can continue.
        logger.warning("Shape of placeholder '%s' is unknown, treated it as a scalar. Please use the --inputs flag "
                       "and append the shape to the input name if this input is not a scalar.", op.name)
        op.outputs[0].set_shape([])
        return True

    if op.type == "Merge":
        s1 = get_tf_tensor_shape(op.inputs[0])
        s2 = get_tf_tensor_shape(op.inputs[1])
        new_shape = None
        if s1 is None and s2 is None:
            return False
        # If only one branch has a known shape, propagate it to the other
        # branch and to the output (Merge outputs mirror their inputs).
        if s1 is None and s2 is not None:
            new_shape = s2
        if s1 is not None and s2 is None:
            new_shape = s1

        if new_shape is not None:
            op.inputs[0].set_shape(new_shape)
            op.inputs[1].set_shape(new_shape)
            op.outputs[0].set_shape(new_shape)
            logger.debug("set [%s] with new shape %s", op.outputs[0].name, new_shape)
            return True

        # inputs' shapes both exist
        if s1 != s2:
            if len(s1) != len(s2):
                logger.warning("Shapes of Merge %s have different ranks: %s, %s", op.name, len(s1), len(s2))
                return False

            logger.debug("Inputs of Merge %s have different shapes: %s, %s, but the same rank", op.name, s1, s2)
            new_shape = _merge_shapes_for_tf(s1, s2)
            op.outputs[0].set_shape(new_shape)
            logger.debug("set [%s] with new shape %s", op.outputs[0].name, new_shape)
        else:
            new_shape = s1
            op.outputs[0].set_shape(new_shape)
            logger.debug("set [%s] with new shape %s", op.outputs[0].name, new_shape)

        return True

    if op.type == "Switch":
        # Both Switch outputs carry the same tensor as data input 0.
        new_shape = get_tf_tensor_shape(op.inputs[0])
        if new_shape is not None:
            op.outputs[0].set_shape(new_shape)
            op.outputs[1].set_shape(new_shape)
            logger.debug("set [%s] with new shape %s", op.outputs[0].name, new_shape)
            logger.debug("set [%s] with new shape %s", op.outputs[1].name, new_shape)
            return True
        return False

    if op.type == "Enter":
        # Enter forwards its single input into the loop frame unchanged.
        new_shape = get_tf_tensor_shape(op.inputs[0])
        if new_shape is not None:
            op.outputs[0].set_shape(new_shape)
            logger.debug("set [%s] with new shape %s", op.outputs[0].name, new_shape)
            return True
        return False

    if op.type == "TensorArrayGatherV3":
        # TensorArrayGatherV3's output: all of the elem in the TensorArray,
        # concatenated along a new axis (the new dimension 0), so shape of TensorArray should be found first.
        # And TensorArrayWrite will write elem to TensorArray, so shape of TensorArray can be got from TensorArrayWrite
        # so the process is: first find TensorArrayWrite and then get TensorArray's shape,
        # and finally add one dim to the shape is shape of TensorArrayGather

        handle_op = op.inputs[0].op
        if handle_op.type != "TensorArrayV3":
            return False

        # find TensorArrayWrite
        tensor_array_write_op = _find_tensorarray_write(handle_op)
        if not tensor_array_write_op:
            return False
        # get TensorArray shape from input tensor of the found TensorArrayWrite op
        shape = get_tf_tensor_shape(tensor_array_write_op.inputs[2])
        # update TensorArray's shape info
        if shape is not None:
            # leading None = unknown number of gathered elements
            new_shape = [None] + shape
            op.outputs[0].set_shape(new_shape)
            logger.debug("set [%s] with new shape %s", op.outputs[0].name, new_shape)
            return True
        return False

    if op.type == "TensorArrayReadV3":
        # TensorArrayRead reads an element from the TensorArray into output value.
        # The TensorArray's shape can be got from TensorArrayScatter.
        # So the process is: first find TensorArrayScatter's shape and then TensorArray's
        # and finally take its last n-1 dim.
        flow_in_op = op.inputs[2].op
        if flow_in_op.type != "Enter":
            return False

        scatter_op = flow_in_op.inputs[0].op
        if scatter_op.type != "TensorArrayScatterV3":
            return False

        value_shape_before_scatter = get_tf_tensor_shape(scatter_op.inputs[2])
        if value_shape_before_scatter is None:
            return False

        # drop the leading (element-count) dimension to obtain one element's shape
        new_shape = value_shape_before_scatter[1:]
        if new_shape is not None:
            op.outputs[0].set_shape(new_shape)
            logger.debug("set [%s] with new shape %s", op.outputs[0].name, new_shape)
            return True
        return False

    return False


def _find_tensorarray_write(op):
    """Find the TensorArrayWriteV3 op that writes into the given TensorArrayV3
    (searching through an intermediate Enter op); return None if not found."""
    utils.make_sure(op.type == "TensorArrayV3", "op should be tensorarray")

    tensor_array_consumers = op.outputs[0].consumers()
    for i in tensor_array_consumers:
        if i.type == "Enter":
            consumer_ops = i.outputs[0].consumers()
            for j in consumer_ops:
                if j.type == "TensorArrayWriteV3":
                    return j
    return None


def _merge_shapes_for_tf(shape1, shape2):
    """
    Merge 2 shapes, return merged shape, set unknown for dims with different values.
    Raise exception for mismatch.
    """
    if shape1 is None:
        return shape2
    if shape2 is None:
        return shape1

    utils.make_sure(utils.is_list_or_tuple(shape1), "invalid type for shape1")
    utils.make_sure(utils.is_list_or_tuple(shape2), "invalid type for shape2")
    utils.make_sure(len(shape1) == len(shape2), "shapes rank mismatch: shape1=%s, shape2=%s", shape1, shape2)

    merged = []
    for d1, d2 in zip(shape1, shape2):
        d = d1
        if d1 is None:
            d = d2
        elif d2 is not None:
            # None means unknown in tensorflow
            # NOTE(review): this branch also clears dims where d1 == d2; the
            # docstring implies only *differing* dims should become unknown --
            # confirm against upstream before changing.
            d = None
        merged.append(d)
    return merged


######################################################################
####    Below is our old tf shape inference as a supplementary    ####
####               and a substitute for TF 1.5.0                  ####
######################################################################

# Ops whose single data input's shape passes straight through to the output.
direct_ops = [
    "Cast",
    "Exit",
    "Floor",
    "Identity",
    "LogicalNot",
    "ReverseSequence",
    "Relu6",
    "Sigmoid",
    "Square",
    "Tanh"
]
# Elementwise binary ops whose output shape follows broadcasting rules.
broadcast_ops = [
    "Add",
    "Greater",
    "GreaterEqual",
    "Less",
    "LessEqual",
    "LogicalAnd",
    "LogicalOr",
    "Mul",
    "RealDiv",
    "Sub"
]


def infer_shape_for_graph_legacy(tf_graph):
    # Fixed-point iteration using the legacy per-op inference below.
    shape_updated = True
    while shape_updated:
        shape_updated = False
        for op in tf_graph.get_operations():
            updated = infer_shape_for_op_legacy(op)
            if updated:
                shape_updated = True

    return tf_graph


def infer_shape_for_op_legacy(op):
    """Legacy per-op shape inference; returns True when any shape was updated."""
    # invoke tf shape inference first
    infer_shape_for_op(op)

    has_unknown_input_shape = any(get_tf_tensor_shape(inp) is None for inp in op.inputs)
    has_unknown_output_shape = any(get_tf_tensor_shape(out) is None for out in op.outputs)

    # an input shape may be inferred from op output or other input shapes
    # try to infer it first
    if has_unknown_input_shape:
        if infer_input_shapes(op):
            return True

    if not has_unknown_output_shape:
        return False

    # for those ops, we don't expect all input shapes available to infer output shapes.
    ret = infer_output_shapes_with_partial_inputs(op)
    if ret is not None:
        return ret

    # for ops, we need all input shapes ready to infer output shapes.
    are_all_input_shape_ready = True
    no_shape = []
    for i in op.inputs:
        if get_tf_tensor_shape(i) is None:
            are_all_input_shape_ready = False
            no_shape.append(i.name)

    if not are_all_input_shape_ready:
        logger.debug("op %s has inputs don't have shape specified, they are: %s", op.name, no_shape)
        return False

    if op.type in direct_ops:
        return set_shape_from_input(op.inputs[0], op.outputs[0])

    if op.type in broadcast_ops:
        return set_shape_from_inputs_broadcast(op.inputs, op.outputs[0])

    if op.type == "RandomUniform":
        shape_op = op.inputs[0].op
        if not shape_op or shape_op.type != "Shape":
            return False
        # RandomUniform's output shape is the shape of the tensor feeding the Shape op.
        return set_shape_from_input(shape_op.inputs[0], op.outputs[0])

    if op.type == "Gather":
        # uses the following link to know how to infer shape of output
        # https://www.tensorflow.org/api_docs/python/tf/gather
        shape_params = get_tf_tensor_shape(op.inputs[0])
        shape_indices = get_tf_tensor_shape(op.inputs[1])
        # gather can only have 2 inputs
        # https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/gather.html
        if len(op.inputs) == 3:
            axis_op = op.inputs[2].op
            if not utils.is_tf_const_op(axis_op):
                return False
            axis = get_tf_const_value(axis_op)
        else:
            axis = 0

        # result shape: params[:axis] ++ indices-shape ++ params[axis+1:]
        shape = shape_params[:axis] + shape_indices + shape_params[axis + 1:]
        op.outputs[0].set_shape(shape)
        return True

    if op.type in ["All", "Any", "Max", "Min"]:
        axis_op = op.inputs[1].op
        if not utils.is_tf_const_op(axis_op):
            return False
        axis = get_tf_const_value(axis_op)
        if not isinstance(axis, list):
            axis = [axis]
        keep_dims = op.get_attr("keep_dims")
        shape = get_tf_tensor_shape(op.inputs[0])
        # normalize negative reduction axes
        for i, _ in enumerate(axis):
            if axis[i] < 0:
                axis[i] += len(shape)

        new_shape = []
        for i, _ in enumerate(shape):
            if i in axis:
                # reduced dims become 1 with keep_dims, otherwise disappear
                if keep_dims:
                    new_shape.append(1)
            else:
                new_shape.append(shape[i])

        op.outputs[0].set_shape(new_shape)
        logger.debug("set %s op [%s] with new shape %s", op.type, op.outputs[0].name, new_shape)
        return True

    if op.type == "ExpandDims":
        # https://www.tensorflow.org/api_docs/python/tf/expand_dims
        input_shape = get_tf_tensor_shape(op.inputs[0])
        dim_op = op.inputs[1].op
        if input_shape is None or not utils.is_tf_const_op(dim_op):
            return False

        dim = get_tf_const_value(dim_op)
        if dim < 0:
            # negative dim counts from the end of the *output* rank (rank + 1)
            dim = dim + len(input_shape) + 1

        new_shape = input_shape[:dim] + [1] + input_shape[dim:]
        op.outputs[0].set_shape(new_shape)
        logger.debug("set [%s] with new shape %s", op.outputs[0].name, new_shape)
        return True

    if op.type == "Unpack":
        input_shape = get_tf_tensor_shape(op.inputs[0])
        if input_shape is None:
            return False

        axis = op.get_attr("axis")
        axis = axis if axis >= 0 else axis + len(input_shape)
        # the link below says that the rank of output is "rank(input) -1",
        # from this statement "num" must equal to input_shape[axis], and if not tf will throw a runtime error
        # https://www.tensorflow.org/api_docs/python/tf/unstack
        new_shape = input_shape[:axis] + input_shape[axis + 1:]
        for output in op.outputs:
            output.set_shape(new_shape)
            logger.debug("set %s op [%s] with new shape %s", op.type, output.name, new_shape)
        return True

    if op.type in ["Minimum", "Maximum"]:
        # ops that are elementwise and support broadcasting
        # NOTE(review): the comprehension variable shadows the `op` parameter
        # (harmless in py3 comprehension scope, but confusing) -- consider renaming.
        input_shapes = [get_tf_tensor_shape(op) for op in op.inputs]
        new_shape = broadcast_shape_inference(*input_shapes)
        op.outputs[0].set_shape(new_shape)
        return True

    return False


def infer_input_shapes(op):
    """Propagate a known shape *backwards* onto unknown inputs; True if updated."""
    if op.type in ["Select", "SelectV2"]:
        shape_t = get_tf_tensor_shape(op.inputs[1])
        shape_e = get_tf_tensor_shape(op.inputs[2])
        # copy shape if t OR e does not have a shape, no update if t AND e both have shapes
        if shape_t is None or shape_e is None:
            new_shape = shape_t or shape_e
            if new_shape is not None:
                op.inputs[1].set_shape(new_shape)
                op.inputs[2].set_shape(new_shape)
                logger.debug("set [%s, %s] with new shape %s", op.inputs[1].name, op.inputs[2].name, new_shape)
                return True
    return False


def infer_output_shapes_with_partial_inputs(op):
    """Infer output shapes for ops that don't need every input shape.

    Returns True/False when the op is handled here, or None when the op is
    not one of the partial-input cases (caller falls through).
    """
    # output shape of concat op: only the dim val of concatenated dim will be changed
    # so only partial(at least one) input shapes need to be known to infer output shape of concat op
    if utils.is_tf_concat_op(op):
        data_inputs = op.inputs[:-1]
        input_shapes = [get_tf_tensor_shape(inp) for inp in data_inputs]
        input_shapes = [shape for shape in input_shapes if shape is not None]
        if not input_shapes:
            logger.debug("all input shapes of concat op %s are None, can't infer its output shape", op.name)
            return False

        new_shape = input_shapes[0]
        axis_op = op.inputs[-1]
        rank = len(new_shape)
        if not utils.is_tf_const_op(axis_op):
            # unknown concat axis: any dim could change, mark all unknown
            op.outputs[0].set_shape([-1] * rank)
            return True

        axis = get_tf_const_value(axis_op)
        axis = axis if axis >= 0 else axis + rank
        new_shape[axis] = -1
        if len(input_shapes) == len(data_inputs):  # all input shapes are known
            concat_dim_vals = list(np.array(input_shapes)[:, axis])
            # only when inputs' shape are known, then val of concat dim can be calculated
            if concat_dim_vals.count(-1) == 0:
                new_shape[axis] = sum(concat_dim_vals)

        op.outputs[0].set_shape(new_shape)
        logger.debug("set Concat op [%s] with new shape %s", op.outputs[0].name, new_shape)
        return True

    if op.type in ["Select", "SelectV2"]:
        # output and both value inputs share one shape
        new_shape = get_tf_tensor_shape(op.inputs[1])
        if new_shape is None:
            new_shape = get_tf_tensor_shape(op.inputs[2])
        if new_shape is not None:
            op.outputs[0].set_shape(new_shape)
            op.inputs[1].set_shape(new_shape)
            op.inputs[2].set_shape(new_shape)
            logger.debug("set [%s] with new shape %s", op.outputs[0].name, new_shape)
            return True
        return False

    if op.type == "Pack":
        axis = op.get_attr("axis")
        input_shape = None
        # any one known input shape stands in for all (Pack inputs are same-shaped)
        for i in op.inputs:
            s = get_tf_tensor_shape(i)
            if s is not None:
                input_shape = s
                break
        if input_shape is None:
            return False
        if axis < 0:
            axis += len(input_shape)
        for i in op.inputs:
            if not get_tf_tensor_shape(i):
                i.set_shape(input_shape)
                logger.debug("set [%s] with new shape %s", i.name, input_shape)
        # stacking adds one dim of size len(op.inputs) at `axis`
        new_shape = input_shape[:axis] + [len(op.inputs)] + input_shape[axis:]
        op.outputs[0].set_shape(new_shape)
        logger.debug("set Pack op [%s] with new shape %s", op.outputs[0].name, new_shape)
        return True

    if op.type == "Pow":
        # https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/pow
        new_shape = get_tf_tensor_shape(op.inputs[0])
        if new_shape is None:
            new_shape = get_tf_tensor_shape(op.inputs[1])
        if new_shape is not None:
            op.outputs[0].set_shape(new_shape)
            logger.debug("set [%s] with new shape %s", op.outputs[0].name, new_shape)
            return True
        return False

    return None


def set_shape_from_input(input_tensor, output_tensor):
    """Copy the input tensor's shape onto the output tensor; True if set."""
    new_shape = get_tf_tensor_shape(input_tensor)
    if new_shape is not None:
        output_tensor.set_shape(new_shape)
        logger.debug("set [%s] with new shape %s", output_tensor.name, new_shape)
        return True
    return False


def set_shape_from_inputs_broadcast(input_tensors, output_tensor):
    """Set the output shape to the broadcast of the first two input shapes."""
    s1 = get_tf_tensor_shape(input_tensors[0])
    s2 = get_tf_tensor_shape(input_tensors[1])
    new_shape = broadcast_shape_inference(s1, s2)
    if new_shape is not None:
        output_tensor.set_shape(new_shape)
        logger.debug("set [%s] with new shape %s", output_tensor.name, new_shape)
        return True
    return False


def broadcast_shape_inference(shape_0, shape_1):
    """Return the broadcast of two shapes, or None if they are incompatible."""
    if shape_0 is None:
        return shape_1
    if shape_1 is None:
        return shape_0

    # two dimensions are compatible when they are equal, or one of them is 1
    # compare from last dim
    if len(shape_0) > len(shape_1):
        tmp = shape_0
        shape_0 = shape_1
        shape_1 = tmp

    # NOTE(review): new_shape aliases the caller's shape_1 list and is mutated
    # in place below; also, when ranks differ, shape_1[i] indexes from the
    # front rather than aligning trailing dims as true broadcasting does --
    # confirm whether callers only pass equal-rank shapes.
    new_shape = shape_1
    l = len(shape_0)
    if l == 0:
        return new_shape

    i = l - 1
    while i >= 0:
        if shape_0[i] == shape_1[i]:
            # do nothing
            pass
        elif shape_0[i] == 1:
            # do nothing
            pass
        elif shape_1[i] == 1:
            new_shape[i] = shape_0[i]
        # maybe one of them is -1, we can use the other one as real shape.
        elif shape_0[i] == -1:
            pass
        elif shape_1[i] == -1:
            new_shape[i] = shape_0[i]
        else:
            logger.warning("two shapes not possible to broadcast, %s, %s", shape_0, shape_1)
            return None
        i -= 1
    return new_shape
diff --git a/lib/python3.10/site-packages/tf2onnx/tf_loader.py b/lib/python3.10/site-packages/tf2onnx/tf_loader.py
new file mode 100644
index 0000000000000000000000000000000000000000..1cc724e35d687a06b10da17247ef987df93b1391
--- /dev/null
+++ b/lib/python3.10/site-packages/tf2onnx/tf_loader.py
@@ -0,0 +1,639 @@
# SPDX-License-Identifier: Apache-2.0


"""Methods to load tensorflow graph from graphdef, checkpoint or saved_model."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import logging
from distutils.version import LooseVersion

import tensorflow as tf
import numpy as np
from tensorflow.python.ops import lookup_ops

from tf2onnx import utils
from tf2onnx.tf_utils import get_tf_version, tflist_to_onnx, get_hash_table_info, replace_placeholders_with_tables

logger = logging.getLogger(__name__)


# pylint: disable=unused-argument,unused-import,no-value-for-parameter,unexpected-keyword-arg,ungrouped-imports
# pylint: disable=missing-function-docstring,import-outside-toplevel,useless-import-alias,missing-docstring


def is_tf2():
    # True for any TF 2.x runtime.
    return tf.__version__.startswith("2.")


def _not_implemented_tf_placeholder(name):
    """Creates a placeholder function for missing Tensorflow imports"""

    def not_implemented_tf_placeholder(*args, **kwargs):
        # NOTE(review): "verison" typo below is runtime text; left unchanged here.
        raise NotImplementedError(
            f'Tensorflow verison {tf.__version__} does not implement '
            f'`{name}`, try converting your model with a different version.'
        )

    return not_implemented_tf_placeholder


# Optional imports below fall back to placeholders / empty tuples so that this
# module stays importable across the supported range of TF versions.
try:
    from tensorflow.python.framework.function_def_to_graph import function_def_to_graph
except ImportError:
    function_def_to_graph = _not_implemented_tf_placeholder('function_def_to_graph')

try:
    # pylint: disable=protected-access
    from tensorflow.python.saved_model.load import _RestoredResource as TfRestoredResourceType
except ImportError:
    TfRestoredResourceType = tuple()  # isinstance(x, tuple()) is always false

try:
    from tensorflow.python.training.tracking.tracking import AutoTrackable as TfAutoTrackableType
except ImportError:
    TfAutoTrackableType = tuple()

if is_tf2():
    convert_variables_to_constants = tf.compat.v1.graph_util.convert_variables_to_constants
    from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
else:
    from tensorflow.python.framework.graph_util import convert_variables_to_constants

    convert_variables_to_constants_v2 = _not_implemented_tf_placeholder('convert_variables_to_constants_v2')

# Version shims: bind the tf-1.x style API names used throughout this module.
if is_tf2():
    tf_reset_default_graph = tf.compat.v1.reset_default_graph
    tf_global_variables = tf.compat.v1.global_variables
    tf_session = tf.compat.v1.Session  # pylint: disable=invalid-name
    tf_graphdef = tf.compat.v1.GraphDef
    tf_import_meta_graph = tf.compat.v1.train.import_meta_graph
    tf_gfile = tf.io.gfile
    tf_placeholder = tf.compat.v1.placeholder
    tf_placeholder_with_default = tf.compat.v1.placeholder_with_default
    extract_sub_graph = tf.compat.v1.graph_util.extract_sub_graph
elif LooseVersion(tf.__version__) >= "1.13":
    # 1.13 introduced the compat namespace
    tf_reset_default_graph = tf.compat.v1.reset_default_graph
    tf_global_variables = tf.compat.v1.global_variables
    tf_session = tf.compat.v1.Session  # pylint: disable=invalid-name
    tf_graphdef = tf.compat.v1.GraphDef
    tf_import_meta_graph = tf.compat.v1.train.import_meta_graph
    tf_gfile = tf.gfile
    tf_placeholder = tf.compat.v1.placeholder
    tf_placeholder_with_default = tf.compat.v1.placeholder_with_default
    extract_sub_graph = tf.compat.v1.graph_util.extract_sub_graph
else:
    # older than 1.13
    tf_reset_default_graph = tf.reset_default_graph
    tf_global_variables = tf.global_variables
    tf_session = tf.Session  # pylint: disable=invalid-name
    tf_graphdef = tf.GraphDef
    tf_import_meta_graph = tf.train.import_meta_graph
    tf_gfile = tf.gfile
    tf_placeholder = tf.placeholder
    tf_placeholder_with_default = tf.placeholder_with_default
    extract_sub_graph = tf.graph_util.extract_sub_graph


def inputs_without_resource(sess, input_names):
    """Filter out input names whose tensors have dtype `resource`; best effort."""
    try:
        new_input_names = []
        for n in input_names:
            t = sess.graph.get_tensor_by_name(n)
            if t.dtype != tf.dtypes.resource:
                new_input_names.append(n)
        input_names = new_input_names
    except:  # pylint: disable=bare-except
        # deliberate best-effort: on any lookup failure keep the original list
        pass
    return input_names


def convert_variables_to_constants_large_model(func):
    # For large models we use internal tf methods as a hack

    if tf.__version__.startswith("2.2."):
        try:
            from tensorflow.python.framework.convert_to_constants import \
                _convert_variables_to_constants_v2_impl  # pylint: disable=protected-access
        except ImportError:
            _not_implemented_tf_placeholder("_convert_variables_to_constants_v2_impl")()
        frozen_graph_def, _ = \
            _convert_variables_to_constants_v2_impl(func, lower_control_flow=False, aggressive_inlining=True)
        return frozen_graph_def

    # newer TF: converter-data based internal API
    try:
        from tensorflow.python.framework.convert_to_constants import \
            _FunctionConverterData, _replace_variables_by_constants  # pylint: disable=protected-access
    except ImportError:
        _not_implemented_tf_placeholder("_replace_variables_by_constants")()
    converter_data = _FunctionConverterData(func=func, lower_control_flow=False, aggressive_inlining=True)
    frozen_graph_def, _ = _replace_variables_by_constants(converter_data=converter_data)
    return frozen_graph_def


def from_function(func, input_names, output_names, large_model=False):
    """Freeze a ConcreteFunction into an optimized GraphDef."""
    if large_model:
        # bypasses the 2GB protobuf limit of the normal freezing path
        return convert_variables_to_constants_large_model(func)

    if get_tf_version() < LooseVersion("2.2"):
        frozen_func = convert_variables_to_constants_v2(func, lower_control_flow=False)
    else:
        frozen_func = convert_variables_to_constants_v2(func, lower_control_flow=False, aggressive_inlining=True)
    graph_def = frozen_func.graph.as_graph_def(add_shapes=True)
    # output_names = [i.name for i in frozen_func.outputs]
    with tf.Graph().as_default() as tf_graph:
        with tf_session(graph=tf_graph) as sess:
            tf.import_graph_def(graph_def, name='')
            input_names = inputs_without_resource(sess, input_names)
            graph_def = tf_optimize(input_names, output_names, graph_def)
    return graph_def


def freeze_session(sess, input_names=None, output_names=None):
    """Freezes the state of a session into a pruned computation graph."""
    output_node_names = [i.split(':')[:-1][0] for i in output_names]
    keep_var_names = [i.split(':')[:-1][0] for i in input_names]
    with sess.graph.as_default():
        output_node_names = output_node_names or []
        # keep variables and the requested inputs alive through the pruning
        output_node_names += [v.op.name for v in tf_global_variables()]
        output_node_names += keep_var_names
        graph_def = sess.graph.as_graph_def(add_shapes=True)
        # strip device placements so the graph is portable
        for node in graph_def.node:
            node.device = ""
        graph_def = convert_variables_to_constants(sess, graph_def, output_node_names)
    return graph_def


def remove_redundant_inputs(frozen_graph, input_names):
    """Remove redundant inputs not in frozen graph."""
    frozen_inputs = []
    # get inputs in frozen graph
    node_names = set(n.name for n in frozen_graph.node)
    frozen_inputs = [inp for inp in input_names if utils.node_name(inp) in node_names]
    deleted_inputs = list(set(input_names) - set(frozen_inputs))
    if deleted_inputs:
        logger.warning("inputs [%s] is not in frozen graph, delete them", ",".join(deleted_inputs))
    return frozen_inputs


def from_graphdef(model_path, input_names, output_names):
    """Load tensorflow graph from graphdef."""
    # make sure we start with clean default graph
    tf_reset_default_graph()
    with tf_session() as sess:
        graph_def = tf_graphdef()
        with tf_gfile.GFile(model_path, 'rb') as f:
            try:
                content = f.read()
            except Exception as e:
                raise OSError(
                    "Unable to load file '{}'.".format(model_path)) from e
            try:
                graph_def.ParseFromString(content)
            except Exception as e:
                raise RuntimeError(
                    "Unable to parse file '{}'.".format(model_path)) from e
            tf.import_graph_def(graph_def, name='')
        input_names = inputs_without_resource(sess, input_names)
        frozen_graph = freeze_session(sess, input_names=input_names, output_names=output_names)
        input_names = remove_redundant_inputs(frozen_graph, input_names)

    tf_reset_default_graph()
    with tf_session() as sess:
        input_names = inputs_without_resource(sess, input_names)
        frozen_graph = tf_optimize(input_names, output_names, frozen_graph)
    tf_reset_default_graph()
    return frozen_graph, input_names, output_names


def from_checkpoint(model_path, input_names, output_names):
    """Load tensorflow graph from checkpoint."""
    # make sure we start with clean default graph
    tf_reset_default_graph()
    # model_path = checkpoint/checkpoint.meta
    with tf.device("/cpu:0"):
        with tf_session() as sess:
            saver = tf_import_meta_graph(model_path, clear_devices=True)
            # restore from model_path minus the ".meta"
            saver.restore(sess, model_path[:-5])
            input_names = inputs_without_resource(sess, input_names)
            frozen_graph = freeze_session(sess, input_names=input_names, output_names=output_names)
            input_names = remove_redundant_inputs(frozen_graph, input_names)

    tf_reset_default_graph()
    with tf_session() as sess:
        frozen_graph = tf_optimize(input_names, output_names, frozen_graph)
    tf_reset_default_graph()
    return frozen_graph, input_names, output_names


def _from_saved_model_v1(sess, model_path, input_names, output_names, tag, signatures):
    """Load tensorflow graph from saved_model."""

    wrn_no_tag = "'--tag' not specified for saved_model. Using --tag serve"
    wrn_empty_tag = "'--tag' value is empty string. Using tag =[[]]"

    if tag is None:
        tag = [tf.saved_model.tag_constants.SERVING]
        logger.warning(wrn_no_tag)

    if tag == '':
        tag = [[]]
        logger.warning(wrn_empty_tag)

    if not isinstance(tag, list):
        tag = [tag]

    imported = tf.saved_model.loader.load(sess, tag, model_path)
    for k in imported.signature_def.keys():
        if k.startswith("_"):
            # consider signatures starting with '_' private
            continue
        signatures.append(k)
    try:
        from tensorflow.contrib.saved_model.python.saved_model import signature_def_utils
        # pylint: disable=unnecessary-lambda
        get_signature_def = lambda meta_graph_def, k: \
            signature_def_utils.get_signature_def_by_key(meta_graph_def, k)
    except ImportError:
        # TF1.12 changed the api
        get_signature_def = lambda meta_graph_def, k: meta_graph_def.signature_def[k]

    if input_names is None:
        input_names = []
        for k in signatures:
            inputs_tensor_info = get_signature_def(imported, k).inputs
            for _, input_tensor in inputs_tensor_info.items():
                if input_tensor.name not in input_names:
                    input_names.append(input_tensor.name)
    tensors_to_rename = {}
    if output_names is None:
        output_names = []
        for k in signatures:
            outputs_tensor_info = get_signature_def(imported, k).outputs
            for structured_name, output_tensor in outputs_tensor_info.items():
                if output_tensor.name not in output_names:
                    output_names.append(output_tensor.name)
                    tensors_to_rename[output_tensor.name] = structured_name
    frozen_graph = freeze_session(sess, input_names=input_names, output_names=output_names)
    return frozen_graph, input_names, output_names, tensors_to_rename


def _get_hash_table_info_from_trackable(trackable, table_names, key_dtypes, value_dtypes,
                                        removed_resource_to_placeholder, placeholder_to_table_info):
    """Recursively collect hash-table metadata from a restored trackable object,
    appending into the provided lists/dict (mutated in place)."""
    # pylint: disable=protected-access
    for r in trackable.__dict__.values():
        if isinstance(r, TfRestoredResourceType) and hasattr(r, '_create_resource'):
            try:
                table_handle = id(r.resource_handle)
            except Exception:  # pylint: disable=broad-except
                continue
            initializer = r._create_resource.concrete_functions[0].function_def
            new_names, new_k_dtypes, new_v_dtypes = get_hash_table_info(initializer.node_def)
            table_names.extend(new_names)
            key_dtypes.extend(new_k_dtypes)
            value_dtypes.extend(new_v_dtypes)
            if table_handle in removed_resource_to_placeholder and len(new_names) == 1:
                table_info = (new_names[0], new_k_dtypes[0], new_v_dtypes[0])
                placeholder_to_table_info[removed_resource_to_placeholder[table_handle]] = table_info
        if isinstance(r, TfAutoTrackableType):
            # recurse into nested trackables
            _get_hash_table_info_from_trackable(r, table_names, key_dtypes, value_dtypes,
                                                removed_resource_to_placeholder, placeholder_to_table_info)


def _remove_non_variable_resources_from_captures(concrete_func):
    """
    Removes all non-variable resources (such as tables) from a function's captured inputs to prevent tf from
    raising a 'cannot convert dtype resource to numpy' error while freezing the graph.
    """
    # pylint: disable=protected-access
    resource_id_to_placeholder = {}
    graph_captures_copy = None
    func_captures_copy = None
    if hasattr(concrete_func.graph, '_captures') and hasattr(concrete_func, '_captured_inputs'):
        # keep copies so _restore_captured_resources can undo the surgery
        graph_captures_copy = concrete_func.graph._captures.copy()
        func_captures_copy = concrete_func._captured_inputs.copy()
        variable_handles = {id(v.handle) for v in concrete_func.graph.variables}
        for k, v in list(concrete_func.graph._captures.items()):
            val_tensor, name_tensor = v
            if val_tensor.dtype == tf.resource and id(val_tensor) not in variable_handles:
                resource_id_to_placeholder[id(val_tensor)] = name_tensor.name.split(':')[0]
                del concrete_func.graph._captures[k]
                for i in reversed(range(len(concrete_func._captured_inputs))):
                    if concrete_func._captured_inputs[i] is val_tensor:
                        concrete_func._captured_inputs.pop(i)
            elif val_tensor.dtype != tf.resource:
                npval = val_tensor.numpy()
                if not hasattr(npval, 'dtype'):
                    # Hack around a TF bug until PR is merged: https://github.com/tensorflow/tensorflow/pull/45610
                    arr = np.array(npval)
                    val_tensor.numpy = lambda arr=arr: arr
    else:
        logger.warning(
            "Could not search for non-variable resources. Concrete function internal representation may have changed.")
    return resource_id_to_placeholder, graph_captures_copy, func_captures_copy


def _restore_captured_resources(concrete_func, graph_captures_copy, func_captures_copy):
    """Undoes effect of _remove_non_variable_resources_from_captures on concrete_func"""
    # pylint: disable=protected-access
    if hasattr(concrete_func.graph, '_captures') and hasattr(concrete_func, '_captured_inputs'):
        concrete_func.graph._captures = graph_captures_copy
        concrete_func._captured_inputs = func_captures_copy


def _from_saved_model_v2(model_path, input_names, output_names, tag, signature_def,
                         concrete_function_index, large_model):
    """Load tensorflow graph from saved_model."""

    wrn_no_tag = "'--tag' not specified for saved_model. Using --tag serve"
    wrn_empty_tag = "'--tag' value is empty string. Using tag =[[]]"
    wrn_sig_1 = "'--signature_def' not specified, using first signature: %s"
    err_many_sig = "Cannot load multiple signature defs in TF2.x: %s"
    err_no_call = "Model doesn't contain usable concrete functions under  __call__. Try --signature-def instead."
    err_index = "Invalid concrete_function value: %i. Valid values are [0 to %i]"
    err_no_sig = "No signatures found in model. Try --concrete_function instead."
    err_sig_nomatch = "Specified signature not in model %s"
    err_large_model = "model exceeds maximum protobuf size of 2GB. Try running with --large_model flag."
+ + if tag is None: + tag = ['serve'] + logger.warning(wrn_no_tag) + + if tag == '': + tag = [[]] + logger.warning(wrn_empty_tag) + + utils.make_sure(len(signature_def) < 2, err_many_sig, str(signature_def)) + imported = tf.saved_model.load(model_path, tags=tag) # pylint: disable=no-value-for-parameter + + all_sigs = imported.signatures.keys() + valid_sigs = [s for s in all_sigs if not s.startswith("_")] + logger.info("Signatures found in model: %s", "[" + ",".join(valid_sigs) + "].") + + concrete_func = None + if concrete_function_index is not None: + utils.make_sure(hasattr(imported, "__call__"), err_no_call) + utils.make_sure(concrete_function_index < len(imported.__call__.concrete_functions), + err_index, concrete_function_index, len(imported.__call__.concrete_functions) - 1) + args, kwargs = imported.__call__.concrete_functions[concrete_function_index].structured_input_signature + concrete_func = imported.__call__.get_concrete_function(*args, **kwargs) + elif signature_def: + utils.make_sure(signature_def[0] in valid_sigs, err_sig_nomatch, signature_def[0]) + concrete_func = imported.signatures[signature_def[0]] + else: + utils.make_sure(len(valid_sigs) > 0, err_no_sig) + logger.warning(wrn_sig_1, valid_sigs[0]) + concrete_func = imported.signatures[valid_sigs[0]] + + tensors_to_rename = {} + if input_names is None: + inputs = [tensor.name for tensor in concrete_func.inputs if tensor.dtype != tf.dtypes.resource] + if concrete_func.structured_input_signature is not None: + args, kwargs = concrete_func.structured_input_signature + structured_inputs = [t.name for t in args if isinstance(t, tf.TensorSpec)] + sorted(kwargs.keys()) + structured_inputs = set(inp + ":0" for inp in structured_inputs) + if any(inp in structured_inputs for inp in inputs): + inputs = [inp for inp in inputs if inp in structured_inputs] + else: + inputs = input_names + + if output_names is None: + outputs = [tensor.name for tensor in concrete_func.outputs if tensor.dtype != 
tf.dtypes.resource] + if isinstance(concrete_func.structured_outputs, dict): + # outputs are sorted, sort structured_outputs the same way + structured_outputs = sorted(concrete_func.structured_outputs.keys()) + tensors_to_rename.update(zip(outputs, structured_outputs)) + logger.info("Output names: %r", structured_outputs) + else: + logger.info("Output names: %r", outputs) + else: + outputs = output_names + logger.info("Outputs not left as None; will use provided names not structured output names.") + + # Avoid errors due to bug in TF freezing + removed_resource_to_placeholder, graph_captures_copy, func_captures_copy = \ + _remove_non_variable_resources_from_captures(concrete_func) + + try: + frozen_graph = from_function(concrete_func, inputs, outputs, large_model) + except ValueError as e: + if any(msg in str(e) for msg in ["exceeds maximum protobuf size of 2GB", "string too long"]): + raise ValueError(err_large_model) + raise e + + # We might be returning the concrete_func so let's put it back in working order + _restore_captured_resources(concrete_func, graph_captures_copy, func_captures_copy) + + table_names, key_dtypes, value_dtypes = get_hash_table_info(frozen_graph) + placeholder_to_table_info = {} + _get_hash_table_info_from_trackable(imported, table_names, key_dtypes, value_dtypes, + removed_resource_to_placeholder, placeholder_to_table_info) + + initialized_tables = {} + for n, k_dtype, val_dtype in zip(table_names, key_dtypes, value_dtypes): + h = lookup_ops.hash_table_v2(k_dtype, val_dtype, shared_name=n) + try: + k, v = lookup_ops.lookup_table_export_v2(h, k_dtype, val_dtype) + initialized_tables[n] = (k.numpy(), v.numpy()) + except Exception: # pylint: disable=broad-except + logger.warning("Could not initialize table with shared_name = %r", n) + + for placeholder in removed_resource_to_placeholder.values(): + if placeholder not in placeholder_to_table_info: + logger.error("Could not find table resource to replace placeholder %s", placeholder) + + 
    replace_placeholders_with_tables(frozen_graph, placeholder_to_table_info)

    return frozen_graph, inputs, outputs, concrete_func, imported, initialized_tables, tensors_to_rename


def from_saved_model(model_path, input_names, output_names, tag=None,
                     signatures=None, concrete_function=None, large_model=False,
                     return_concrete_func=False, return_initialized_tables=False, return_tensors_to_rename=False):
    """Load tensorflow graph from saved_model.

    Dispatches to the TF2 or TF1 saved-model loader depending on the installed
    TF version.  Always returns ``[frozen_graph, input_names, output_names]``;
    each ``return_*`` flag appends further items in this fixed order:
    ``concrete_func, imported`` then ``initialized_tables`` then
    ``tensors_to_rename`` — callers must unpack accordingly.
    """
    if signatures is None:
        signatures = []
    tf_reset_default_graph()
    # Run loading/freezing on CPU to avoid accidental GPU memory allocation.
    with tf.device("/cpu:0"):
        if is_tf2():
            frozen_graph, input_names, output_names, concrete_func, imported, initialized_tables, tensors_to_rename = \
                _from_saved_model_v2(model_path, input_names, output_names,
                                     tag, signatures, concrete_function, large_model)
            result = [frozen_graph, input_names, output_names]
            if return_concrete_func:
                result += [concrete_func, imported]
            if return_initialized_tables:
                result += [initialized_tables]
            if return_tensors_to_rename:
                result += [tensors_to_rename]
        else:
            with tf_session() as sess:
                frozen_graph, input_names, output_names, tensors_to_rename = \
                    _from_saved_model_v1(sess, model_path, input_names, output_names, tag, signatures)
                result = [frozen_graph, input_names, output_names]
                # NOTE(review): the TF1 path silently ignores
                # return_concrete_func — confirm no TF1 caller requests it.
                # The v1 loader produces no trackable tables, hence the empty
                # dict placeholder.
                if return_initialized_tables:
                    result += [{}]
                if return_tensors_to_rename:
                    result += [tensors_to_rename]
    tf_reset_default_graph()
    return result


def from_keras(model_path, input_names, output_names):
    """Load keras model - experimental for now."""
    from tensorflow.python import keras as _keras
    from tensorflow.python.eager import context
    from tensorflow.python.keras.saving import saving_utils as _saving_utils

    # Handles Keras when Eager mode is enabled.
+ custom_objects = None + with tf.device("/cpu:0"): + if context.executing_eagerly(): + _keras.backend.clear_session() + _keras.backend.set_learning_phase(False) + keras_model = _keras.models.load_model(model_path, custom_objects) + + function = _saving_utils.trace_model_call(keras_model) + concrete_func = function.get_concrete_function() + # allow to pass inputs and outputs from caller if we don't want all of them + input_names = [input_tensor.name for input_tensor in concrete_func.inputs + if input_tensor.dtype != tf.dtypes.resource] + output_names = [output_tensor.name for output_tensor in concrete_func.outputs + if output_tensor.dtype != tf.dtypes.resource] + frozen_graph = from_function(concrete_func, input_names, output_names) + else: + # Handles Keras when Eager mode is disabled. + _keras.backend.clear_session() + _keras.backend.set_learning_phase(False) + keras_model = _keras.models.load_model(model_path, custom_objects) + # allow to pass inputs and outputs from caller if we don't want all of them + input_names = keras_model.inputs + output_names = keras_model.outputs + sess = _keras.backend.get_session() + input_names = inputs_without_resource(sess, input_names) + frozen_graph = freeze_session(sess, input_names=input_names, output_names=output_names) + tf_reset_default_graph() + with tf_session() as sess: + frozen_graph = tf_optimize(input_names, output_names, frozen_graph) + tf_reset_default_graph() + return frozen_graph, input_names, output_names + + +def tf_optimize_grappler(input_names, output_names, graph_def, fold_constant=None): + from tensorflow.core.protobuf import meta_graph_pb2 as meta_graph_pb2, config_pb2, rewriter_config_pb2 + from tensorflow.python.grappler import tf_optimizer as tf_opt + + config = config_pb2.ConfigProto() + rewrite_options = config.graph_options.rewrite_options + config.graph_options.infer_shapes = True + # TODO: if we turn on pruning, grappler removes some identities that the tf-1.x lstm rewriter + # depends on so for now 
don't turn this on.
    rewrite_options.optimizers[:] = [
        # 'pruning', 'constfold', 'arithmetic', 'dependency', 'function',
        'constfold', 'function'
    ]
    meta_graph = tf.compat.v1.train.export_meta_graph(graph_def=graph_def)
    # Grappler prunes everything not reachable from a fetch collection, so
    # register all inputs and outputs as fetches to keep them alive.
    fetch_collection = meta_graph_pb2.CollectionDef()
    for t in input_names + output_names:
        fetch_collection.node_list.value.append(t)
    meta_graph.collection_def["train_op"].CopyFrom(fetch_collection)
    graph_def = tf_opt.OptimizeGraph(config, meta_graph)
    return graph_def


def tf_optimize(input_names, output_names, graph_def, fold_constant=True):
    """Extract inference subgraph and optimize graph.

    Prunes ``graph_def`` to the nodes needed for ``input_names`` ->
    ``output_names`` and then optimizes with grappler (TF2 or TF >= 1.15) or
    with the legacy graph-transform tool on older versions.
    """
    assert isinstance(input_names, list)
    assert isinstance(output_names, list)

    # TODO: is this needed ?
    needed_names = [utils.node_name(i) for i in input_names] + \
                   [utils.node_name(i) for i in output_names]
    graph_def = extract_sub_graph(graph_def, needed_names)

    want_grappler = is_tf2() or LooseVersion(tf.__version__) >= "1.15"
    if want_grappler:
        graph_def = tf_optimize_grappler(input_names, output_names, graph_def, fold_constant)
    else:
        # the older transform path
        from tensorflow.tools.graph_transforms import TransformGraph  # pylint: disable=redefined-outer-name
        transforms = [
            "fold_constants(ignore_errors=true)",
            "remove_attribute(attribute_name=_class)",  # remove node colocation attributes
            "fold_batch_norms",
            "fold_old_batch_norms",
        ]
        graph_def = TransformGraph(graph_def, input_names, output_names, transforms)

    return graph_def


def tf_reload_graph(tf_graph):
    """Invoke tensorflow cpp shape inference by reloading graph_def."""
    # invoke c api if tf version is below 1.8
    if get_tf_version() < LooseVersion("1.8"):
        logger.debug(
            "On TF < 1.8, graph is constructed by python API, "
            "which doesn't invoke shape inference, please set "
            "TF_C_API_GRAPH_CONSTRUCTION=1 to enable it"
        )

    # Re-importing the serialized graph triggers the C++ shape inference pass;
    # add_shapes=True records the inferred output shapes on each node.
    graph_def = tf_graph.as_graph_def(add_shapes=True)
    with tf.Graph().as_default() as inferred_graph:
tf.import_graph_def(graph_def, name="") + return inferred_graph + + +def is_function(g): + if is_tf2(): + return 'tensorflow.python.framework.func_graph.FuncGraph' in str(type(g)) + return False + + +_FUNCTIONS = {} + + +def resolve_functions(tf_graph): + def toposort(data): + while True: + ordered = set(item for item, dep in data.items() if not dep) + if not ordered: + break + yield ordered + data = {item: (dep - ordered) for item, dep in data.items() if item not in ordered} + + _, _, _, _, _, functions = tflist_to_onnx(tf_graph, {}) + data = {} + for k, fdef in tf_graph._functions.items(): # pylint: disable=protected-access + input_shapes = functions.get(k) + fdef = fdef.definition + if input_shapes and len(fdef.signature.input_arg) < len(input_shapes): + input_shapes = input_shapes[:len(fdef.signature.input_arg)] + try: + func = function_def_to_graph(fdef, input_shapes=input_shapes) + except: # pylint: disable=bare-except + # if there is a missmatch between caller and function use the functions shape + logger.warning("shape missmatch between caller and function: %s", k) + func = function_def_to_graph(fdef) + _FUNCTIONS[k] = func + _, _, _, _, _, tfunctions = tflist_to_onnx(func, {}) + functions.update(tfunctions) + data[k] = set(tfunctions.keys()) + + result = [] + for d in toposort(data): + result.extend(list(d)) + return [_FUNCTIONS[k] for k in result] + + +def set_function(name, func): + _FUNCTIONS[name] = func + + +def find_function(name): + return _FUNCTIONS.get(name) diff --git a/lib/python3.10/site-packages/tf2onnx/tf_utils.py b/lib/python3.10/site-packages/tf2onnx/tf_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a120412a0b8d39a7a14679e195833f77e471fdcb --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tf_utils.py @@ -0,0 +1,460 @@ +# SPDX-License-Identifier: Apache-2.0 + + +""" +tf2onnx.tf_utils - misc utilities for tf2onnx that interface with tensorflow +""" + +from __future__ import division +from __future__ 
import print_function +from __future__ import unicode_literals + +import collections +from distutils.version import LooseVersion + +import numpy as np +import tensorflow as tf + +from tensorflow.core.framework import types_pb2, tensor_pb2, graph_pb2 +from tensorflow.python.framework import tensor_util + +from onnx import helper, onnx_pb, numpy_helper + +from tf2onnx.utils import make_sure, is_tf_const_op, port_name, map_onnx_to_numpy_type +from . import logging + +logger = logging.getLogger(__name__) + +# +# mapping dtypes from tensorflow to onnx +# +TF_TO_ONNX_DTYPE = { + types_pb2.DT_FLOAT: onnx_pb.TensorProto.FLOAT, + types_pb2.DT_HALF: onnx_pb.TensorProto.FLOAT16, + types_pb2.DT_BFLOAT16: onnx_pb.TensorProto.FLOAT16, + types_pb2.DT_DOUBLE: onnx_pb.TensorProto.DOUBLE, + types_pb2.DT_INT32: onnx_pb.TensorProto.INT32, + types_pb2.DT_INT16: onnx_pb.TensorProto.INT16, + types_pb2.DT_INT8: onnx_pb.TensorProto.INT8, + types_pb2.DT_UINT8: onnx_pb.TensorProto.UINT8, + types_pb2.DT_UINT16: onnx_pb.TensorProto.UINT16, + types_pb2.DT_INT64: onnx_pb.TensorProto.INT64, + types_pb2.DT_STRING: onnx_pb.TensorProto.STRING, + types_pb2.DT_COMPLEX64: onnx_pb.TensorProto.COMPLEX64, + types_pb2.DT_COMPLEX128: onnx_pb.TensorProto.COMPLEX128, + types_pb2.DT_BOOL: onnx_pb.TensorProto.BOOL, + types_pb2.DT_RESOURCE: onnx_pb.TensorProto.INT64, # TODO: hack to allow processing on control flow + types_pb2.DT_VARIANT: onnx_pb.TensorProto.UNDEFINED, + types_pb2.DT_QUINT8: onnx_pb.TensorProto.UINT8, +} + + +def tf_to_onnx_tensor(tensor, name=""): + """Convert tensorflow tensor to onnx tensor.""" + np_data = get_tf_tensor_data(tensor) + if np_data.dtype == np.object: + # assume np_data is string, numpy_helper.from_array accepts ndarray, + # in which each item is of str while the whole dtype is of object. 
    try:
        # Faster but fails on Unicode
        np_data = np_data.astype(np.str).astype(np.object)
    except UnicodeDecodeError:
        # Fall back to an element-wise decode for non-ASCII content.
        decode = np.vectorize(lambda x: x.decode('UTF-8'))
        np_data = decode(np_data).astype(np.object)
    except:  # pylint: disable=bare-except
        raise RuntimeError("Not support type: {}".format(type(np_data.flat[0])))
    return numpy_helper.from_array(np_data, name=name)


def get_tf_tensor_data(tensor):
    """Get data from tensor."""
    make_sure(isinstance(tensor, tensor_pb2.TensorProto), "Require TensorProto")
    np_data = tensor_util.MakeNdarray(tensor)
    make_sure(isinstance(np_data, np.ndarray), "%r isn't ndarray", np_data)
    return np_data


def get_tf_const_value(op, as_list=True):
    """
    If as_list=True, return the array as a (possibly nested) list.
    Otherwise, return data of type np.ndarray.

    If a tensor is a scalar having value 1,
    when as_list=False, return np.array(1), type is <class 'numpy.ndarray'>,
    when as_list=True, return 1, type is <class 'int'>.
    """
    make_sure(is_tf_const_op(op), "%r isn't a const op", op.name)
    value = get_tf_tensor_data(op.get_attr("value"))
    if as_list:
        value = value.tolist()
    return value


def get_tf_shape_attr(node):
    """Get shape from tensorflow attr "shape"."""
    # Returns a list of ints, or None when the attr is absent or the rank is
    # unknown.
    dims = None
    try:
        shape = get_tf_node_attr(node, "shape")
        if not shape.unknown_rank:
            dims = [int(d.size) for d in shape.dim]
    except:  # pylint: disable=bare-except
        pass
    return dims


def get_tf_tensor_shape(tensor):
    # Shape as a list (None entries for unknown dims), or None when even the
    # rank is unknown.
    shape = []
    try:
        shape = tensor.get_shape().as_list()
    except Exception:  # pylint: disable=broad-except
        shape = None
    return shape


def map_tf_dtype(dtype):
    # Map a TF dtype enum to the ONNX TensorProto dtype; falsy input is passed
    # through unchanged.
    if dtype:
        dtype = TF_TO_ONNX_DTYPE[dtype]
    return dtype


def get_tf_node_attr(node, name):
    """Parser TF node attribute."""
    return node.get_attr(name)


def get_tf_version():
    # Parsed TF version for ordered comparisons.
    return LooseVersion(tf.__version__)

def compress_graph_def(graph_def):
    """
    Remove large const values from graph. 
This lets us import the graph and run shape inference without TF crashing. + """ + node_defs = list(graph_def.node) + const_node_values = {} + for node_def in node_defs: + if node_def.op == 'Const': + tensor = node_def.attr["value"].tensor + # Small constants are sometimes used to store shape information and must be maintained + if len(tensor.tensor_content) > 1000: + make_sure(node_def.name not in const_node_values, "Two nodes in graph have same name %s", node_def.name) + const_node_values[node_def.name] = tensor.tensor_content + tensor.tensor_content = b'' + return const_node_values + +def get_index_from_strided_slice_of_shape(node, outputs_to_values): + """Returns the index of the dimension that the strided slice is reading from the shape node or None""" + attr_vals = { + 'shrink_axis_mask': 1, + 'ellipsis_mask': 0, + 'begin_mask': 0, + 'new_axis_mask': 0, + 'end_mask': 0 + } + for a in node.node_def.attr: + if a in attr_vals: + i = get_tf_node_attr(node, a) + if i != attr_vals[a]: + return None + i1 = outputs_to_values.get(node.inputs[1].name) + i2 = outputs_to_values.get(node.inputs[2].name) + i3 = outputs_to_values.get(node.inputs[3].name) + if i1 is None or i2 is None or i3 is None: + return None + if i1.shape != (1,) or i2.shape != (1,) or i3.shape != (1,): + return None + i1, i2, i3 = i1[0], i2[0], i3[0] + if i1 + 1 != i2 or i3 != 1: + return None + return i1 + +def compute_const_folding_using_tf(g, const_node_values, graph_outputs): + """Find nodes with constant inputs and compute their values using TF""" + if const_node_values is None: + const_node_values = {} + graph_outputs = set(graph_outputs) + from tf2onnx.tf_loader import tf_session, tf_placeholder # pylint: disable=import-outside-toplevel + + ops = g.get_operations() + outputs_to_values = {} + outputs_to_dtypes = {} + outputs_to_shapes = {} + shape_node_outputs = {} + + def is_small_shape(x): + return np.product(x) <= 1000 + + def is_huge_shape(x): + return np.product(x) >= 1000000 + + for node in 
ops: + # Load values of constants. Use const_node_values if possible + if node.type in ["Const", "ConstV2"]: + tensor = node.node_def.attr["value"].tensor + if node.name in const_node_values: + tensor.tensor_content = const_node_values[node.name] + outputs_to_values[node.outputs[0].name] = get_tf_tensor_data(tensor) + outputs_to_dtypes[node.outputs[0].name] = node.outputs[0].dtype + for out in node.outputs: + outputs_to_shapes[out.name] = get_tf_tensor_shape(out) + + for node in ops: + if node.type == "Shape": + shape = outputs_to_shapes.get(node.inputs[0].name) + if shape is not None: + shape_node_outputs[node.outputs[0].name] = shape + + unneeded_outputs = set() + progress = True + while progress: + progress = False + for node in ops: + # Find ops with constant inputs and compute their values + input_names = [i.name for i in node.inputs] + output_names = [i.name for i in node.outputs] + if node.type == 'StridedSlice' and input_names[0] in shape_node_outputs \ + and output_names[0] not in outputs_to_values: + shape = shape_node_outputs[input_names[0]] + i = get_index_from_strided_slice_of_shape(node, outputs_to_values) + if i is not None and 0 <= i < len(shape) and shape[i] is not None: + np_dtype = map_onnx_to_numpy_type(map_tf_dtype(node.outputs[0].dtype)) + outputs_to_values[output_names[0]] = np.array(shape[i], dtype=np_dtype) + outputs_to_dtypes[node.outputs[0].name] = node.outputs[0].dtype + progress = True + can_fold = node.type not in ['Enter', 'Placeholder', 'PlaceholderWithDefault'] + can_fold = can_fold and not node.type.startswith('Random') + can_fold = can_fold and len(input_names) > 0 and all(inp in outputs_to_values for inp in input_names) + # We can only fold nodes with a single output + can_fold = can_fold and len(output_names) == 1 and output_names[0] not in outputs_to_values + # Skip if value already computed, used, and discarded + can_fold = can_fold and output_names[0] not in unneeded_outputs and output_names[0] not in graph_outputs + if 
can_fold: + # Make a mini graph containing just the node to fold + g2 = tf.Graph() + with g2.as_default(): + for inp in input_names: + tf_placeholder(outputs_to_dtypes[inp], name=inp.split(':')[0]) + mini_graph_def = g2.as_graph_def() + mini_graph_def.node.append(node.node_def) + g3 = tf.Graph() + with g3.as_default(): + feed_dict = {} + inp_shapes = [] + for inp in input_names: + inp_np = outputs_to_values[inp] + feed_dict[inp] = inp_np + inp_shapes.append(inp_np.shape) + try: + with tf_session() as sess: + tf.import_graph_def(mini_graph_def, name='') + results = sess.run(output_names, feed_dict=feed_dict) + if is_huge_shape(results[0].shape) and all(is_small_shape(inp) for inp in inp_shapes): + logger.debug("Skipping folding of node %s since result shape %s is much larger " + "than input shapes %s", node.name, results[0].shape, inp_shapes) + else: + outputs_to_values[output_names[0]] = results[0] + outputs_to_dtypes[output_names[0]] = node.outputs[0].dtype + progress = True + except Exception: # pylint: disable=broad-except + logger.debug("Could not fold node %s", node.name) + unneeded_outputs.update(outputs_to_values.keys()) + for node in ops: + # Mark values we need to keep + input_names = [i.name for i in node.inputs] + output_names = [i.name for i in node.outputs] + if len(output_names) == 1 and output_names[0] in outputs_to_values: + continue + for i in input_names: + if i in unneeded_outputs: + unneeded_outputs.remove(i) + for node in unneeded_outputs: + # Remove unneeded values to prevent memory usage explosion + if node in outputs_to_values: + del outputs_to_values[node] + del outputs_to_dtypes[node] + + for node in ops: + # We don't need the constants any more + if node.type in ["Const", "ConstV2"] and node.outputs[0].name in outputs_to_values: + del outputs_to_values[node.outputs[0].name] + del outputs_to_dtypes[node.outputs[0].name] + + logger.info("Computed %d values for constant folding", len(outputs_to_values)) + return outputs_to_values, 
outputs_to_dtypes

def get_hash_table_info(nodes_or_graph_def):
    """
    Return lists of the shared_names, key_dtypes, and value_dtypes of all hash tables declared in the graph_def
    or list of nodes
    """
    if isinstance(nodes_or_graph_def, graph_pb2.GraphDef):
        nodes = nodes_or_graph_def.node
    else:
        nodes = nodes_or_graph_def
    names = []
    key_dtypes = []
    val_dtypes = []
    for n in nodes:
        if n.op in ["HashTableV2", "MutableHashTableV2"]:
            if all(k in n.attr for k in ['shared_name', 'key_dtype', 'value_dtype']):
                name = n.attr['shared_name'].s
                # Anonymous tables (empty shared_name) cannot be looked up by
                # name later, so they are skipped.
                if name != b'':
                    names.append(name)
                    key_dtypes.append(n.attr['key_dtype'].type)
                    val_dtypes.append(n.attr['value_dtype'].type)
    return names, key_dtypes, val_dtypes

def replace_placeholders_with_tables(graph_def, placeholder_to_table_info):
    """
    Given a graph_def and a map from placeholder names to a tuple of table names, key dtypes, and value dtypes,
    Replaces placeholder ops in the graph_def with HashTableV2 ops
    """
    for n in graph_def.node:
        if n.op == "Placeholder" and n.name in placeholder_to_table_info:
            name, key_dtype, val_dtype = placeholder_to_table_info[n.name]
            # Drop all placeholder attrs (e.g. dtype/shape) before rewriting
            # the op in place as a HashTableV2.
            for a in list(n.attr):
                del n.attr[a]
            n.op = "HashTableV2"
            n.attr['shared_name'].s = name
            n.attr['key_dtype'].type = key_dtype
            n.attr['value_dtype'].type = val_dtype

def read_tf_node_def_attrs(node_def, input_dtypes, input_shapes):
    """Given a tf node def, returns a dict of attribute names to values"""
    from tf2onnx.tf_loader import tf_session, tf_placeholder  # pylint: disable=import-outside-toplevel
    # NOTE(review): node_def is mutated here (inputs cleared, renamed to
    # "node") — callers presumably pass a throwaway copy; confirm before reuse.
    del node_def.input[:]
    node_def.name = "node"

    # read_tf_node_attrs uses some tf methods that require the node to be loaded into a valid TF graph
    g = tf.Graph()
    with g.as_default():
        for i, (dtype, shape) in enumerate(zip(input_dtypes, input_shapes)):
            inp = "input" + str(i)
            tf_placeholder(dtype, name=inp, shape=shape)
            node_def.input.append(inp)
        mini_graph_def = g.as_graph_def()
mini_graph_def.node.append(node_def) + g2 = tf.Graph() + with g2.as_default(): + with tf_session() as sess: + tf.import_graph_def(mini_graph_def, name='') + node = sess.graph.get_operation_by_name("node") + return read_tf_node_attrs(node) + + +def read_tf_node_attrs(node): + """Given a tf Node, returns a dict of attribute names to values""" + attr = {} + attr_cnt = collections.Counter() + + # ignore the following attributes + ignored_attr = {"T", "unknown_rank", "_class", "Tshape", "use_cudnn_on_gpu", "Index", "Tpaddings", + "TI", "Tparams", "Tindices", "Tlen", "Tdim", "Tin", "dynamic_size", "Tmultiples", + "Tblock_shape", "Tcrops", "index_type", "Taxis", "U", "maxval", + "Tout", "Tlabels", "Tindex", "element_shape", "Targmax", "Tperm", "Tcond", + "T_threshold", "element_dtype", "shape_type", "_lower_using_switch_merge", + "parallel_iterations", "_num_original_outputs", "output_types", "output_shapes", + "key_dtype", "value_dtype", "Tin", "Tout", "capacity", "component_types", "shapes", + "Toutput_types", "dense_shapes", "Tdense", "Tsegmentids", "Tshift", "Tnumsegments", "SrcT", + "body", "cond", "then_branch", "else_branch", "f", + "Tcomplex", "Treal", # For RFFT, Tcomplex is ignored because + # onnx.helper.make_node fails, + # TODO: it should be added back. 
+ } + + for a in node.node_def.attr: + attr_cnt[a] += 1 + value = get_tf_node_attr(node, a) + if a in ignored_attr or isinstance(value, tensor_pb2.TensorProto): + pass + elif a == "shape": + shape = get_tf_shape_attr(node) + if shape is not None: + attr[a] = shape + elif a == "DstT": + attr["to"] = map_tf_dtype(value) + elif isinstance(value, tf.DType): + attr[a] = map_tf_dtype(value) + elif isinstance(value, list) and len(value) > 0 and isinstance(value[0], tf.DType): + attr[a] = [map_tf_dtype(v) for v in value] + else: + attr[a] = get_tf_node_attr(node, a) + + return attr, attr_cnt + +def tflist_to_onnx(g, shape_override, const_node_values=None, ignore_default=None, use_default=None): + """ + Convert the tf-node list into an onnx graph with minimal rewrites so + we can use the onnx graph as intermediate graph. + """ + + node_list = g.get_operations() + functions = {} + + # some stats + op_cnt = collections.Counter() + attr_cnt = collections.Counter() + onnx_nodes = [] + output_shapes = {} + dtypes = {} + + # find outputs + ops = node_list + + # create dict with output to shape mappings + for node in ops: + for out in node.outputs: + shape = shape_override.get(out.name) + if shape is None: + shape = get_tf_tensor_shape(out) + dtypes[out.name] = map_tf_dtype(out.dtype) + output_shapes[out.name] = shape + + for node in ops: + attr, new_attr_cnt = read_tf_node_attrs(node) + attr_cnt += new_attr_cnt + takeit = True + op_cnt[node.type] += 1 + for a in node.node_def.attr: + attr_cnt[a] += 1 + value = get_tf_node_attr(node, a) + if a == "T": + if value and not isinstance(value, list): + dtypes[node.name] = map_tf_dtype(value) + elif a in {"body", "cond", "then_branch", "else_branch", "f"}: + input_shapes = [inp.get_shape() for inp in node.inputs] + nattr = get_tf_node_attr(node, a) + attr[a] = nattr.name + functions[nattr.name] = input_shapes + elif isinstance(value, tensor_pb2.TensorProto): + if const_node_values and node.name in const_node_values: + 
value.tensor_content = const_node_values[node.name] + onnx_tensor = tf_to_onnx_tensor(value, name=port_name(node.name)) + attr[a] = onnx_tensor + + node_type = node.type + input_names = [i.name for i in node.inputs] + output_names = [i.name for i in node.outputs] + + if node_type == 'PlaceholderWithDefault': + if ignore_default and node.name in ignore_default: + node_type = 'Placeholder' + input_names = [] + elif use_default and node.name in use_default: + node_type = 'Identity' + + if takeit: + try: + onnx_node = helper.make_node(node_type, input_names, output_names, name=node.name, **attr) + onnx_nodes.append(onnx_node) + except Exception as ex: + logger.error("pass1 convert failed for %s, ex=%s", node, ex) + raise + + return onnx_nodes, op_cnt, attr_cnt, output_shapes, dtypes, functions + + +def tensorflow_to_onnx(graph, shape_override, const_node_values=None, ignore_default=None, use_default=None): + """ + Load tensorflow graph and do a conversion. + """ + return tflist_to_onnx(graph, shape_override, const_node_values, ignore_default, use_default) diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/AbsOptions.py b/lib/python3.10/site-packages/tf2onnx/tflite/AbsOptions.py new file mode 100644 index 0000000000000000000000000000000000000000..e88849c9c49aaf68b3b0ba8bb576fd6d6138c3de --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/AbsOptions.py @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class AbsOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsAbsOptions(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = AbsOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def AbsOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return 
flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # AbsOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def AbsOptionsStart(builder): builder.StartObject(0) +def AbsOptionsEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/ActivationFunctionType.py b/lib/python3.10/site-packages/tf2onnx/tflite/ActivationFunctionType.py new file mode 100644 index 0000000000000000000000000000000000000000..6e564a69fb1ee44f9433fe48c638fbc5506a3d98 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/ActivationFunctionType.py @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +class ActivationFunctionType(object): + NONE = 0 + RELU = 1 + RELU_N1_TO_1 = 2 + RELU6 = 3 + TANH = 4 + SIGN_BIT = 5 + diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/ArgMinOptions.py b/lib/python3.10/site-packages/tf2onnx/tflite/ArgMinOptions.py new file mode 100644 index 0000000000000000000000000000000000000000..29475d01fbe05762bd008faf27bb63024a1080f8 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/ArgMinOptions.py @@ -0,0 +1,38 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class ArgMinOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsArgMinOptions(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ArgMinOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def ArgMinOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # ArgMinOptions + def Init(self, buf, 
pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # ArgMinOptions + def OutputType(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + +def ArgMinOptionsStart(builder): builder.StartObject(1) +def ArgMinOptionsAddOutputType(builder, outputType): builder.PrependInt8Slot(0, outputType, 0) +def ArgMinOptionsEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/CosOptions.py b/lib/python3.10/site-packages/tf2onnx/tflite/CosOptions.py new file mode 100644 index 0000000000000000000000000000000000000000..e667331fa9eec8b307802d371fec39d96d3db930 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/CosOptions.py @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class CosOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsCosOptions(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = CosOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def CosOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # CosOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def CosOptionsStart(builder): builder.StartObject(0) +def CosOptionsEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/DepthToSpaceOptions.py b/lib/python3.10/site-packages/tf2onnx/tflite/DepthToSpaceOptions.py new file mode 100644 index 0000000000000000000000000000000000000000..ed8a4ba0a3b04a64dff31e908663d8ba3f176b4b --- /dev/null +++ 
b/lib/python3.10/site-packages/tf2onnx/tflite/DepthToSpaceOptions.py @@ -0,0 +1,38 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class DepthToSpaceOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsDepthToSpaceOptions(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = DepthToSpaceOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def DepthToSpaceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # DepthToSpaceOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # DepthToSpaceOptions + def BlockSize(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + +def DepthToSpaceOptionsStart(builder): builder.StartObject(1) +def DepthToSpaceOptionsAddBlockSize(builder, blockSize): builder.PrependInt32Slot(0, blockSize, 0) +def DepthToSpaceOptionsEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/DepthwiseConv2DOptions.py b/lib/python3.10/site-packages/tf2onnx/tflite/DepthwiseConv2DOptions.py new file mode 100644 index 0000000000000000000000000000000000000000..01c631e1d9783151d569d550167315dcaffc5798 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/DepthwiseConv2DOptions.py @@ -0,0 +1,86 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class DepthwiseConv2DOptions(object): + __slots__ = 
['_tab'] + + @classmethod + def GetRootAsDepthwiseConv2DOptions(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = DepthwiseConv2DOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def DepthwiseConv2DOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # DepthwiseConv2DOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # DepthwiseConv2DOptions + def Padding(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # DepthwiseConv2DOptions + def StrideW(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # DepthwiseConv2DOptions + def StrideH(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # DepthwiseConv2DOptions + def DepthMultiplier(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # DepthwiseConv2DOptions + def FusedActivationFunction(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # DepthwiseConv2DOptions + def DilationWFactor(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 1 + + # DepthwiseConv2DOptions + def DilationHFactor(self): + o = 
flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 1 + +def DepthwiseConv2DOptionsStart(builder): builder.StartObject(7) +def DepthwiseConv2DOptionsAddPadding(builder, padding): builder.PrependInt8Slot(0, padding, 0) +def DepthwiseConv2DOptionsAddStrideW(builder, strideW): builder.PrependInt32Slot(1, strideW, 0) +def DepthwiseConv2DOptionsAddStrideH(builder, strideH): builder.PrependInt32Slot(2, strideH, 0) +def DepthwiseConv2DOptionsAddDepthMultiplier(builder, depthMultiplier): builder.PrependInt32Slot(3, depthMultiplier, 0) +def DepthwiseConv2DOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(4, fusedActivationFunction, 0) +def DepthwiseConv2DOptionsAddDilationWFactor(builder, dilationWFactor): builder.PrependInt32Slot(5, dilationWFactor, 1) +def DepthwiseConv2DOptionsAddDilationHFactor(builder, dilationHFactor): builder.PrependInt32Slot(6, dilationHFactor, 1) +def DepthwiseConv2DOptionsEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/FillOptions.py b/lib/python3.10/site-packages/tf2onnx/tflite/FillOptions.py new file mode 100644 index 0000000000000000000000000000000000000000..81be6c0c94adcd1ec6ef49bd11b038e393a1ca4e --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/FillOptions.py @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class FillOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsFillOptions(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = FillOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def FillOptionsBufferHasIdentifier(cls, buf, offset, 
size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # FillOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def FillOptionsStart(builder): builder.StartObject(0) +def FillOptionsEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/GatherOptions.py b/lib/python3.10/site-packages/tf2onnx/tflite/GatherOptions.py new file mode 100644 index 0000000000000000000000000000000000000000..9e07b5cd5069d9566580c7feb2885525593037c9 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/GatherOptions.py @@ -0,0 +1,38 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class GatherOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsGatherOptions(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = GatherOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GatherOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # GatherOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # GatherOptions + def Axis(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + +def GatherOptionsStart(builder): builder.StartObject(1) +def GatherOptionsAddAxis(builder, axis): builder.PrependInt32Slot(0, axis, 0) +def GatherOptionsEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/GreaterOptions.py 
b/lib/python3.10/site-packages/tf2onnx/tflite/GreaterOptions.py new file mode 100644 index 0000000000000000000000000000000000000000..cce07307c0a6438e11aa4d0e0393b287c972db04 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/GreaterOptions.py @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class GreaterOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsGreaterOptions(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = GreaterOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GreaterOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # GreaterOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def GreaterOptionsStart(builder): builder.StartObject(0) +def GreaterOptionsEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/LessEqualOptions.py b/lib/python3.10/site-packages/tf2onnx/tflite/LessEqualOptions.py new file mode 100644 index 0000000000000000000000000000000000000000..8a971f3bdef1331c78c304bd998ee2fc56764e96 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/LessEqualOptions.py @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class LessEqualOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsLessEqualOptions(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = LessEqualOptions() + x.Init(buf, n + offset) + 
return x + + @classmethod + def LessEqualOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # LessEqualOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def LessEqualOptionsStart(builder): builder.StartObject(0) +def LessEqualOptionsEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/LocalResponseNormalizationOptions.py b/lib/python3.10/site-packages/tf2onnx/tflite/LocalResponseNormalizationOptions.py new file mode 100644 index 0000000000000000000000000000000000000000..50606440a4fef93cc2c71b8846fba49be38cb954 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/LocalResponseNormalizationOptions.py @@ -0,0 +1,62 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class LocalResponseNormalizationOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsLocalResponseNormalizationOptions(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = LocalResponseNormalizationOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def LocalResponseNormalizationOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # LocalResponseNormalizationOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # LocalResponseNormalizationOptions + def Radius(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # LocalResponseNormalizationOptions + def 
Bias(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) + return 0.0 + + # LocalResponseNormalizationOptions + def Alpha(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) + return 0.0 + + # LocalResponseNormalizationOptions + def Beta(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) + return 0.0 + +def LocalResponseNormalizationOptionsStart(builder): builder.StartObject(4) +def LocalResponseNormalizationOptionsAddRadius(builder, radius): builder.PrependInt32Slot(0, radius, 0) +def LocalResponseNormalizationOptionsAddBias(builder, bias): builder.PrependFloat32Slot(1, bias, 0.0) +def LocalResponseNormalizationOptionsAddAlpha(builder, alpha): builder.PrependFloat32Slot(2, alpha, 0.0) +def LocalResponseNormalizationOptionsAddBeta(builder, beta): builder.PrependFloat32Slot(3, beta, 0.0) +def LocalResponseNormalizationOptionsEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/MirrorPadOptions.py b/lib/python3.10/site-packages/tf2onnx/tflite/MirrorPadOptions.py new file mode 100644 index 0000000000000000000000000000000000000000..ec6ecacdf902434fbf15e7a152ff1eabe0725fd1 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/MirrorPadOptions.py @@ -0,0 +1,38 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class MirrorPadOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsMirrorPadOptions(cls, buf, offset): + n = 
flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = MirrorPadOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def MirrorPadOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # MirrorPadOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # MirrorPadOptions + def Mode(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + +def MirrorPadOptionsStart(builder): builder.StartObject(1) +def MirrorPadOptionsAddMode(builder, mode): builder.PrependInt8Slot(0, mode, 0) +def MirrorPadOptionsEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/Model.py b/lib/python3.10/site-packages/tf2onnx/tflite/Model.py new file mode 100644 index 0000000000000000000000000000000000000000..fdb4a1b04989d308c01ae1f737b7a4674e402d32 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/Model.py @@ -0,0 +1,210 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class Model(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsModel(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = Model() + x.Init(buf, n + offset) + return x + + @classmethod + def ModelBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # Model + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # Model + def Version(self): + o = 
flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos) + return 0 + + # Model + def OperatorCodes(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + x = self._tab.Vector(o) + x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 + x = self._tab.Indirect(x) + from tf2onnx.tflite.OperatorCode import OperatorCode + obj = OperatorCode() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # Model + def OperatorCodesLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Model + def OperatorCodesIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + return o == 0 + + # Model + def Subgraphs(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + x = self._tab.Vector(o) + x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 + x = self._tab.Indirect(x) + from tf2onnx.tflite.SubGraph import SubGraph + obj = SubGraph() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # Model + def SubgraphsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Model + def SubgraphsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + return o == 0 + + # Model + def Description(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.String(o + self._tab.Pos) + return None + + # Model + def Buffers(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + x = self._tab.Vector(o) + x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 + x = self._tab.Indirect(x) + from tf2onnx.tflite.Buffer import Buffer + obj 
= Buffer() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # Model + def BuffersLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Model + def BuffersIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + return o == 0 + + # Model + def MetadataBuffer(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # Model + def MetadataBufferAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # Model + def MetadataBufferLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Model + def MetadataBufferIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + return o == 0 + + # Model + def Metadata(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16)) + if o != 0: + x = self._tab.Vector(o) + x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 + x = self._tab.Indirect(x) + from tf2onnx.tflite.Metadata import Metadata + obj = Metadata() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # Model + def MetadataLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Model + def MetadataIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16)) + return o == 0 + + # Model + def SignatureDefs(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) + if o != 0: + x = 
self._tab.Vector(o) + x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 + x = self._tab.Indirect(x) + from tf2onnx.tflite.SignatureDef import SignatureDef + obj = SignatureDef() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # Model + def SignatureDefsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Model + def SignatureDefsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) + return o == 0 + +def ModelStart(builder): builder.StartObject(8) +def ModelAddVersion(builder, version): builder.PrependUint32Slot(0, version, 0) +def ModelAddOperatorCodes(builder, operatorCodes): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(operatorCodes), 0) +def ModelStartOperatorCodesVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def ModelAddSubgraphs(builder, subgraphs): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(subgraphs), 0) +def ModelStartSubgraphsVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def ModelAddDescription(builder, description): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(description), 0) +def ModelAddBuffers(builder, buffers): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(buffers), 0) +def ModelStartBuffersVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def ModelAddMetadataBuffer(builder, metadataBuffer): builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(metadataBuffer), 0) +def ModelStartMetadataBufferVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def ModelAddMetadata(builder, metadata): builder.PrependUOffsetTRelativeSlot(6, flatbuffers.number_types.UOffsetTFlags.py_type(metadata), 0) +def 
ModelStartMetadataVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def ModelAddSignatureDefs(builder, signatureDefs): builder.PrependUOffsetTRelativeSlot(7, flatbuffers.number_types.UOffsetTFlags.py_type(signatureDefs), 0) +def ModelStartSignatureDefsVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def ModelEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/QuantizeOptions.py b/lib/python3.10/site-packages/tf2onnx/tflite/QuantizeOptions.py new file mode 100644 index 0000000000000000000000000000000000000000..0476f56b9b8bf01836ac02b7ee396f6cbd740978 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/QuantizeOptions.py @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class QuantizeOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsQuantizeOptions(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = QuantizeOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def QuantizeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # QuantizeOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def QuantizeOptionsStart(builder): builder.StartObject(0) +def QuantizeOptionsEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/ReverseSequenceOptions.py b/lib/python3.10/site-packages/tf2onnx/tflite/ReverseSequenceOptions.py new file mode 100644 index 0000000000000000000000000000000000000000..b39fb3f0a5b592f6e03a32e49e542508c230ee48 --- /dev/null +++ 
b/lib/python3.10/site-packages/tf2onnx/tflite/ReverseSequenceOptions.py @@ -0,0 +1,46 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class ReverseSequenceOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsReverseSequenceOptions(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ReverseSequenceOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def ReverseSequenceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # ReverseSequenceOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # ReverseSequenceOptions + def SeqDim(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # ReverseSequenceOptions + def BatchDim(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + +def ReverseSequenceOptionsStart(builder): builder.StartObject(2) +def ReverseSequenceOptionsAddSeqDim(builder, seqDim): builder.PrependInt32Slot(0, seqDim, 0) +def ReverseSequenceOptionsAddBatchDim(builder, batchDim): builder.PrependInt32Slot(1, batchDim, 0) +def ReverseSequenceOptionsEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/ScatterNdOptions.py b/lib/python3.10/site-packages/tf2onnx/tflite/ScatterNdOptions.py new file mode 100644 index 0000000000000000000000000000000000000000..c759d386a28be14e360a2f18458ea88a695f13e7 --- /dev/null +++ 
b/lib/python3.10/site-packages/tf2onnx/tflite/ScatterNdOptions.py @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class ScatterNdOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsScatterNdOptions(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ScatterNdOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def ScatterNdOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # ScatterNdOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def ScatterNdOptionsStart(builder): builder.StartObject(0) +def ScatterNdOptionsEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/SparseIndexVector.py b/lib/python3.10/site-packages/tf2onnx/tflite/SparseIndexVector.py new file mode 100644 index 0000000000000000000000000000000000000000..6231651c6691c812bb7da62e98c957dbdde68ddf --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/SparseIndexVector.py @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +class SparseIndexVector(object): + NONE = 0 + Int32Vector = 1 + Uint16Vector = 2 + Uint8Vector = 3 + diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/SplitVOptions.py b/lib/python3.10/site-packages/tf2onnx/tflite/SplitVOptions.py new file mode 100644 index 0000000000000000000000000000000000000000..b431775b0ed6ddcfc68866de39db283684abba8a --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/SplitVOptions.py @@ -0,0 +1,38 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically 
generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class SplitVOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsSplitVOptions(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SplitVOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def SplitVOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # SplitVOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # SplitVOptions + def NumSplits(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + +def SplitVOptionsStart(builder): builder.StartObject(1) +def SplitVOptionsAddNumSplits(builder, numSplits): builder.PrependInt32Slot(0, numSplits, 0) +def SplitVOptionsEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/SquaredDifferenceOptions.py b/lib/python3.10/site-packages/tf2onnx/tflite/SquaredDifferenceOptions.py new file mode 100644 index 0000000000000000000000000000000000000000..c79eacba5bdc732a4e02708d5450948ff6ff9580 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/SquaredDifferenceOptions.py @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class SquaredDifferenceOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsSquaredDifferenceOptions(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SquaredDifferenceOptions() + 
x.Init(buf, n + offset) + return x + + @classmethod + def SquaredDifferenceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # SquaredDifferenceOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def SquaredDifferenceOptionsStart(builder): builder.StartObject(0) +def SquaredDifferenceOptionsEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite_handlers/__init__.py b/lib/python3.10/site-packages/tf2onnx/tflite_handlers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ed971b878be042af738f3e41d837e30dea56aa3a --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite_handlers/__init__.py @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: Apache-2.0 + +"""tf2onnx.tflite_handlers module""" + +from . import ( + tfl_math, + tfl_nn, + tfl_controlflow, + tfl_direct, + tfl_tensor, + tfl_postprocess, +) diff --git a/lib/python3.10/site-packages/tf2onnx/tflite_handlers/tfl_controlflow.py b/lib/python3.10/site-packages/tf2onnx/tflite_handlers/tfl_controlflow.py new file mode 100644 index 0000000000000000000000000000000000000000..60724a40bead90f5954130fb63869a9c63db5698 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite_handlers/tfl_controlflow.py @@ -0,0 +1,134 @@ +# SPDX-License-Identifier: Apache-2.0 + + +""" +tfl_controlflow +""" + +import copy +import numpy as np +from onnx.onnx_pb import TensorProto + +from tf2onnx.handler import tfl_op +from tf2onnx import utils +from tf2onnx.tf_loader import find_function +from tf2onnx.graph_builder import GraphBuilder +from tf2onnx.onnx_opset.controlflow import parameter_binding, inline_subgraph + + +# pylint: disable=unused-argument,missing-docstring,unused-variable,pointless-string-statement,invalid-name + + +@tfl_op(["TFL_WHILE"]) +class TflWhile: + @classmethod + def version_7(cls, 
ctx, node, **kwargs): + tfl_while_inputs = node.input + output_shapes = node.output_shapes + output_dtypes = node.output_dtypes + output_names = node.output + + cond_name = node.get_attr_str("cond_subgraph_index") + cond_graph = find_function(cond_name) + cond_graph.parent_graph = ctx + + body_name = node.get_attr_str("body_subgraph_index") + body = find_function(body_name) + body.parent_graph = ctx + + ctx.remove_node(node.name) + + cond_binding = parameter_binding(cond_graph, tfl_while_inputs) + cond_outputs = inline_subgraph(ctx, cond_graph, cond_name, cond_binding) + + # Potential scan output candidates are identified in the body subgraph using tfl_scan_output_rewriter. + # They can then be optimized in this tfl loop handler provided they are not used in the cond subgraph. + scan_outputs = sorted(body.scan_outputs, reverse=True) + def input_is_unused(g, index): + return len(g.find_output_consumers(g.inputs[index])) == 0 + scan_outputs = [(i, out) for i, out in scan_outputs if input_is_unused(cond_graph, i)] + + for idx, _ in scan_outputs: + del tfl_while_inputs[idx] + output_shapes.append(output_shapes.pop(idx)) + output_dtypes.append(output_dtypes.pop(idx)) + output_names.append(output_names.pop(idx)) + + max_iterations = ctx.make_const(utils.make_name("max_iterations"), np.array(np.iinfo(np.int64).max)) + + loop_node = ctx.make_node("Loop", [max_iterations.output[0], cond_outputs[0]] + tfl_while_inputs, + output_count=len(output_shapes), name=node.name + "_loop", + shapes=output_shapes, dtypes=output_dtypes, skip_conversion=True) + + output_map = dict(zip(output_names, loop_node.output)) + + # shift output consumers + for k, v in output_map.items(): + ctx.replace_all_inputs(k, v) # ops=ctx.get_nodes() + + body = wire_tfl_while_body(body, loop_node.inputs, output_shapes, output_dtypes, cond_graph, scan_outputs) + + for i in range(len(scan_outputs)): + squeeze_node = GraphBuilder(body).make_squeeze( + {'data': body.outputs[-1-i], "axes": [0]}, return_node=True) 
+ body.outputs[-1-i] = squeeze_node.output[0] + + loop_node.set_body_graph_as_attr("body", body) + +def wire_tfl_while_body(g, loop_node_inputs, output_shapes, + output_dtypes, cond_graph, scan_outputs): + """Wire subgraph graph into main.""" + + g = copy.deepcopy(g) + graph_inputs = g.inputs.copy() + + # onnx will pass in cond as argument + iter_node = g.make_node("Placeholder", [], name=utils.make_name("iteration_num"), + output_count=1, dtypes=[TensorProto.INT64], shapes=[[]]) + cond_node = g.make_node("Placeholder", [], name=utils.make_name("cond"), + output_count=1, dtypes=[TensorProto.BOOL], shapes=[[]]) + cond_binding = parameter_binding(cond_graph, g.outputs) + + to_remove = set() + for idx, scan_output in scan_outputs: + inp = graph_inputs[idx] + + # Remove consumers of scan input + stack = [inp] + while stack: + node = stack.pop() + if node not in to_remove: + to_remove.add(node) + for out in node.output: + stack += g.find_output_consumers(out) + + # Remove scan input from cond graph + cond_binding = {k: "@@ALLOC" if v == g.outputs[idx] else v for k, v in cond_binding.items()} + del g.inputs[idx] + del g.outputs[idx] + g.outputs.append(scan_output) + + for node in to_remove: + g.remove_node(node.name) + + # in onnx the body inputs are: index, cond, [loop_vars] + g.inputs = [iter_node, cond_node] + g.inputs + + # Shapes of iteration and cond are already known + for p, c in zip(loop_node_inputs[2:], g.input_names[2:]): + shape = p.output_shapes[0] + g.set_shape(c, shape) + + cond_outputs = inline_subgraph(g, cond_graph, "cond__", cond_binding) + + g.outputs = [cond_outputs[0]] + g.outputs + return g + +@tfl_op(["TFL_IF"], tf_op="If") +class TflIfOp: + @classmethod + def to_tf(cls, ctx, node, **kwargs): + node.attr["then_branch"] = node.attr["then_subgraph_index"] + del node.attr["then_subgraph_index"] + node.attr["else_branch"] = node.attr["else_subgraph_index"] + del node.attr["else_subgraph_index"] diff --git 
# SPDX-License-Identifier: Apache-2.0


"""
tfl_direct - TFLite ops that map 1:1 onto a TF op with identical semantics.

Each ``tfl_op`` decorator below registers ``TflDirectOp`` as the handler for
one TFLite op and records the TF op it should be renamed to.  No attribute or
input rewriting is needed for these ops, so ``to_tf`` is a no-op.
"""

from tf2onnx.handler import tfl_op


# pylint: disable=unused-argument,missing-docstring,unused-variable,pointless-string-statement,invalid-name


@tfl_op("TFL_ABS", tf_op="Abs")
@tfl_op("TFL_CEIL", tf_op="Ceil")
@tfl_op("TFL_COS", tf_op="Cos")
@tfl_op("TFL_ELU", tf_op="Elu")
@tfl_op("TFL_EQUAL", tf_op="Equal")
@tfl_op("TFL_EXP", tf_op="Exp")
@tfl_op("TFL_FLOOR", tf_op="Floor")
@tfl_op("TFL_FLOOR_DIV", tf_op="FloorDiv")
@tfl_op("TFL_FLOOR_MOD", tf_op="FloorMod")
@tfl_op("TFL_GREATER", tf_op="Greater")
@tfl_op("TFL_GREATER_EQUAL", tf_op="GreaterEqual")
@tfl_op("TFL_LESS", tf_op="Less")
@tfl_op("TFL_LESS_EQUAL", tf_op="LessEqual")
@tfl_op("TFL_LOG", tf_op="Log")
@tfl_op("TFL_LOG_SOFTMAX", tf_op="LogSoftmax")
@tfl_op("TFL_LOGICAL_AND", tf_op="LogicalAnd")
@tfl_op("TFL_LOGICAL_NOT", tf_op="LogicalNot")
@tfl_op("TFL_LOGICAL_OR", tf_op="LogicalOr")
@tfl_op("TFL_MATRIX_DIAG", tf_op="MatrixDiag")
@tfl_op("TFL_MATRIX_SET_DIAG", tf_op="MatrixSetDiag")
@tfl_op("TFL_MAXIMUM", tf_op="Maximum")
@tfl_op("TFL_MINIMUM", tf_op="Minimum")
@tfl_op("TFL_NEG", tf_op="Neg")
@tfl_op("TFL_NOT_EQUAL", tf_op="NotEqual")
@tfl_op("TFL_POW", tf_op="Pow")
@tfl_op("TFL_RANK", tf_op="Rank")
@tfl_op("TFL_RELU", tf_op="Relu")
@tfl_op("TFL_RELU6", tf_op="Relu6")
@tfl_op("TFL_ROUND", tf_op="Round")
@tfl_op("TFL_RSQRT", tf_op="Rsqrt")
@tfl_op("TFL_SELECT", tf_op="Select")
@tfl_op("TFL_SELECT_V2", tf_op="SelectV2")
@tfl_op("TFL_SIN", tf_op="Sin")
@tfl_op("TFL_SQRT", tf_op="Sqrt")
@tfl_op("TFL_SQUARE", tf_op="Square")
@tfl_op("TFL_SQUARED_DIFFERENCE", tf_op="SquaredDifference")
@tfl_op("TFL_TANH", tf_op="Tanh")
@tfl_op("TFL_WHERE", tf_op="Where")
@tfl_op("TFL_ZEROS_LIKE", tf_op="ZerosLike")
@tfl_op("TFL_FILL", tf_op="Fill")
@tfl_op("TFL_GATHER_ND", tf_op="GatherNd")
@tfl_op("TFL_PAD", tf_op="Pad")
@tfl_op("TFL_REVERSE_V2", tf_op="ReverseV2")
@tfl_op("TFL_SCATTER_ND", tf_op="ScatterNd")
@tfl_op("TFL_SEGMENT_SUM", tf_op="SegmentSum")
@tfl_op("TFL_SHAPE", tf_op="Shape")
@tfl_op("TFL_SLICE", tf_op="Slice")
@tfl_op("TFL_SQUEEZE", tf_op="Squeeze")
@tfl_op("TFL_TILE", tf_op="Tile")
@tfl_op("TFL_EXPAND_DIMS", tf_op="ExpandDims")
@tfl_op("TFL_TRANSPOSE", tf_op="Transpose")
@tfl_op("TFL_UNPACK", tf_op="Unpack")
@tfl_op("TFL_ADD_N", tf_op="AddN")
@tfl_op("TFL_ONE_HOT", tf_op="OneHot")
@tfl_op("TFL_DEPTH_TO_SPACE", tf_op="DepthToSpace")
@tfl_op("TFL_ARG_MIN", tf_op="ArgMin")
@tfl_op("TFL_ARG_MAX", tf_op="ArgMax")
@tfl_op("TFL_NON_MAX_SUPPRESSION_V5", tf_op="NonMaxSuppressionV5")
@tfl_op("TFL_RESIZE_NEAREST_NEIGHBOR", tf_op="ResizeNearestNeighbor")
@tfl_op("TFL_LEAKY_RELU", tf_op="LeakyRelu")
@tfl_op("TFL_STRIDED_SLICE", tf_op="StridedSlice")
@tfl_op("TFL_MEAN", tf_op="Mean")
@tfl_op("TFL_SUM", tf_op="Sum")
@tfl_op("TFL_MIRROR_PAD", tf_op="MirrorPad")
@tfl_op("TFL_RESIZE_BILINEAR", tf_op="ResizeBilinear")
@tfl_op("TFL_REVERSE_SEQUENCE", tf_op="ReverseSequence")
@tfl_op("TFL_SPARSE_TO_DENSE", tf_op="SparseToDense")
@tfl_op("TFL_CUMSUM", tf_op="Cumsum")
class TflDirectOp:
    """Shared handler for all direct-mapped ops."""
    @classmethod
    def to_tf(cls, ctx, node, **kwargs):
        # Nothing to do: the tfl_op registration already carries the TF op name.
        pass
onnx.onnx_pb import TensorProto +from tf2onnx.handler import tfl_op +from tf2onnx import utils + +logger = logging.getLogger(__name__) + + +# pylint: disable=unused-argument,missing-docstring,unused-variable,pointless-string-statement,invalid-name + + +def separate_fused_activation_function(ctx, node): + activation_fn = node.attr['fused_activation_function'].s + del node.attr['fused_activation_function'] + if activation_fn == b'RELU': + ctx.insert_new_node_on_output("Relu", node.output[0]) + elif activation_fn == b'RELU6': + # This is a TF op. We will convert it on the 2nd pass. + shape = ctx.get_shape(node.output[0]) + dtype = ctx.get_dtype(node.output[0]) + new_node = ctx.make_node("Relu6", [node.output[0]], skip_conversion=False, shapes=[shape], dtypes=[dtype]) + ctx.insert_node_on_output(new_node, node.output[0]) + elif activation_fn == b'TANH': + ctx.insert_new_node_on_output("Tanh", node.output[0]) + else: + # TODO: SIGN_BIT and RELU_N1_TO_1 not supported yet + utils.make_sure(activation_fn == b'NONE', "Unsupported fused activation function %s on node %s", + activation_fn, node.name) + +@tfl_op(["TFL_ADD"], tf_op="Add") +class TflAdd: + @classmethod + def to_tf(cls, ctx, node, **kwargs): + separate_fused_activation_function(ctx, node) + +@tfl_op(["TFL_SUB"], tf_op="Sub") +class TflSub: + @classmethod + def to_tf(cls, ctx, node, **kwargs): + separate_fused_activation_function(ctx, node) + +@tfl_op(["TFL_MUL"], tf_op="Mul") +class TflMul: + @classmethod + def to_tf(cls, ctx, node, **kwargs): + separate_fused_activation_function(ctx, node) + +@tfl_op(["TFL_DIV"], tf_op="Div") +class TflDiv: + @classmethod + def to_tf(cls, ctx, node, **kwargs): + separate_fused_activation_function(ctx, node) + +@tfl_op(["TFL_LOGISTIC"], tf_op="Sigmoid") +class TflLogistic: + @classmethod + def to_tf(cls, ctx, node, **kwargs): + pass + +@tfl_op(["TFL_REDUCE_MAX"], tf_op="Max") +@tfl_op(["TFL_REDUCE_MIN"], tf_op="Min") +@tfl_op(["TFL_REDUCE_ANY"], tf_op="Any") 
+@tfl_op(["TFL_REDUCE_PROD"], tf_op="Prod") +class TflReduceOp: + @classmethod + def to_tf(cls, ctx, node, **kwargs): + pass + +@tfl_op(["TFL_LOCAL_RESPONSE_NORMALIZATION"], tf_op="LRN") +class TFlLocalResponseNormalizationOp: + @classmethod + def to_tf(cls, ctx, node, **kwargs): + node.attr["depth_radius"] = node.attr["radius"] + del node.attr["radius"] + +@tfl_op(["TFL_RANGE"], tf_op="Range") +class TflRangeOp: + @classmethod + def to_tf(cls, ctx, node, **kwargs): + node.set_attr("Tidx", ctx.get_dtype(node.output[0])) + +@tfl_op(["TFL_QUANTIZE"], onnx_op="QuantizeLinear") +class TflQuantizeOp: + @classmethod + def version_1(cls, ctx, node, dequantize=False, **kwargs): + # We could just let the TFL_QUANTIZE fall through as an unconverted op, but they are added programmatically + # so that might be confusing. + raise ValueError("Opset 10 is required for quantization. Consider using the --dequantize flag or --opset 10.") + + @classmethod + def version_10(cls, ctx, node, **kwargs): + scale = node.get_attr_value('scale') + zero_point = node.get_attr_value('zero_point') + axis = node.get_attr_value('quantized_dimension') + np_q_type = utils.map_onnx_to_numpy_type(ctx.get_dtype(node.output[0])) + if len(scale) > 1 or len(zero_point) > 1: + utils.make_sure(ctx.opset >= 13, "Opset 13 is required for per-axis quantization for node %s", node.name) + node.set_attr("axis", axis) + scale_node = ctx.make_const(utils.make_name("scale"), np.array(scale[0], dtype=np.float32)) + zero_point_node = ctx.make_const(utils.make_name("zero_point"), np.array(zero_point[0], dtype=np_q_type)) + ctx.replace_inputs(node, [node.input[0], scale_node.output[0], zero_point_node.output[0]]) + del node.attr["scale"] + del node.attr["zero_point"] + del node.attr["quantized_dimension"] + if "min" in node.attr: + del node.attr["min"] + if "max" in node.attr: + del node.attr["max"] + +@tfl_op(["TFL_DEQUANTIZE"], onnx_op="DequantizeLinear") +class TflDequantizeOp: + @classmethod + def version_1(cls, ctx, 
@tfl_op(["TFL_DEQUANTIZE"], onnx_op="DequantizeLinear")
class TflDequantizeOp:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        """Pre-opset-10 fallback (also used by --dequantize): expand the
        dequantize into Cast/Sub/Mul, or fold it entirely when the input is
        constant."""
        if 'scale' not in node.attr:
            # Sometimes tflite uses a Dequantize to go from fp16 to fp32
            node.type = "Cast"
            node.set_attr('to', ctx.get_dtype(node.output[0]))
            return
        scale = np.array(node.get_attr_value('scale'), dtype=np.float32)
        zero_point = np.array(node.get_attr_value('zero_point'), dtype=np.float32)
        axis = node.get_attr_value('quantized_dimension')
        in_rank = ctx.get_rank(node.input[0])
        def expand_tensor(t):
            # Reshape a per-axis parameter vector to broadcast along `axis`;
            # a length-1 vector degrades to a scalar (per-tensor case).
            if t.shape == (1,):
                return t[0]
            utils.make_sure(in_rank is not None, "Cannot dequantize node %s with unknown input rank", node.name)
            new_shape = [1] * in_rank
            new_shape[axis] = t.shape[0]
            return t.reshape(new_shape)
        scale = expand_tensor(scale)
        zero_point = expand_tensor(zero_point)
        if node.inputs[0].is_const():
            # Constant input: fold (x - zero_point) * scale at conversion time.
            x_val = node.inputs[0].get_tensor_value(as_list=False).astype(np.float32)
            new_val = (x_val - zero_point) * scale
            dequant_const = ctx.make_const(utils.make_name(node.name), new_val)
            ctx.replace_all_inputs(node.output[0], dequant_const.output[0])
            ctx.remove_node(node.name)
        else:
            # Dynamic input: emit Cast(float) -> Sub(zero_point) -> Mul(scale).
            scale_const = ctx.make_const(utils.make_name(node.name + "_scale"), scale).output[0]
            zero_point_const = ctx.make_const(utils.make_name(node.name + "_zero_point"), zero_point).output[0]
            cast_node = ctx.make_node("Cast", [node.input[0]], attr={'to': TensorProto.FLOAT},
                                      op_name_scope=node.name).output[0]
            sub_node = ctx.make_node("Sub", [cast_node, zero_point_const], op_name_scope=node.name).output[0]
            mul_node = ctx.make_node("Mul", [sub_node, scale_const], op_name_scope=node.name).output[0]
            ctx.replace_all_inputs(node.output[0], mul_node)
            ctx.remove_node(node.name)

    @classmethod
    def version_10(cls, ctx, node, dequantize=False, **kwargs):
        """Convert TFL_DEQUANTIZE to ONNX DequantizeLinear, moving scale /
        zero-point attributes into constant inputs (per-axis needs opset 13)."""
        if dequantize or 'scale' not in node.attr:
            # --dequantize flag, or fp16->fp32 cast form: use the expansion path.
            cls.version_1(ctx, node, dequantize=True, **kwargs)
            return
        scale = node.get_attr_value('scale')
        zero_point = node.get_attr_value('zero_point')
        axis = node.get_attr_value('quantized_dimension')
        np_q_type = utils.map_onnx_to_numpy_type(ctx.get_dtype(node.input[0]))
        if len(scale) > 1 or len(zero_point) > 1:
            # Per-axis: keep full parameter arrays and record the axis.
            utils.make_sure(ctx.opset >= 13, "Opset 13 is required for per-axis quantization for node %s", node.name)
            node.set_attr("axis", axis)
            scale_node = ctx.make_const(utils.make_name("scale"), np.array(scale, dtype=np.float32))
            zero_point_node = ctx.make_const(utils.make_name("zero_point"), np.array(zero_point, dtype=np_q_type))
        else:
            # Per-tensor: scalar parameters.
            scale_node = ctx.make_const(utils.make_name("scale"), np.array(scale[0], dtype=np.float32))
            zero_point_node = ctx.make_const(utils.make_name("zero_point"), np.array(zero_point[0], dtype=np_q_type))
        ctx.replace_inputs(node, [node.input[0], scale_node.output[0], zero_point_node.output[0]])
        del node.attr["scale"]
        del node.attr["zero_point"]
        del node.attr["quantized_dimension"]
        if "min" in node.attr:
            del node.attr["min"]
        if "max" in node.attr:
            del node.attr["max"]

def dynamic_quantize_inputs(ctx, node):
    """Wrap each not-already-quantized input of *node* in a
    DynamicQuantizeLinear -> DequantizeLinear pair (opset 11+)."""
    if ctx.opset < 11:
        logger.warning("Opset 11 is required for asymmetric_quantize_inputs of node %s", node.name)
        return
    for i in range(len(node.input)):
        # Don't quantize inputs that are already quantized
        if node.inputs[i].type in ["DequantizeLinear", "TFL_DEQUANTIZE"]:
            continue
        dyn_quant = ctx.make_node("DynamicQuantizeLinear", [node.input[i]], output_count=3, op_name_scope=node.name)
        dyn_quant.skip_conversion = True
        dequant = ctx.make_node("DequantizeLinear", dyn_quant.output, op_name_scope=node.name)
        dequant.skip_conversion = True
        ctx.replace_input(node, node.input[i], dequant.output[0], input_index=i)
fully connected op") + if node.attr['asymmetric_quantize_inputs'].i == 1: + dynamic_quantize_inputs(ctx, node) + + if ctx.get_rank(node.input[0]) != 2: + # When a fullyconnected node has keep_num_dims=0 and input[0] rank > 2, the extra dims must be compressed + utils.make_sure(ctx.get_rank(node.input[1]) == 2, "weights for FullyConnected must have rank 2") + weights_shape = ctx.get_shape(node.input[1])[1] + utils.make_sure(weights_shape != -1, "weights for FullyConnected must have known shape") + shape_const = ctx.make_const(utils.make_name("reshape_shape"), np.array([-1, weights_shape], np.int64)) + reshape_node = ctx.make_node("Reshape", [node.input[0], shape_const.output[0]]) + reshape_node.skip_conversion = True + ctx.replace_inputs(node, [reshape_node.output[0], node.input[1]]) + + transpose_node = ctx.insert_new_node_on_input(node, "Transpose", node.input[1], + name=None, input_index=1, perm=[1, 0]) + transpose_node.skip_conversion = True + node.set_attr("transpose_a", 0) + node.set_attr("transpose_b", 0) + node.type = "MatMul" + + if len(node.input) == 3: + # FIXME: Add a test for this + bias_inp = node.input[2] + ctx.replace_inputs(node, node.input[:2]) + add_node = ctx.insert_new_node_on_output("Add", node.output[0], inputs=[node.output[0], bias_inp]) + add_node.skip_conversion = True + + del node.attr["weights_format"] + del node.attr["keep_num_dims"] + del node.attr["asymmetric_quantize_inputs"] + +@tfl_op(["TFL_SOFTMAX"], tf_op="Softmax") +class TFlSoftmaxOp: + @classmethod + def to_tf(cls, ctx, node, **kwargs): + beta = node.get_attr_value("beta") + if beta != 1: + beta_node = ctx.make_const(utils.make_name("beta"), np.array(beta, dtype=np.float32)) + mul_node = ctx.insert_new_node_on_output("Mul", node.output[0], name=utils.make_name(node.name)) + ctx.replace_inputs(mul_node, [node.output[0], beta_node.output[0]]) + +@tfl_op(["TFL_PRELU"], onnx_op="PRelu") +class TflPreluOp: + @classmethod + def version_7(cls, ctx, node, **kwargs): + pass diff --git 
# SPDX-License-Identifier: Apache-2.0


"""
tfl_nn
"""

from tf2onnx.handler import tfl_op
from tf2onnx.tflite_handlers.tfl_math import separate_fused_activation_function


# pylint: disable=unused-argument,missing-docstring,unused-variable,pointless-string-statement,invalid-name


@tfl_op(["TFL_TRANSPOSE_CONV"], tf_op="Conv2DBackpropInput")
class TflTransposeConv:
    @classmethod
    def to_tf(cls, ctx, node, **kwargs):
        """Rewrite tflite transpose-conv attrs/weights into TF form."""
        # 'padding' carries over unchanged; strides become an NHWC 4-vector.
        h_stride = node.get_attr_int("stride_h")
        w_stride = node.get_attr_int("stride_w")
        node.set_attr("strides", [1, h_stride, w_stride, 1])
        for attr_name in ("stride_h", "stride_w"):
            del node.attr[attr_name]
        kernel_transpose = ctx.insert_new_node_on_input(node, "Transpose", node.input[1],
                                                        name=None, perm=[1, 2, 0, 3])
        kernel_transpose.skip_conversion = True
        node.set_attr("data_format", "NHWC")

@tfl_op(["TFL_CONV_2D"], tf_op="Conv2D")
class TflConv2D:
    @classmethod
    def to_tf(cls, ctx, node, **kwargs):
        """Rewrite tflite conv attrs/weights into TF form."""
        separate_fused_activation_function(ctx, node)
        # 'padding' carries over unchanged; strides/dilations become NHWC 4-vectors.
        h_stride = node.get_attr_int("stride_h")
        w_stride = node.get_attr_int("stride_w")
        h_dilation = node.get_attr_int("dilation_h_factor")
        w_dilation = node.get_attr_int("dilation_w_factor")
        node.set_attr("strides", [1, h_stride, w_stride, 1])
        node.set_attr("dilations", [1, h_dilation, w_dilation, 1])
        for attr_name in ("stride_h", "stride_w", "dilation_h_factor", "dilation_w_factor"):
            del node.attr[attr_name]
        kernel_transpose = ctx.insert_new_node_on_input(node, "Transpose", node.input[1],
                                                        name=None, perm=[1, 2, 3, 0])
        kernel_transpose.skip_conversion = True
        node.set_attr("data_format", "NHWC")

@tfl_op(["TFL_AVERAGE_POOL_2D"], tf_op="AvgPool")
@tfl_op(["TFL_MAX_POOL_2D"], tf_op="MaxPool")
class TflAveragePool:
    @classmethod
    def to_tf(cls, ctx, node, **kwargs):
        """Rewrite tflite pooling attrs into TF form."""
        separate_fused_activation_function(ctx, node)
        # 'padding' carries over unchanged.
        h_stride = node.get_attr_int("stride_h")
        w_stride = node.get_attr_int("stride_w")
        kernel_h = node.get_attr_int("filter_height")
        kernel_w = node.get_attr_int("filter_width")
        node.set_attr("strides", [1, h_stride, w_stride, 1])
        node.set_attr("ksize", [1, kernel_h, kernel_w, 1])
        for attr_name in ("stride_h", "stride_w", "filter_height", "filter_width"):
            del node.attr[attr_name]
        node.set_attr("data_format", "NHWC")

@tfl_op(["TFL_DEPTHWISE_CONV_2D"], tf_op="DepthwiseConv2dNative")
class TflDepthwiseConv2D:
    @classmethod
    def to_tf(cls, ctx, node, **kwargs):
        """Rewrite tflite depthwise-conv attrs/weights into TF form."""
        separate_fused_activation_function(ctx, node)
        # 'padding' and 'depth_multiplier' carry over unchanged.
        h_stride = node.get_attr_int("stride_h")
        w_stride = node.get_attr_int("stride_w")
        h_dilation = node.get_attr_int("dilation_h_factor")
        w_dilation = node.get_attr_int("dilation_w_factor")
        node.set_attr("strides", [1, h_stride, w_stride, 1])
        node.set_attr("dilations", [1, h_dilation, w_dilation, 1])
        for attr_name in ("stride_h", "stride_w", "dilation_h_factor", "dilation_w_factor"):
            del node.attr[attr_name]
        kernel_transpose = ctx.insert_new_node_on_input(node, "Transpose", node.input[1],
                                                        name=None, perm=[1, 2, 3, 0])
        kernel_transpose.skip_conversion = True
        node.set_attr("data_format", "NHWC")

@tfl_op(["TFL_BATCH_TO_SPACE_ND"], tf_op="BatchToSpaceND")
class TflSlice:
    @classmethod
    def to_tf(cls, ctx, node, **kwargs):
        # Direct rename; no attributes to adjust.
        pass

@tfl_op(["TFL_SPACE_TO_BATCH_ND"], tf_op="SpaceToBatchND")
class TFlSpaceToBatchNDOp:
    @classmethod
    def to_tf(cls, ctx, node, **kwargs):
        # Direct rename; no attributes to adjust.
        pass
@tfl_op(["TFL_SPACE_TO_DEPTH"], tf_op="SpaceToDepth")
class TFlSpaceToDepthOp:
    @classmethod
    def to_tf(cls, ctx, node, **kwargs):
        node.set_attr("data_format", "NHWC")

@tfl_op(["TFL_NON_MAX_SUPPRESSION_V4"], tf_op="NonMaxSuppressionV4")
class TflNonMaxSuppressionV4Op:
    @classmethod
    def to_tf(cls, ctx, node, **kwargs):
        node.set_attr("pad_to_max_output_size", 1)

# ---------------------------------------------------------------------------
# tfl_postprocess.py
# ---------------------------------------------------------------------------
# SPDX-License-Identifier: Apache-2.0


"""
tfl_postprocess
"""

import logging
import numpy as np
from onnx.onnx_pb import TensorProto

from tf2onnx.handler import tfl_op
from tf2onnx import utils
from tf2onnx.graph_builder import GraphBuilder

logger = logging.getLogger(__name__)


# pylint: disable=unused-argument,missing-docstring,unused-variable,pointless-string-statement,invalid-name


@tfl_op(["TFL_TFLite_Detection_PostProcess"])
class TflDetectionPostProcess:
    @classmethod
    def version_11(cls, ctx, node, **kwargs):
        """Lower the tflite detection post-process custom op to ONNX
        NonMaxSuppression plus gather/top-k/pad post-processing."""
        # This op is basically NMS with a little post-processing.
        # TFLite implementation:
        # https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/micro/kernels/detection_postprocess.cc

        # box_encodings.shape = [batch_dim, box_num, 4]
        # class_predictions.shape = [batch_dim, box_num, num_classes(+1)]
        # anchors.shape = [box_num, 4]
        box_encodings, class_predictions, anchors = node.input

        classes_dtype = ctx.get_dtype(node.output[1])
        box_cnt_dtype = ctx.get_dtype(node.output[3])

        num_classes = node.get_attr_value('num_classes')
        max_detections = node.get_attr_value('max_detections')

        # Remove 'other' class if present: keep only the last num_classes columns.
        max_int64 = int(utils.get_max_value(np.int64))
        class_predictions = GraphBuilder(ctx).make_slice(
            {'data': class_predictions, 'starts': [-num_classes], 'ends': [max_int64], 'axes': [2]})

        # Undo the tflite box-encoding scaling, then decode (y, x, h, w) against the anchors.
        scaling_vector = [node.get_attr_value(a) for a in ['y_scale', 'x_scale', 'h_scale', 'w_scale']]
        scale_const = ctx.make_const(utils.make_name('scale_const'), np.array(scaling_vector, np.float32)).output[0]

        scaled_boxes = ctx.make_node('Div', [box_encodings, scale_const]).output[0]
        anchors_yx = GraphBuilder(ctx).make_slice({'data': anchors, 'starts': [0], 'ends': [2], 'axes': [1]})
        anchors_hw = GraphBuilder(ctx).make_slice({'data': anchors, 'starts': [2], 'ends': [4], 'axes': [1]})
        boxes_yx = GraphBuilder(ctx).make_slice({'data': scaled_boxes, 'starts': [0], 'ends': [2], 'axes': [2]})
        boxes_hw = GraphBuilder(ctx).make_slice({'data': scaled_boxes, 'starts': [2], 'ends': [4], 'axes': [2]})

        scaled_boxes_yx = ctx.make_node('Mul', [boxes_yx, anchors_hw]).output[0]
        boxes_hw_exp = ctx.make_node('Exp', [boxes_hw]).output[0]
        scaled_boxes_hw = ctx.make_node('Mul', [boxes_hw_exp, anchors_hw]).output[0]
        const_half = ctx.make_const(utils.make_name('const_half'), np.array(0.5, np.float32)).output[0]
        boxes_half_hw = ctx.make_node('Mul', [scaled_boxes_hw, const_half]).output[0]
        boxes_center_yx = ctx.make_node('Add', [scaled_boxes_yx, anchors_yx]).output[0]

        # Corner form [y1, x1, y2, x2] as expected by ONNX NonMaxSuppression.
        boxes_lower_left = ctx.make_node('Sub', [boxes_center_yx, boxes_half_hw]).output[0]
        boxes_upper_right = ctx.make_node('Add', [boxes_center_yx, boxes_half_hw]).output[0]
        adjusted_boxes = ctx.make_node('Concat', [boxes_lower_left, boxes_upper_right], attr={'axis': 2}).output[0]

        iou_threshold = np.array(node.get_attr_value('nms_iou_threshold'), np.float32)
        iou_threshold_const = ctx.make_const(utils.make_name('iou_threshold'), iou_threshold).output[0]

        score_threshold = np.array(node.get_attr_value('nms_score_threshold'), np.float32)
        score_threshold_const = ctx.make_const(utils.make_name('score_threshold'), score_threshold).output[0]

        if node.get_attr_value('use_regular_nms', False):
            boxes_per_class = np.array(node.get_attr_value('detections_per_class', 100), np.int64)
        else:
            # When tflite uses FastNMS, detections_per_class is ignored.
            # BUGFIX: use the module logger, not the root logger (was logging.warning).
            logger.warning("NMS node %s uses fast NMS. ONNX will approximate with standard NMS.", node.name)
            boxes_per_class = np.array(max_detections, np.int64)
        max_boxes_per_class_const = ctx.make_const(utils.make_name('max_boxes_per_class'), boxes_per_class).output[0]

        # scores.shape = [batch_dim, classes_num, box_num]
        scores = ctx.make_node('Transpose', [class_predictions], attr={'perm': [0, 2, 1]}).output[0]

        nms_inputs = [adjusted_boxes, scores, max_boxes_per_class_const, iou_threshold_const, score_threshold_const]
        # shape: [-1, 3], elts of format [batch_index, class_index, box_index]
        selected_indices = ctx.make_node('NonMaxSuppression', nms_inputs, attr={'center_point_box': 0},
                                         op_name_scope=node.name).output[0]

        selected_boxes_idx = GraphBuilder(ctx).make_slice(
            {'data': selected_indices, 'starts': [2], 'ends': [3], 'axes': [1]})
        selected_boxes_idx_sq = GraphBuilder(ctx).make_squeeze({'data': selected_boxes_idx, 'axes': [1]})

        selected_classes = GraphBuilder(ctx).make_slice(
            {'data': selected_indices, 'starts': [1], 'ends': [2], 'axes': [1]})
        selected_classes_sq = GraphBuilder(ctx).make_squeeze({'data': selected_classes, 'axes': [1]})

        box_and_class_idx = ctx.make_node('Concat', [selected_boxes_idx, selected_classes], attr={'axis': 1}).output[0]

        box_cnt = ctx.make_node('Shape', [selected_classes_sq]).output[0]

        adjusted_boxes_sq = GraphBuilder(ctx).make_squeeze({'data': adjusted_boxes, 'axes': [0]})
        detection_boxes = ctx.make_node('Gather', [adjusted_boxes_sq, selected_boxes_idx_sq]).output[0]
        class_predictions_sq = GraphBuilder(ctx).make_squeeze({'data': class_predictions, 'axes': [0]})
        detection_scores = ctx.make_node('GatherND', [class_predictions_sq, box_and_class_idx]).output[0]

        # Keep the top min(max_detections, #selected) detections by score.
        k_const = ctx.make_const(utils.make_name('const_k'), np.array([max_detections], np.int64)).output[0]
        if ctx.opset >= 12:
            min_k = ctx.make_node('Min', [k_const, box_cnt]).output[0]
        else:
            # Lower opsets only support Min between floats
            box_cnt_float = ctx.make_node('Cast', [box_cnt], attr={'to': TensorProto.FLOAT}).output[0]
            k_const_float = ctx.make_node('Cast', [k_const], attr={'to': TensorProto.FLOAT}).output[0]
            min_k_float = ctx.make_node('Min', [k_const_float, box_cnt_float]).output[0]
            min_k = ctx.make_node('Cast', [min_k_float], attr={'to': TensorProto.INT64}).output[0]
        min_k_cast = ctx.make_node('Cast', [min_k], attr={'to': box_cnt_dtype}).output[0]

        scores_top_k, scores_top_k_idx = ctx.make_node('TopK', [detection_scores, min_k], output_count=2).output

        scores_top_k_idx_unsq = GraphBuilder(ctx).make_unsqueeze({'data': scores_top_k_idx, 'axes': [0]})
        scores_top_k_unsq = GraphBuilder(ctx).make_unsqueeze({'data': scores_top_k, 'axes': [0]})

        selected_classes_sort = ctx.make_node('Gather', [selected_classes_sq, scores_top_k_idx_unsq]).output[0]
        classes_sort_cast = ctx.make_node('Cast', [selected_classes_sort], attr={'to': classes_dtype}).output[0]
        detection_boxes_sorted = ctx.make_node('Gather', [detection_boxes, scores_top_k_idx_unsq]).output[0]

        # Pad every output out to exactly max_detections entries.
        pad_amount = ctx.make_node('Sub', [k_const, min_k]).output[0]

        quad_zero_const = ctx.make_const(utils.make_name('quad_zero_const'), np.array([0, 0, 0, 0], np.int64)).output[0]
        duo_zero_const = ctx.make_const(utils.make_name('duo_zero_const'), np.array([0, 0], np.int64)).output[0]
        zero_const = ctx.make_const(utils.make_name('zero_const'), np.array([0], np.int64)).output[0]

        pads_3d = ctx.make_node('Concat', [quad_zero_const, pad_amount, zero_const], attr={'axis': 0}).output[0]
        pads_2d = ctx.make_node('Concat', [duo_zero_const, zero_const, pad_amount], attr={'axis': 0}).output[0]

        detection_boxes_padded = ctx.make_node('Pad', [detection_boxes_sorted, pads_3d]).output[0]
        detection_classes_padded = ctx.make_node('Pad', [classes_sort_cast, pads_2d]).output[0]
        detection_scores_padded = ctx.make_node('Pad', [scores_top_k_unsq, pads_2d]).output[0]

        ctx.replace_all_inputs(node.output[0], detection_boxes_padded)
        ctx.replace_all_inputs(node.output[1], detection_classes_padded)
        ctx.replace_all_inputs(node.output[2], detection_scores_padded)
        ctx.replace_all_inputs(node.output[3], min_k_cast)

        ctx.remove_node(node.name)

# ---------------------------------------------------------------------------
# tfl_tensor.py
# ---------------------------------------------------------------------------
# SPDX-License-Identifier: Apache-2.0


"""
tfl_tensor
"""

import logging
import numpy as np
from tf2onnx.handler import tfl_op
from tf2onnx import utils

logger = logging.getLogger(__name__)


# pylint: disable=unused-argument,missing-docstring,unused-variable,pointless-string-statement,invalid-name


@tfl_op(["TFL_CONCATENATION"], onnx_op="Concat")
class TflConcatenation:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        pass

@tfl_op(["TFL_SPLIT"], tf_op="Split")
class TflSplit:
    @classmethod
    def to_tf(cls, ctx, node, **kwargs):
        # TF calls the attribute num_split, tflite calls it num_splits.
        node.attr['num_split'] = node.attr['num_splits']
        del node.attr['num_splits']

@tfl_op(["TFL_SPLIT_V"], tf_op="SplitV")
class TflSplitV:
    @classmethod
    def to_tf(cls, ctx, node, **kwargs):
        node.attr['num_split'] = node.attr['num_splits']
        del node.attr['num_splits']

@tfl_op(["TFL_GATHER"], onnx_op="Gather")
class TflGather:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        pass
len(node.input) == 1 or ctx.get_rank(node.input[1]) != 1: + new_shape = node.get_attr_value('new_shape') + if new_shape == [0]: + # Legacy tflite models use a shape parameter of [0] to indicate scalars + new_shape = [] + new_shape_const = ctx.make_const(utils.make_name("new_shape"), np.array(new_shape, np.int64)) + ctx.replace_inputs(node, [node.input[0], new_shape_const.output[0]]) + if 'new_shape' in node.attr: + del node.attr['new_shape'] + +@tfl_op(["TFL_CAST"], tf_op="Cast") +class TflCast: + @classmethod + def to_tf(cls, ctx, node, **kwargs): + dst = ctx.get_dtype(node.output[0]) + if "out_data_type" in node.attr: + del node.attr["out_data_type"] + del node.attr["in_data_type"] + node.set_attr("to", dst) + +@tfl_op(["TFL_PACK"], tf_op="Pack") +class TFlPackOp: + @classmethod + def to_tf(cls, ctx, node, **kwargs): + node.attr["N"] = node.attr["values_count"] + del node.attr["values_count"] + +@tfl_op(["TFL_PADV2"], tf_op="PadV2") +class TflPadV2Op: + @classmethod + def to_tf(cls, ctx, node, **kwargs): + pass + +@tfl_op(["TFL_UNIQUE"], tf_op="Unique") +class TFlUniqueOp: + @classmethod + def to_tf(cls, ctx, node, **kwargs): + node.attr["out_idx"] = node.attr["idx_out_type"] + del node.attr["idx_out_type"] + +@tfl_op(["TFL_TOPK_V2"], tf_op="TopKV2") +class TFlTopKV2Op: + @classmethod + def to_tf(cls, ctx, node, **kwargs): + node.set_attr("sorted", 1) diff --git a/lib/python3.10/site-packages/tf2onnx/tflite_rewriters/__init__.py b/lib/python3.10/site-packages/tf2onnx/tflite_rewriters/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b679273a5b5fab49c1ea5d75e89b8489d8815a41 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite_rewriters/__init__.py @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: Apache-2.0 + +"""tf2onnx.tflite_rewriters module""" + +from tf2onnx.tflite_rewriters.tfl_scan_output_rewriter import rewrite_tfl_scan_outputs +from tf2onnx.tflite_rewriters.tfl_qdq_rewriter import rewrite_tfl_qdq + +__all__ = [ + 
"rewrite_tfl_scan_outputs", + "rewrite_tfl_qdq" +] diff --git a/lib/python3.10/site-packages/tf2onnx/tflite_rewriters/tfl_qdq_rewriter.py b/lib/python3.10/site-packages/tf2onnx/tflite_rewriters/tfl_qdq_rewriter.py new file mode 100644 index 0000000000000000000000000000000000000000..849fa712ccb5bf75b6362c33a254076c8c00c093 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite_rewriters/tfl_qdq_rewriter.py @@ -0,0 +1,57 @@ +# SPDX-License-Identifier: Apache-2.0 + + +""" +tf2onnx.tflite_rewriters.tfl_qdq_rewriter - Remove qdq sequences to dequantize model +""" +from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher + + +# pylint: disable=missing-docstring + +def rewrite_tfl_qdq(g, ops): + pattern0 = \ + OpTypePattern('TFL_DEQUANTIZE', name='dequant', inputs=[ + OpTypePattern('TFL_QUANTIZE', name='quant'), + ]) + + matcher = GraphMatcher(pattern0, allow_reorder=False) + match_results = list(matcher.match_ops(ops)) + if match_results: + for match in match_results: + dequant = match.get_op("dequant") + quant = match.get_op("quant") + inp_node = quant.inputs[0] + for k in ["scale", "quantized_dimension", "zero_point"]: + if dequant.get_attr_value(k) != quant.get_attr_value(k): + continue + needed_relu = None + if all(k in quant.attr and len(quant.get_attr_value(k)) == 1 for k in ["min", "max"]): + min_val = quant.get_attr_value("min")[0] + max_val = quant.get_attr_value("max")[0] + if min_val == 0.0 and 5.999 <= max_val <= 6.0: + needed_relu = "TFL_RELU6" + elif min_val == 0.0: + # This may introduce unneeded relu ops but will be correct. + # If the --dequantize feature is used a lot in the future we can optimize this. + needed_relu = "TFL_RELU" + if inp_node.type == needed_relu: + # If it's really obviously unneeded, we skip it. 
# SPDX-License-Identifier: Apache-2.0


"""
tf2onnx.tflite_rewriters.tfl_scan_output_rewriter - Identify a common slice/concat pattern in tflite subgraphs
Effectively replace A = A[:i] + [B] + A[i+1:] with A[i] = B
"""
import numpy as np

from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher


# pylint: disable=missing-docstring

def rewrite_tfl_scan_outputs(g, ops):
    """Detect the slice/concat scan-output idiom in a loop body subgraph and
    record it in g.scan_outputs instead of keeping the slice/concat nodes."""
    # Match concat(slice(A, 0..i), B, slice(A, i+1..end)) along axis 0.
    pattern0 = \
        OpTypePattern('TFL_CONCATENATION', name='concat', inputs=[
            OpTypePattern('TFL_SLICE', name='begin_slice'),
            OpTypePattern('*', name='middle'),
            OpTypePattern('TFL_SLICE', name='end_slice')
        ])

    matcher = GraphMatcher(pattern0, allow_reorder=False)
    match_results = list(matcher.match_ops(ops))
    if match_results:
        for match in match_results:
            concat = match.get_op("concat")
            begin_slice = match.get_op("begin_slice")
            middle = match.get_op("middle")
            end_slice = match.get_op("end_slice")
            middle_shape = g.get_shape(middle.output[0])

            # Both slices must be slicing the same tensor
            if begin_slice.input[0] != end_slice.input[0]:
                continue
            original_tensor = begin_slice.input[0]
            if concat.get_attr_int("axis") != 0:
                continue
            # The inserted slice must have length 1 (to be a single index)
            if middle_shape is None or len(middle_shape) == 0 or middle_shape[0] != 1:
                continue
            rank = len(middle_shape)
            scan_output = middle.output[0]
            if not begin_slice.inputs[1].is_const() or not end_slice.inputs[2].is_const():
                continue
            # The first slice must start from the beginning (0) for all dims
            if not all(v == 0 for v in begin_slice.inputs[1].get_tensor_value()):
                continue
            # The second slice must slice to the end (-1) for all dims
            if not all(v == -1 for v in end_slice.inputs[2].get_tensor_value()):
                continue
            # The other slice dims are assembled by concatenation if rank > 1
            if rank > 1:
                begin_concat = begin_slice.inputs[2]
                end_concat = end_slice.inputs[1]
                if not begin_concat.type == "TFL_CONCATENATION":
                    continue
                if not end_concat.type == "TFL_CONCATENATION":
                    continue
                # Except for dim 0, slice from beginning to end
                if not all(get_uniform_const_val(inp) == -1 for inp in begin_concat.inputs[1:]):
                    continue
                if not all(get_uniform_const_val(inp) == 0 for inp in end_concat.inputs[1:]):
                    continue
                begin_idx = begin_concat.inputs[0]
                end_idx = end_concat.inputs[0]
            else:
                begin_idx = begin_slice.inputs[2]
                end_idx = end_slice.inputs[1]
            # For dim 0, slice to i for first part and from i+1 for second
            if not node_is_one_plus_node(begin_idx, end_idx):
                continue
            out1, _ = get_out_and_offset(begin_idx)
            graph_inps = [n.output[0] for n in g.inputs]
            # To be a scan output, i must be a graph input
            if out1 not in graph_inps:
                continue
            # The array being sliced must be a graph input
            if original_tensor not in graph_inps:
                continue
            # The input/output index of i
            idx = graph_inps.index(out1)
            # The input/output index of the array
            scan_output_idx = graph_inps.index(original_tensor)
            # For a scan output, i must be assigned to i+1 with each iteration
            if not node_is_one_plus_node(g.get_node_by_output(out1), g.get_node_by_output(g.outputs[idx])):
                continue
            if len(g.find_output_consumers(concat.output[0])) > 1:
                continue

            # NOTE(review): indentation below reconstructed from a collapsed
            # source; the slice-removal bookkeeping is taken to apply to the
            # opset<10 path only - verify against upstream tf2onnx.
            if g.opset < 10 and len(g.find_output_consumers(concat.output[0])) <= 1:
                # If opset is < 10, conversion of the subgraph will fail unless we remove the slice nodes
                # We add a tmp node to replace them.
                shape = g.get_shape(concat.output[0])
                dtype = g.get_dtype(concat.output[0])
                tmp_node = g.make_node("TMP_SCAN_OUTPUT", [original_tensor, scan_output],
                                       shapes=[shape], dtypes=[dtype])
                g.replace_all_inputs(concat.output[0], tmp_node.output[0])

                # Walk producer chains from the scan output and the sliced
                # array back to collect everything the rewrite makes dead.
                to_remove = []
                out = g.outputs[scan_output_idx]
                node = g.get_node_by_output(out)
                to_remove.append(node)

                while len(node.input) > 0 and node != concat:
                    out = node.input[0]
                    node = g.get_node_by_output(out)
                    to_remove.append(node)

                to_remove += [begin_slice, end_slice, concat]

                out = original_tensor
                node = g.get_node_by_output(out)
                to_remove.append(node)

                while len(node.input) > 0:
                    out = node.input[0]
                    node = g.get_node_by_output(out)
                    to_remove.append(node)

                if not g.is_safe_to_remove_nodes(to_remove):
                    continue

            g.scan_outputs.append((scan_output_idx, scan_output))
    return ops

def get_uniform_const_val(n):
    """Return the single repeated value of a constant node, or None if the
    node is not const / empty / not uniform."""
    if not n.is_const():
        return None
    v = n.get_tensor_value(as_list=False).flatten()
    if len(v) == 0:
        return None
    if np.all(v == v[0]):
        return v[0]
    return None

def get_out_and_offset(n):
    """Peel reshapes/identities and constant TFL_ADDs off node *n*, returning
    (base_tensor_name, accumulated_constant_offset)."""
    if n.type in ['TFL_RESHAPE', 'TFL_IDENTITY', 'Identity']:
        return get_out_and_offset(n.inputs[0])
    if n.type == 'TFL_ADD':
        v1 = get_uniform_const_val(n.inputs[0])
        v2 = get_uniform_const_val(n.inputs[1])
        if v1 is not None and v2 is not None:
            # Fully constant add: no base tensor, just a combined offset.
            return '', v1 + v2
        if v1 is not None:
            inp2, o2 = get_out_and_offset(n.inputs[1])
            return inp2, v1 + o2
        if v2 is not None:
            inp1, o1 = get_out_and_offset(n.inputs[0])
            return inp1, v2 + o1
    return n.output[0], 0
get_out_and_offset(n.inputs[0]) + return inp1, v2 + o1 + return n.output[0], 0 + +def node_is_one_plus_node(node, one_plus_node): + n1, o1 = get_out_and_offset(node) + n2, o2 = get_out_and_offset(one_plus_node) + return n1 == n2 and o1 + 1 == o2 diff --git a/lib/python3.10/site-packages/tf2onnx/tflite_utils.py b/lib/python3.10/site-packages/tf2onnx/tflite_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..9e296ec1713a1399926a75862f7436d7eacba7f8 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite_utils.py @@ -0,0 +1,434 @@ +# SPDX-License-Identifier: Apache-2.0 + + +""" +tf2onnx.tflite_utils - utilities for parsing tflite files into onnx graph +""" + +import collections +import importlib +import logging +import struct + +from onnx import helper, onnx_pb, numpy_helper +from tensorflow.core.framework import types_pb2, tensor_pb2, node_def_pb2 +from tensorflow.python.framework import tensor_util +import tensorflow as tf +import numpy as np +from tf2onnx.tflite.TensorType import TensorType as TFLiteTensorType +from tf2onnx.tflite.Model import Model +from tf2onnx.flexbuffers import read_flexbuffer +from tf2onnx.tf_utils import read_tf_node_def_attrs +from tf2onnx import utils + +logger = logging.getLogger(__name__) + +TFLITE_TO_ONNX_DTYPE = { + TFLiteTensorType.FLOAT32: onnx_pb.TensorProto.FLOAT, + TFLiteTensorType.FLOAT16: onnx_pb.TensorProto.FLOAT16, + TFLiteTensorType.INT32: onnx_pb.TensorProto.INT32, + TFLiteTensorType.UINT8: onnx_pb.TensorProto.UINT8, + TFLiteTensorType.INT64: onnx_pb.TensorProto.INT64, + TFLiteTensorType.STRING: onnx_pb.TensorProto.STRING, + TFLiteTensorType.BOOL: onnx_pb.TensorProto.BOOL, + TFLiteTensorType.INT16: onnx_pb.TensorProto.INT16, + TFLiteTensorType.COMPLEX64: onnx_pb.TensorProto.COMPLEX64, + TFLiteTensorType.INT8: onnx_pb.TensorProto.INT8, + TFLiteTensorType.FLOAT64: onnx_pb.TensorProto.DOUBLE, + TFLiteTensorType.COMPLEX128: onnx_pb.TensorProto.COMPLEX128, + TFLiteTensorType.UINT64: 
onnx_pb.TensorProto.UINT64, +} + + +TFLITE_TO_TF_DTYPE = { + TFLiteTensorType.FLOAT32: types_pb2.DT_FLOAT, + TFLiteTensorType.FLOAT16: types_pb2.DT_HALF, + TFLiteTensorType.INT32: types_pb2.DT_INT32, + TFLiteTensorType.UINT8: types_pb2.DT_UINT8, + TFLiteTensorType.INT64: types_pb2.DT_INT64, + TFLiteTensorType.STRING: types_pb2.DT_STRING, + TFLiteTensorType.BOOL: types_pb2.DT_BOOL, + TFLiteTensorType.INT16: types_pb2.DT_INT16, + TFLiteTensorType.COMPLEX64: types_pb2.DT_COMPLEX64, + TFLiteTensorType.INT8: types_pb2.DT_INT8, + TFLiteTensorType.FLOAT64: types_pb2.DT_DOUBLE, + TFLiteTensorType.COMPLEX128: types_pb2.DT_COMPLEX128, + TFLiteTensorType.UINT64: types_pb2.DT_UINT64, +} + + +def map_tflite_dtype_to_onnx(dtype): + return TFLITE_TO_ONNX_DTYPE[dtype] + + +def map_tflite_dtype_to_tf(dtype): + return TFLITE_TO_TF_DTYPE[dtype] + + +# The tflite schema uses snake case, but the python bindings use proper case +def snake_to_proper_case(name): + return ''.join(n.capitalize() for n in name.split('_')) + + +def proper_to_snake_case(name): + res = '' + for c in name: + if c.isupper() and res: + res += '_' + res += c.lower() + return res + +# Pulled from the tflite schema.fbs file. Needed to decode enum numbers into strings. +NODE_ATTR_NAME_TO_ENUM_TYPE = { + 'fused_activation_function': 'ActivationFunctionType', + 'padding': 'Padding', + 'type': 'LSHProjectionType', + 'weights_format': 'FullyConnectedOptionsWeightsFormat', + 'kernel_type': 'LSTMKernelType', + 'combiner': 'CombinerType', + 'in_data_type': 'TensorType', + 'out_data_type': 'TensorType', + 'output_type': 'TensorType', + 'out_type': 'TensorType', + 'mode': 'MirrorPadMode', + 'idx_out_type': 'TensorType', +} +NODE_ATTR_NAME_TO_ENUM_TYPE = {snake_to_proper_case(key): value for key, value in NODE_ATTR_NAME_TO_ENUM_TYPE.items()} + +# Pulled from the tflite schema.fbs file. 
+FUNCTION_ATTRS = ['then_subgraph_index', 'else_subgraph_index', 'cond_subgraph_index', + 'body_subgraph_index', 'subgraph'] +FUNCTION_ATTRS = [snake_to_proper_case(attr) for attr in FUNCTION_ATTRS] + + +enum_cache = {} +def lookup_enum(idx, enum_name): + """Given the name of a tflite enum class and an index, return a string with the name of the enum value""" + if enum_name == 'TensorType': + return map_tflite_dtype_to_onnx(idx) + if enum_name in enum_cache: + return enum_cache[enum_name][idx] + module = importlib.import_module('tf2onnx.tflite.' + enum_name) + enum_class = getattr(module, enum_name) + idx_to_name = {value: key for key, value in enum_class.__dict__.items() if not key.startswith('_')} + enum_cache[enum_name] = idx_to_name + return idx_to_name[idx] + + +def get_options_class(name): + """Each tflite optype has a flatbuffer Options class (ex: AddOptions). Returns the options class given its name.""" + if name == "NONE": + return None + module = importlib.import_module('tf2onnx.tflite.' + name) + return getattr(module, name) + + +def read_tflite_model(tflite_path): + """ + Given the path to a tflite model, returns tuple (tflite_graphs, opcodes_map, model) + Graphs are topologically sorted and the main graph is last + Pass these to parse_tflite_graph + """ + with open(tflite_path, 'rb') as f: + buf = f.read() + buf = bytearray(buf) + model = Model.GetRootAsModel(buf, 0) + # To save space, each op in the model indicates its opcode as an index into the model's opcode map. + opcodes_map = {} + for i in range(model.OperatorCodesLength()): + op_code = model.OperatorCodes(i) + # TFlite ran out of opcodes since they only used a byte. Old models store opcodes in DeprecatedBuiltinCode. + # New models put PLACEHOLDER_FOR_GREATER_OP_CODES in this field to signify that BuiltinCode should be used. 
+ code = lookup_enum(op_code.DeprecatedBuiltinCode(), 'BuiltinOperator') + if code == 'PLACEHOLDER_FOR_GREATER_OP_CODES': + code = lookup_enum(op_code.BuiltinCode(), 'BuiltinOperator') + if code == 'CUSTOM': + code = op_code.CustomCode().decode() + opcodes_map[i] = code + # Shapes stored in tflite models are not always reliable so we get them from the interpreter if possible. + tensor_shapes = {} + try: + interpreter = tf.lite.Interpreter(tflite_path) + interpreter.allocate_tensors() + tensor_cnt = model.Subgraphs(0).TensorsLength() + for i in range(tensor_cnt): + name = model.Subgraphs(0).Tensors(i).Name().decode() + details = interpreter._get_tensor_details(i) # pylint: disable=protected-access + if "shape_signature" in details: + tensor_shapes[name] = details["shape_signature"].tolist() + elif "shape" in details: + tensor_shapes[name] = details["shape"].tolist() + except Exception as e: # pylint: disable=broad-except + logger.warning("Error loading model into tflite interpreter: %s", e) + tflite_graphs = get_model_subgraphs(model) + return tflite_graphs, opcodes_map, model, tensor_shapes + + +def get_subgraph_dependencies(model, graph_idx): + """Returns a list of subgraph indices referenced by the indicated graph""" + dependencies = [] + g = model.Subgraphs(graph_idx) + for i in range(g.OperatorsLength()): + op = g.Operators(i) + options_type_name = lookup_enum(op.BuiltinOptionsType(), 'BuiltinOptions') + option_class = get_options_class(options_type_name) + if option_class is not None: + options = option_class() + options.Init(op.BuiltinOptions().Bytes, op.BuiltinOptions().Pos) + for attr in FUNCTION_ATTRS: + if hasattr(options, attr): + value = getattr(options, attr)() + dependencies.append(value) + return dependencies + + +def get_model_subgraphs(model): + """Returns topologically sorted subgraphs of a model. 
Guarantees main graph is placed at the end.""" + main_g = 0 + dependencies = {} + idx_to_graph = {} + for i in range(model.SubgraphsLength()): + idx_to_graph[i] = model.Subgraphs(i) + ds = get_subgraph_dependencies(model, i) + utils.make_sure(main_g not in ds, "Main graph %s is a dependency of subgraph %s", main_g, i) + dependencies[i] = ds + + ordered = [] + visited = set() + visiting = set() + def visit(g): + utils.make_sure(g not in visiting, "Subgraphs have cyclic dependencies: %r", dependencies) + if g in visited: + return + visiting.add(g) + for d in dependencies[g]: + visit(d) + visited.add(g) + ordered.append(g) + visiting.remove(g) + + for g in reversed(range(model.SubgraphsLength())): + visit(g) + + return [idx_to_graph[i] for i in ordered] + + +def get_quantization_attr(quant_params): + attr = {} + attr['scale'] = quant_params.ScaleAsNumpy().tolist() + attr['zero_point'] = quant_params.ZeroPointAsNumpy().tolist() + attr['quantized_dimension'] = quant_params.QuantizedDimension() + if not quant_params.MaxIsNone(): + attr['max'] = quant_params.MaxAsNumpy().tolist() + if not quant_params.MinIsNone(): + attr['min'] = quant_params.MinAsNumpy().tolist() + return attr + + +def parse_tflite_string_tensor(buffer_bytes, shape): + """Returns an onnx tensor with the string data encoded in the tflite tensor data buffer""" + def read_int(offset): + return struct.unpack(' 0: + # For const values we use TF to decode the binary data from the buffer + t = tensor_pb2.TensorProto() + t.tensor_content = buf.DataAsNumpy().tobytes() + if output_shapes[name] is None: + output_shapes[name] = [] + for d in output_shapes[name]: + t.tensor_shape.dim.add().size = d + t.dtype = map_tflite_dtype_to_tf(tensor.Type()) + if t.dtype == tf.string: + onnx_tensor = parse_tflite_string_tensor(t.tensor_content, output_shapes[name]) + else: + np_data = tensor_util.MakeNdarray(t) + onnx_tensor = numpy_helper.from_array(np_data, name=name) + onnx_node = helper.make_node("Const", [], 
outputs=[name], name=name, value=onnx_tensor) + onnx_nodes.append(onnx_node) + op_cnt["Const"] += 1 + + def get_dequant(tensor_name): + """Creates a dequantize op for the provided tensor if needed and returns the output of the op, or + the original tensor name if no dequantization is needed""" + quant = name_to_tensor[tensor_name].Quantization() + if quant is None or quant.ScaleIsNone() or quant.ZeroPointIsNone(): + return tensor_name + if tensor_name in tensor_name_to_dequant_output: + return tensor_name_to_dequant_output[tensor_name] + dequant_name = tensor_name + "_dequant" + attr = get_quantization_attr(quant) + onnx_node = helper.make_node("TFL_DEQUANTIZE", [tensor_name], [dequant_name], name=dequant_name, **attr) + onnx_nodes.append(onnx_node) + tensor_name_to_dequant_output[tensor_name] = dequant_name + output_shapes[dequant_name] = output_shapes[tensor_name].copy() + dtypes[dequant_name] = onnx_pb.TensorProto.FLOAT + return dequant_name + + def get_prequant(tensor_name): + """Called by nodes with the name of the tensor they must output. + If the output is supposed to be quantized, creates a Quantize op outputting the tensor. 
+ Returns the name that should be used for the "prequantized" tensor, or the original tensor if no quantization + is needed""" + quant = name_to_tensor[tensor_name].Quantization() + if quant is None or quant.ScaleIsNone() or quant.ZeroPointIsNone(): + return tensor_name + prequant_name = tensor_name + "_prequant" + quantize_name = tensor_name + "_quantize" + attr = get_quantization_attr(quant) + onnx_node = helper.make_node("TFL_QUANTIZE", [prequant_name], [tensor_name], name=quantize_name, **attr) + onnx_nodes.append(onnx_node) + output_shapes[prequant_name] = output_shapes[tensor_name].copy() + dtypes[prequant_name] = onnx_pb.TensorProto.FLOAT + return prequant_name + + for i in range(tflite_g.OperatorsLength()): + op = tflite_g.Operators(i) + optype = 'TFL_' + opcodes_map[op.OpcodeIndex()] + op_cnt[optype] += 1 + attr = {} + options_type_name = lookup_enum(op.BuiltinOptionsType(), 'BuiltinOptions') + option_class = get_options_class(options_type_name) + wants_dequantized_input = True + has_prequantized_output = True + if optype == 'TFL_QUANTIZE': + out_tensor = tflite_g.Tensors(op.Outputs(0)) + quant = out_tensor.Quantization() + has_prequantized_output = False + if quant is not None and not quant.ScaleIsNone() and not quant.ZeroPointIsNone(): + attr.update(get_quantization_attr(quant)) + elif optype == 'TFL_DEQUANTIZE': + in_tensor = tflite_g.Tensors(op.Inputs(0)) + quant = in_tensor.Quantization() + wants_dequantized_input = False + if quant is not None and not quant.ScaleIsNone() and not quant.ZeroPointIsNone(): + attr.update(get_quantization_attr(quant)) + input_names = [tensor_names[op.Inputs(i)] for i in range(op.InputsLength()) if op.Inputs(i) != -1] + output_names = [tensor_names[op.Outputs(i)] for i in range(op.OutputsLength()) if op.Outputs(i) != -1] + if optype.startswith("TFL_Flex"): + data = read_flexbuffer(op.CustomOptionsAsNumpy().tobytes(), decode_strings=False) + utils.make_sure(isinstance(data, list), "Flex ops are expected to store data as a 
flexbuffer list") + tf_op = data[0].decode("utf-8") + tf_node_def = node_def_pb2.NodeDef() + tf_node_def.ParseFromString(data[1]) + input_tf_dtypes = [map_tflite_dtype_to_tf(name_to_tensor[inp].Type()) for inp in input_names] + def shape_to_tf_shape(dims): + return [None if d < 0 else d for d in dims] + input_shapes = [shape_to_tf_shape(output_shapes[inp]) for inp in input_names] + tf_attrs, _ = read_tf_node_def_attrs(tf_node_def, input_tf_dtypes, input_shapes) + attr.update(tf_attrs) + optype = tf_op + elif not op.CustomOptionsIsNone(): + custom_ops_format = lookup_enum(op.CustomOptionsFormat(), 'CustomOptionsFormat') + if custom_ops_format == 'FLEXBUFFERS': + data = None + try: + data = read_flexbuffer(op.CustomOptionsAsNumpy().tobytes()) + except Exception as e: # pylint: disable=broad-except + logger.warning("Could not parse attributes for custom op '%s': %s", optype, e) + if isinstance(data, dict): + attr.update(data) + if option_class is not None: + options = option_class() + options.Init(op.BuiltinOptions().Bytes, op.BuiltinOptions().Pos) + # All flatbuffer objects have these properties. 
+ block_list = [options_type_name + 'BufferHasIdentifier', 'Init', 'GetRootAs' + options_type_name] + # The rest of the properties of the options class provide its attribute names + attr_names = {opt for opt in dir(options) if not opt.startswith('_') and opt not in block_list} + for a in list(attr_names): + # Flatbufffer list properties have 3 functions: *Length, *IsNone, and *AsNumpy + if a + 'Length' in attr_names: + attr_names.remove(a + 'Length') + attr_names.remove(a + 'IsNone') + attr_names.remove(a) + for a in attr_names: + if a.endswith('AsNumpy'): + value = getattr(options, a)().tolist() + a = a[:-len('AsNumpy')] + else: + # For enums we use a string with the value name, not enum index + value = getattr(options, a)() + if a in NODE_ATTR_NAME_TO_ENUM_TYPE: + value = lookup_enum(value, NODE_ATTR_NAME_TO_ENUM_TYPE[a]) + elif a in FUNCTION_ATTRS: + value = model.Subgraphs(value).Name().decode() + attr_cnt[a] += 1 + attr[proper_to_snake_case(a)] = value + if wants_dequantized_input: + input_names = [get_dequant(inp) for inp in input_names] + if optype == "TFL_TFLite_Detection_PostProcess": + # There's a bug in tflite for the output shapes of this op + for out, shape in zip(output_names, [[-1, -1, 4], [-1, -1], [-1, -1], [-1]]): + if len(output_shapes[out]) != len(shape): + output_shapes[out] = shape + if has_prequantized_output: + output_names = [get_prequant(out) for out in output_names] + onnx_node = helper.make_node(optype, input_names, output_names, name=output_names[0], **attr) + onnx_nodes.append(onnx_node) + + inputs = [tensor_names[tflite_g.Inputs(i)] for i in range(tflite_g.InputsLength())] + outputs = [tensor_names[tflite_g.Outputs(i)] for i in range(tflite_g.OutputsLength())] + # TODO: Allow input/outputs to be overridden + + for inp in inputs: + onnx_node = helper.make_node("Placeholder", [], outputs=[inp], name=inp) + onnx_nodes.append(onnx_node) + + graph_name = (tflite_g.Name() or b'tflite graph').decode() + return onnx_nodes, op_cnt, attr_cnt, 
output_shapes, dtypes, inputs, outputs, graph_name diff --git a/lib/python3.10/site-packages/tf2onnx/tfonnx.py b/lib/python3.10/site-packages/tf2onnx/tfonnx.py new file mode 100644 index 0000000000000000000000000000000000000000..7d0e10d0254b09c2857fe4b001368c52209b54de --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tfonnx.py @@ -0,0 +1,666 @@ +# SPDX-License-Identifier: Apache-2.0 + + +""" +tf2onnx.tf2onnx - rewrite tensorflow graph to onnx graph +""" + +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import collections +import sys +import traceback + +import numpy as np +from onnx import onnx_pb + +import tf2onnx +import tf2onnx.onnx_opset # pylint: disable=unused-import +import tf2onnx.tflite_handlers # pylint: disable=unused-import +import tf2onnx.custom_opsets # pylint: disable=unused-import +from tf2onnx.graph import Graph +from tf2onnx.rewriter import * # pylint: disable=wildcard-import +from tf2onnx.tflite_rewriters import * # pylint: disable=wildcard-import +from tf2onnx.shape_inference import infer_shape +from tf2onnx.tf_loader import is_function, resolve_functions, set_function +from tf2onnx.tf_utils import tensorflow_to_onnx, get_tf_version, compute_const_folding_using_tf +from tf2onnx.tflite_utils import read_tflite_model, parse_tflite_graph + +from . 
import constants, logging, schemas, utils, handler + +logger = logging.getLogger(__name__) + + +# pylint: disable=useless-return,broad-except,logging-not-lazy,unused-argument,missing-docstring +# pylint: disable=unused-variable + +def fold_constants_using_tf(g, outputs_to_values, outputs_to_dtypes): + ops = list(g.get_nodes()) + # pylint: disable=too-many-nested-blocks + keep_looking = True + while keep_looking: + keep_looking = False + for idx, op in enumerate(ops): + if op.output and op.output[0] in outputs_to_values: + logger.info("folding node using tf type=%s, name=%s" % (op.type, op.name)) + val = outputs_to_values[op.output[0]] + + new_node_name = utils.make_name(op.name) + new_output_name = new_node_name + old_output_name = op.output[0] + old_node_name = op.name + logger.debug("create const node [%s] replacing [%s]", new_node_name, old_node_name) + ops[idx] = g.make_const(new_node_name, val) + + logger.debug("replace old output [%s] with new output [%s]", old_output_name, new_output_name) + # need to re-write the consumers input name to use the const name + consumers = g.find_output_consumers(old_output_name) + if consumers: + for consumer in consumers: + g.replace_input(consumer, old_output_name, new_output_name) + + # keep looking until there is nothing we can fold. + keep_looking = True + + g.reset_nodes(ops) + +def rewrite_constant_fold(g, ops): + """ + We call tensorflow transform with constant folding but in some cases tensorflow does + fold all constants. Since there are a bunch of ops in onnx that use attributes where + tensorflow has dynamic inputs, we badly want constant folding to work. For cases where + tensorflow missed something, make another pass over the graph and fix want we care about. 
+ """ + func_map = { + "Add": np.add, + "GreaterEqual": np.greater_equal, + "Cast": np.cast, + "ConcatV2": np.concatenate, + "Less": np.less, + "ListDiff": np.setdiff1d, + "Mul": np.multiply, + "Pack": np.stack, + "Range": np.arange, + "Sqrt": np.sqrt, + "Sub": np.subtract, + } + ops = list(ops) + + # pylint: disable=too-many-nested-blocks + keep_looking = True + while keep_looking: + keep_looking = False + for idx, op in enumerate(ops): + func = func_map.get(op.type) + if func is None: continue + if set(op.output) & set(g.outputs): continue + try: + inputs = [] + for node in op.inputs: + if not node.is_const(): + break + inputs.append(node.get_tensor_value(as_list=False)) + + logger.debug("op name %s, %s, %s", op.name, len(op.input), len(inputs)) + if inputs and len(op.input) == len(inputs): + logger.info("folding node type=%s, name=%s" % (op.type, op.name)) + if op.type == "Cast": + dst = op.get_attr_int("to") + np_type = tf2onnx.utils.map_onnx_to_numpy_type(dst) + val = np.cast[np_type](*inputs) + elif op.type == "ConcatV2": + axis = inputs[-1] + values = inputs[:-1] + val = func(tuple(values), axis) + elif op.type == "ListDiff": + out_type = op.get_attr_int("out_idx") + np_type = tf2onnx.utils.map_onnx_to_numpy_type(out_type) + val = func(*inputs) + val = val.astype(np_type) + elif op.type in ["Pack"]: + # handle ops that need input array and axis + axis = op.get_attr_int("axis") + val = func(inputs, axis=axis) + elif op.type == "Range": + dtype = op.get_attr_int("Tidx") + np_type = tf2onnx.utils.map_onnx_to_numpy_type(dtype) + val = func(*inputs, dtype=np_type) + else: + val = func(*inputs) + + new_node_name = utils.make_name(op.name) + new_output_name = new_node_name + old_output_name = op.output[0] + old_node_name = op.name + logger.debug("create const node [%s] replacing [%s]", new_node_name, old_node_name) + ops[idx] = g.make_const(new_node_name, val) + + logger.debug("replace old output [%s] with new output [%s]", old_output_name, new_output_name) + # 
need to re-write the consumers input name to use the const name + consumers = g.find_output_consumers(old_output_name) + if consumers: + for consumer in consumers: + g.replace_input(consumer, old_output_name, new_output_name) + + # keep looking until there is nothing we can fold. + # We keep the graph in topological order so if we folded, + # the result might help a following op. + keep_looking = True + except Exception as ex: + tb = traceback.format_exc() # pylint: disable=bare-except + logger.info("exception: %s, details: %s", ex, tb) + # ignore errors + + # pylint: enable=too-many-nested-blocks + return ops + + +def rewrite_incomplete_type_support(g, ops, impacted_ops): + """ + for ops that have inclomplete type support, insert casts. + This is needed for some tensor ops in opset7 and for some ops in winml-rs5. + It is not helping performance but better than the model not working at all. + """ + ignored_input_index = { + "Tile": [1], # Tile's second input can only be int64 + "Where": [0], # Where's first input is bool + } + new_ops = [] + org_ops = list(ops) + for op in org_ops: + if op.type in impacted_ops: + cast_inserted = [] + output_dtype = None + ignored_inputs = ignored_input_index.get(op.type) + # insert casts on inputs if the runtime only supports float + for i, input_node in enumerate(op.inputs): + if ignored_inputs and i in ignored_inputs: + continue + + input_name = op.input[i] + dtype = g.get_dtype(input_name) + if dtype is None: + logger.warning("adding Cast for op %s (type is %s)' input: %s, dtype should not be None", + op.name, op.type, input_name) + + if dtype != onnx_pb.TensorProto.FLOAT: + output_dtype = dtype + logger.debug("insert cast for node %s on input %s", op.name, input_name) + if input_node and input_node.type == "Cast" \ + and len(g.find_output_consumers(input_node.output[0])) == 1: + input_node.set_attr("to", onnx_pb.TensorProto.FLOAT) + g.set_dtype(input_name, onnx_pb.TensorProto.FLOAT) + else: + cast_node = 
g.insert_new_node_on_input(op, "Cast", input_name, + to=onnx_pb.TensorProto.FLOAT) + g.set_dtype(cast_node.output[0], onnx_pb.TensorProto.FLOAT) + g.copy_shape(input_name, cast_node.output[0]) + cast_inserted.append(cast_node) + if output_dtype: + # insert reverse cast if needed + for output_name in op.output: + name = utils.make_name(op.name) + logger.debug("insert cast back for node %s on output %s [dtype=%s]", op.name, output_name, + output_dtype) + output_cast = g.insert_new_node_on_output("Cast", output_name, name=name, + to=output_dtype) + g.set_dtype(output_cast.output[0], output_dtype) + g.copy_shape(output_name, output_cast.output[0]) + cast_inserted.append(output_cast) + + if cast_inserted: + new_ops.extend(cast_inserted) + new_ops.append(op) + return new_ops + + +def rewrite_incomplete_type_support_rs5(g, ops): + return rewrite_incomplete_type_support(g, ops, ["Unsqueeze", "Mul", "Concat", "Slice", "Transpose"]) + + +def rewrite_incomplete_type_support_rs6(g, ops): + impacted_ops = [ + "Div", + "IsNaN", + "Max", + "Min", + "ReduceSum", + "Slice", + "Split", + "Tile", + "Transpose", + "Where" + ] + # TODO: logic to insert cast has bug, not all inputs of one node need cast + # for example, slice's input "starts" doesn't need it. 
+ if g.opset == 10: + impacted_ops.remove("Slice") + + return rewrite_incomplete_type_support(g, ops, impacted_ops) + + +def tensorflow_onnx_mapping(g, ops_mapping, initialized_tables=None, is_tflite=False, dequantize=False): + logger.verbose("Mapping TF node to ONNX node(s)") + mapped_op = collections.Counter() + unmapped_op = collections.Counter() + exceptions = [] + if initialized_tables is None: + initialized_tables = {} + + ops = list(g.get_nodes()) + for node in ops: + logger.debug("Process node: %s\n%s", node.name, node.summary) + + if node.need_skip(): + logger.debug("explicitly skip node " + node.name) + continue + + op = node.type + map_info = ops_mapping.get(op) + if map_info is None: + unmapped_op[op] += 1 + if not is_tflite: + logger.error("Tensorflow op [%s: %s] is not supported", node.name, op) + continue + mapped_op[op] += 1 + + func, kwargs = map_info + if kwargs: + # if there is a tf_op/onnx_op key we'll map the old type to a new type + converted_op = kwargs.get("tf_op" if is_tflite else "onnx_op") + if converted_op: + # sometimes the handler wants to know what the old op name was + kwargs["tfl_op" if is_tflite else "tf_op"] = op + node.type = converted_op + body_graphs = node.get_body_graphs() + if body_graphs: + for attr, b_g in body_graphs.items(): + logger.debug("start handling subgraph of %s's attribute %s", node.name, attr) + b_g.topological_sort(b_g.get_nodes()) + # we assume only ONNX nodes have subgraph defined in pre-rewriters. + # that means, if we create node having subgraphs in this step, the + # created subgraphs' nodes won't be mapped. 
+ m_ops, unm_ops, body_exceptions = tensorflow_onnx_mapping(b_g, ops_mapping) + mapped_op += m_ops + unmapped_op += unm_ops + # topological_sort on the body in case processing has changed the order + b_g.topological_sort(b_g.get_nodes()) + exceptions.extend(body_exceptions) + logger.debug("finish handling subgraph of %s's attribute %s", node.name, attr) + + try: + func(g, node, **kwargs, initialized_tables=initialized_tables, dequantize=dequantize) + if not is_tflite: + # tensorflow nodes must be converted in the next pass + node.skip_conversion = True + except Exception as ex: + try: + # If the graph is corrupt from the exception this can fail + summary = node.summary + except Exception: + summary = "" + logger.error("Failed to convert node %r (fct=%r)\n%r", + node.name, func, summary, exc_info=1) + exceptions.append(ex) + + return mapped_op, unmapped_op, exceptions + + +def transpose_inputs(ctx, inputs_as_nchw): + """Insert a transpose from NHWC to NCHW on model input on users request.""" + ops = [] + for node in ctx.get_nodes(): + for idx, output_name in enumerate(node.output): + if output_name in inputs_as_nchw: + shape = ctx.get_shape(output_name) + if len(shape) != len(constants.NCHW_TO_NHWC): + logger.warning("transpose_input for %s: shape must be rank 4, ignored" % output_name) + ops.append(node) + continue + # insert transpose + op_name = utils.make_name(node.name) + transpose = ctx.insert_new_node_on_output("Transpose", output_name, name=op_name) + transpose.set_attr("perm", constants.NCHW_TO_NHWC) + ctx.copy_shape(output_name, transpose.output[0]) + ctx.set_shape(output_name, np.array(shape)[constants.NHWC_TO_NCHW]) + ops.append(transpose) + ops.append(node) + continue + ops.append(node) + ctx.reset_nodes(ops) + + +def topological_sort(g, continue_on_error): + ops = g.get_nodes() + if not continue_on_error: + g.topological_sort(ops) + else: + try: + g.topological_sort(ops) + except: # pylint: disable=bare-except + # if we continue on error, ignore graph 
cycles so we can report all missing ops + pass + + +def run_rewriters(g, funcs, continue_on_error): + """Rewrite the original graph and body graphs of nodes""" + # NOTE(wayuanho): + # 1. we don't sort graph here, rewriter is expected to do it on its own. + # 2. the graph here may have circles, current topological_sort cannot handle it. + for func in funcs: + try: + ops = func(g, g.get_nodes()) + g.reset_nodes(ops) + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + logger.error("rewriter %s: exception %s", func, ex) + ex_ext = traceback.format_exception(type_, value_, traceback_) + if continue_on_error: + logger.info(ex_ext) + else: + raise ex + + if utils.is_debug_mode(): + broken_outputs = g.check_integrity() + if broken_outputs: + logging.error( + "After rewriter %s, graph breaks at outputs %s", + func.__name__, broken_outputs + ) + + if g.contained_graphs: + for dict_val in g.contained_graphs.values(): + for attr_name, b_g in dict_val.items(): + run_rewriters(b_g, funcs, attr_name) + + +def process_tf_graph(tf_graph, continue_on_error=False, verbose=False, target=None, + opset=None, custom_op_handlers=None, custom_rewriter=None, + extra_opset=None, shape_override=None, inputs_as_nchw=None, + input_names=None, output_names=None, ignore_default=None, use_default=None, + is_subgraph=False, const_node_values=None, tensors_to_rename=None, + initialized_tables=None, tflite_path=None, dequantize=False): + """Convert tensorflow graph to onnx graph. 
def process_tf_graph(tf_graph, continue_on_error=False, verbose=False, target=None,
                     opset=None, custom_op_handlers=None, custom_rewriter=None,
                     extra_opset=None, shape_override=None, inputs_as_nchw=None,
                     input_names=None, output_names=None, ignore_default=None, use_default=None,
                     is_subgraph=False, const_node_values=None, tensors_to_rename=None,
                     initialized_tables=None, tflite_path=None, dequantize=False):
    """Convert tensorflow graph to onnx graph.

    Args:
        tf_graph: tensorflow graph
        continue_on_error: if an op can't be processed (aka there is no mapping), continue
        verbose: print summary stats (deprecated)
        target: list of workarounds applied to help certain platforms
        opset: the opset to be used (int, default is latest)
        custom_op_handlers: dictionary of custom ops handlers
        custom_rewriter: list of custom graph rewriters
        extra_opset: list of extra opset's, for example the opset's used by custom ops
        shape_override: dict with inputs that override the shapes given by tensorflow
        inputs_as_nchw: transpose inputs in list from nchw to nhwc
        input_names: list of input node names in graph, input name format as node_name:port_id. Optional.
        output_names: list of output node names in graph, format is node_name:port_id. Optional for tflite.
        ignore_default: list of node names of PlaceholderWithDefault ops to change into Placeholder ops
        use_default: list of node names of PlaceholderWithDefault ops to change into Identity ops using the default
        const_node_values: a dict returned by compress_graph_def mapping node names to tensor values
        tensors_to_rename: an optional dict (string->string) mapping tensor names to new names
        initialized_tables: mapping from table shared_names to tuple of keys and values of table
        tflite_path: Path to a tflite file to convert. If used, pass None to tf_graph

    Return:
        onnx graph
    """
    # NOTE: process_parsed_graph and Graph are always given tensors post-rename.
    # process_tf_graph (this function) gets tensors pre-rename.
    if verbose:
        logger.warning("Argument verbose for process_tf_graph is deprecated. Please use --verbose option instead.")
    del verbose

    opset = utils.find_opset(opset)
    if not is_subgraph:
        logger.info("Using tensorflow=%s, onnx=%s, tf2onnx=%s/%s",
                    get_tf_version(), utils.get_onnx_version(), tf2onnx.__version__,
                    tf2onnx.version.git_version[:6])
        # BUGFIX: message had no %s placeholder, so the opset argument was
        # never rendered (and logging raised a formatting error internally).
        logger.info("Using opset %s", opset)
        if opset > schemas.get_max_supported_opset_version():
            logger.warning("Currently installed onnx package %s is too low to support opset %s, "
                           "please upgrade onnx package to avoid potential conversion issue.",
                           utils.get_onnx_version(), opset)

    if shape_override is None:
        shape_override = {}
    if inputs_as_nchw is None:
        inputs_as_nchw = []
    if target is None:
        target = constants.DEFAULT_TARGET

    def check_io(input_names, output_names, output_shapes):
        # Validate that every user-specified input/output tensor actually exists.
        io_to_check = []
        if input_names:
            io_to_check.extend(input_names)
        if output_names:
            io_to_check.extend(output_names)
        if io_to_check:
            # check output existence in case user passed in wrong output ids
            non_exists = set(io_to_check) - set(output_shapes.keys())
            if non_exists:
                logger.error("\nFailed to convert: inputs/outputs specified do not exist, make sure your passed"
                             "in format: input/output_node_name:port_id. Problematic inputs/outputs are: %s \n",
                             non_exists)
                raise ValueError("Inputs/Outputs Not Found")

    def rename_tensors_in_dict(d):
        if tensors_to_rename is None:
            return d
        return {tensors_to_rename.get(k, k): v for k, v in d.items()}

    def rename_tensors_in_list(tensors):
        if tensors_to_rename is None or tensors is None:
            return tensors
        return [tensors_to_rename.get(t, t) for t in tensors]

    def rename_tensors_in_nodes(onnx_nodes):
        if tensors_to_rename is None:
            return
        for n in onnx_nodes:
            n.input[:] = rename_tensors_in_list(n.input)
            n.output[:] = rename_tensors_in_list(n.output)

    if tflite_path is not None:
        # tflite front-end: every subgraph is converted; the last one is the main graph.
        tflite_graphs, opcodes, model, tensor_shapes = read_tflite_model(tflite_path)
        main_g = None
        inputs_as_nchw = rename_tensors_in_list(inputs_as_nchw)
        for i, tfl_graph in enumerate(tflite_graphs):
            is_main_g = i == len(tflite_graphs) - 1
            prefix = '' if is_main_g else tfl_graph.Name().decode() + '_'
            tensor_shapes_from_interpreter = None
            if is_main_g:
                tensor_shapes_from_interpreter = tensor_shapes
            onnx_nodes, op_cnt, attr_cnt, output_shapes, dtypes, f_inputs, f_outputs, graph_name = \
                parse_tflite_graph(tfl_graph, opcodes, model, prefix, tensor_shapes_from_interpreter)
            g_inputs = f_inputs
            g_outputs = f_outputs
            if is_main_g:
                # Override IO in main graph
                check_io(input_names, output_names, output_shapes)
                if input_names is not None:
                    g_inputs = input_names
                if output_names is not None:
                    g_outputs = output_names
            rename_tensors_in_nodes(onnx_nodes)
            g_inputs = rename_tensors_in_list(g_inputs)
            g_outputs = rename_tensors_in_list(g_outputs)
            output_shapes = rename_tensors_in_dict(output_shapes)
            dtypes = rename_tensors_in_dict(dtypes)
            g = Graph(onnx_nodes, output_shapes, dtypes, target, opset, extra_opset, g_inputs, g_outputs, is_subgraph)
            fg = process_parsed_graph(g, custom_op_handlers, inputs_as_nchw, continue_on_error, custom_rewriter,
                                      target, g_outputs, {}, {}, {}, op_cnt, attr_cnt,
                                      is_tflite=True, dequantize=dequantize)
            fg.graph_name = graph_name
            if is_main_g:
                main_g = fg
            else:
                set_function(graph_name, fg)

        return main_g

    is_func = is_function(tf_graph)
    if not is_func:
        tf_graph = infer_shape(tf_graph, shape_override)

    outputs_to_values, outputs_to_dtypes = compute_const_folding_using_tf(tf_graph, const_node_values, output_names)

    onnx_nodes, op_cnt, attr_cnt, output_shapes, dtypes, _ = \
        tensorflow_to_onnx(tf_graph, shape_override, const_node_values, ignore_default, use_default)
    if not is_subgraph:
        # make tf2onnx internal subgraphs from the tensorflow subgraphs
        ordered_func = resolve_functions(tf_graph)
        for func in ordered_func:
            f_inputs_names = [t.name for t in func.inputs]
            f_output_names = [t.name for t in func.outputs]
            f_inputs_names = rename_tensors_in_list(f_inputs_names)
            f_output_names = rename_tensors_in_list(f_output_names)
            fg = process_tf_graph(func, continue_on_error, False, target, opset,
                                  custom_op_handlers, custom_rewriter,
                                  extra_opset, shape_override, inputs_as_nchw,
                                  f_inputs_names, f_output_names, is_subgraph=True,
                                  const_node_values=const_node_values, tensors_to_rename=tensors_to_rename,
                                  initialized_tables=initialized_tables)
            fg.graph_name = func.name
            set_function(func.name, fg)

    check_io(input_names, output_names, output_shapes)

    rename_tensors_in_nodes(onnx_nodes)
    input_names = rename_tensors_in_list(input_names)
    output_names = rename_tensors_in_list(output_names)
    output_shapes = rename_tensors_in_dict(output_shapes)
    dtypes = rename_tensors_in_dict(dtypes)
    inputs_as_nchw = rename_tensors_in_list(inputs_as_nchw)
    g = Graph(onnx_nodes, output_shapes, dtypes, target, opset, extra_opset, input_names, output_names, is_subgraph)
    g = process_parsed_graph(g, custom_op_handlers, inputs_as_nchw, continue_on_error, custom_rewriter, target,
                             output_names, initialized_tables, outputs_to_values, outputs_to_dtypes, op_cnt, attr_cnt)
    return g
def process_parsed_graph(g, custom_op_handlers, inputs_as_nchw, continue_on_error, custom_rewriter, target,
                         output_names, initialized_tables, outputs_to_values, outputs_to_dtypes, op_cnt, attr_cnt,
                         is_tflite=False, dequantize=False):
    """Run rewriters and the tf->onnx op mapping over a parsed Graph, returning it onnx-ready."""
    if is_tflite:
        # tflite graphs are first rewritten / mapped onto tf-style ops, then fall
        # through to the regular tf->onnx pipeline below.
        tfl_rewriters = []
        if dequantize:
            tfl_rewriters.append(rewrite_tfl_qdq)
        tfl_rewriters.append(rewrite_tfl_scan_outputs)
        run_rewriters(g, tfl_rewriters, continue_on_error)
        tfl_ops_mapping = handler.tfl_op.create_tfl_to_tf_mapping()
        _, _, exceptions = tensorflow_onnx_mapping(g, tfl_ops_mapping, is_tflite=True, dequantize=False)
        if exceptions and not continue_on_error:
            raise exceptions[0]

    # create ops mapping for the desired opsets
    ops_mapping = handler.tf_op.create_mapping(g.opset, g.extra_opset)

    # apply custom ops on top of the assembled opset. We can either complement the opset
    # or override existing ops with a custom op.
    if custom_op_handlers is not None:
        # below is a bit tricky since there are a few api's:
        # 1. the future way we want custom ops to be registered with the @tf_op decorator. Those handlers will be
        #    registered via the decorator on load of the module ... nothing is required here.
        # 2. the old custom op api: a dictionary of {name: (func, args[])}
        #    We deal with this by using a compat_handler that wraps the old handler with a new style handler.
        #    This is temporary to give people time to move to the new api; after tf2onnx-1.5 we want to remove this.
        custom_opset = {}
        for k, v in custom_op_handlers.items():
            # FIXME: remove this after tf2onnx-1.5
            def compat_handler(ctx, node, **kwargs):
                # wrap old handler
                name = node.name
                args = kwargs["args"]
                func = kwargs["func"]
                return func(ctx, node, name, args)

            args = v[1]
            kwargs = {"func": v[0]}
            if args:
                onnx_op = args[0]
                kwargs["onnx_op"] = onnx_op
                args = args[1:]
            kwargs["args"] = args
            new_handler = handler.tf_op(k,
                                        domain=constants.TENSORFLOW_OPSET.domain,
                                        kwargs=kwargs)
            new_handler.register_compat_handler(compat_handler, 1)
            custom_opset[k] = (compat_handler, kwargs)
        ops_mapping.update(custom_opset)

    if inputs_as_nchw:
        transpose_inputs(g, inputs_as_nchw)

    fold_constants_using_tf(g, outputs_to_values, outputs_to_dtypes)

    # pre-processing graph rewrites
    # bi-directional re-writer should be placed after single directional re-writer
    rewriters = [
        # single directional
        rewrite_constant_fold,
        rewrite_quantize_and_dequantize,
        rewrite_transpose,
        rewrite_flatten,
        rewrite_random_uniform,
        rewrite_random_uniform_fold_const,
        rewrite_random_normal,
        rewrite_dropout,
        rewrite_eye,
        rewrite_leakyrelu,
        rewrite_thresholded_relu,
        rewrite_conv2d_with_pad,
        rewrite_single_direction_lstm,
        # bi-directional
        rewrite_bi_direction_lstm,
        rewrite_single_direction_gru,
        rewrite_bi_direction_gru,
        rewrite_custom_rnn_cell,
        rewrite_generic_loop, rewrite_cond,
        rewrite_biasadd_with_conv2d,
        rewrite_layer_normalization,
        rewrite_gemm,
    ]

    if custom_rewriter is not None:
        rewriters.extend(custom_rewriter)

    run_rewriters(g, rewriters, continue_on_error)

    # some nodes may already be copied into an inner Graph, so remove them from the main Graph.
    g.delete_unused_nodes(output_names)
    topological_sort(g, continue_on_error)

    mapped_op, unmapped_op, exceptions = \
        tensorflow_onnx_mapping(g, ops_mapping, initialized_tables, dequantize=dequantize)
    if unmapped_op:
        logger.error("Unsupported ops: %s", unmapped_op)
    if exceptions and not continue_on_error:
        raise exceptions[0]

    # post-processing rewriters
    late_rewriters = []
    if constants.TARGET_RS5 in target:
        late_rewriters.append(rewrite_incomplete_type_support_rs5)
    if constants.TARGET_RS6 in target:
        late_rewriters.append(rewrite_incomplete_type_support_rs6)
    if late_rewriters:
        run_rewriters(g, late_rewriters, continue_on_error)

    # onnx requires topological sorting
    topological_sort(g, continue_on_error)

    g.update_proto()

    # BUGFIX: "Summay" -> "Summary" in the log message.
    logger.verbose(
        "Summary Stats:\n"
        "\ttensorflow ops: {}\n"
        "\ttensorflow attr: {}\n"
        "\tonnx mapped: {}\n"
        "\tonnx unmapped: {}".format(op_cnt, attr_cnt, mapped_op, unmapped_op))

    return g
def tf_optimize(input_names, output_names, graph_def, fold_constant=True):
    """optimize tensorflow graph.
    This is in tf_loader but some apps call this so we proxy into tf_loader
    to keep them working."""
    # Pure delegation: tf_loader owns the real implementation.
    delegate = tf2onnx.tf_loader.tf_optimize
    return delegate(input_names, output_names, graph_def, fold_constant)
logger = logging.getLogger(__file__)


#
# mapping dtypes from onnx to numpy
#
# BUGFIX: `np.bool` and `np.object` are aliases for the builtins that were
# deprecated in NumPy 1.20 and removed in 1.24; use the real numpy scalar
# types `np.bool_` / `np.object_` (identical behavior on older NumPy).
ONNX_TO_NUMPY_DTYPE = {
    onnx_pb.TensorProto.FLOAT: np.float32,
    onnx_pb.TensorProto.FLOAT16: np.float16,
    onnx_pb.TensorProto.DOUBLE: np.float64,
    onnx_pb.TensorProto.INT32: np.int32,
    onnx_pb.TensorProto.INT16: np.int16,
    onnx_pb.TensorProto.INT8: np.int8,
    onnx_pb.TensorProto.UINT8: np.uint8,
    onnx_pb.TensorProto.UINT16: np.uint16,
    onnx_pb.TensorProto.INT64: np.int64,
    onnx_pb.TensorProto.UINT64: np.uint64,
    onnx_pb.TensorProto.BOOL: np.bool_,
    onnx_pb.TensorProto.COMPLEX64: np.complex64,
    onnx_pb.TensorProto.COMPLEX128: np.complex128,
    # strings are stored as python objects on the numpy side
    onnx_pb.TensorProto.STRING: np.object_,
}

#
# onnx dtype names
#
ONNX_DTYPE_NAMES = {
    onnx_pb.TensorProto.FLOAT: "float",
    onnx_pb.TensorProto.FLOAT16: "float16",
    onnx_pb.TensorProto.DOUBLE: "double",
    onnx_pb.TensorProto.INT32: "int32",
    onnx_pb.TensorProto.INT16: "int16",
    onnx_pb.TensorProto.INT8: "int8",
    onnx_pb.TensorProto.UINT8: "uint8",
    onnx_pb.TensorProto.UINT16: "uint16",
    onnx_pb.TensorProto.INT64: "int64",
    onnx_pb.TensorProto.STRING: "string",
    onnx_pb.TensorProto.BOOL: "bool",
    onnx_pb.TensorProto.COMPLEX64: "complex64",
    onnx_pb.TensorProto.COMPLEX128: "complex128"
}


class TensorValueInfo(object):
    """Lightweight record of a tensor's id, dtype and shape within a graph."""
    def __init__(self, tensor_id, g):
        self.id = tensor_id
        # NOTE(review): dtype/shape are only assigned when tensor_id is non-empty,
        # so callers must check `id` before accessing them — confirm before relying
        # on these attributes existing.
        if self.id:
            self.dtype = g.get_dtype(tensor_id)
            self.shape = g.get_shape(tensor_id)


ONNX_UNKNOWN_DIMENSION = -1
ONNX_EMPTY_INPUT = ""

# index for internally generated names
INTERNAL_NAME = 1
# Fake onnx op type which is used for Graph input.
GRAPH_INPUT_TYPE = "NON_EXISTENT_ONNX_TYPE"


def make_name(name):
    """Make op name for inserted ops."""
    global INTERNAL_NAME
    INTERNAL_NAME += 1
    return "{}__{}".format(name, INTERNAL_NAME)


def split_nodename_and_shape(name):
    """input name with shape into name and shape."""
    # input takes in most cases the format name:0, where 0 is the output number
    # in some cases placeholders don't have a rank which onnx can't handle so we let users override the shape
    # by appending the shape, ie : [1,28,28,3]
    name_pattern = r"(?:([\w\d/\-\._:]+)(\[[\-\d,]+\])?),?"
    parts = re.split(name_pattern, name)
    inputs = []
    shapes = {}
    # re.split yields (name, optional-shape, separator) triples starting at index 1
    for idx in range(1, len(parts), 3):
        node = parts[idx]
        inputs.append(node)
        raw_shape = parts[idx + 1]
        if raw_shape is not None:
            dims = [int(d) for d in raw_shape[1:-1].split(",")]
            shapes[node] = [d if d >= 0 else None for d in dims]
    return inputs, (shapes if shapes else None)


def map_numpy_to_onnx_dtype(np_dtype):
    """Reverse-lookup the onnx dtype enum for a numpy dtype."""
    for onnx_dtype, candidate in ONNX_TO_NUMPY_DTYPE.items():
        if candidate == np_dtype:
            return onnx_dtype
    raise ValueError("unsupported numpy dtype '%s' for mapping to onnx" % np_dtype)


def map_onnx_to_numpy_type(onnx_type):
    """Lookup the numpy dtype for an onnx dtype enum."""
    return ONNX_TO_NUMPY_DTYPE[onnx_type]


def node_name(name):
    """Get node name without io#."""
    return name.split(":", 1)[0]


def make_onnx_shape(shape):
    """shape with -1 is not valid in onnx ... make it a named unknown dim."""
    if not shape:
        # don't do this if input is a scalar (or shape is None/empty)
        return shape
    return [dim if dim != -1 else make_name("unk") for dim in shape]


def port_name(name, nr=0):
    """Map node output number to name."""
    return "{}:{}".format(name, nr)


def make_onnx_inputs_outputs(name, elem_type, shape, **kwargs):
    """Wrapper for creating onnx graph inputs or outputs
       name,  # type: Text
       elem_type,  # type: TensorProto.DataType
       shape,  # type: Optional[Sequence[int]]
    """
    dtype = onnx_pb.TensorProto.UNDEFINED if elem_type is None else elem_type
    return helper.make_tensor_value_info(name, dtype, make_onnx_shape(shape), **kwargs)


def find_opset(opset):
    """Find opset."""
    if opset is None or opset == 0:
        opset = defs.onnx_opset_version()
        # if we use a newer onnx opset than most runtimes support, default to the one most supported
        opset = min(opset, constants.PREFERRED_OPSET)
    return opset
def save_onnx_model(save_path_root, onnx_file_name, feed_dict, model_proto, include_test_data=False, as_text=False,
                    external_tensor_storage=None):
    """Save onnx model as file. Save a pbtxt file as well if as_text is True."""
    save_path = save_path_root
    # exist_ok avoids the check-then-create race of the old exists()+makedirs()
    os.makedirs(save_path, exist_ok=True)

    if include_test_data:
        data_path = os.path.join(save_path, "test_data_set_0")
        os.makedirs(data_path, exist_ok=True)

        # enumerate replaces the old manually-incremented counter
        for i, (data_key, data) in enumerate(feed_dict.items()):
            t = numpy_helper.from_array(data)
            t.name = data_key
            data_full_path = os.path.join(data_path, "input_" + str(i) + ".pb")
            save_protobuf(data_full_path, t)

    if external_tensor_storage is None:
        target_path = os.path.join(save_path, onnx_file_name + ".onnx")
        save_protobuf(target_path, model_proto)
    else:
        # model with external tensors: write a zip, then unpack it next to it
        zip_path = os.path.join(save_path, onnx_file_name + ".zip")
        save_onnx_zip(zip_path, model_proto, external_tensor_storage)
        with zipfile.ZipFile(zip_path, 'r') as z:
            z.extractall(save_path)
        target_path = os.path.join(save_path, "__MODEL_PROTO.onnx")

    if as_text:
        save_protobuf(target_path + ".pbtxt", model_proto, as_text=True)

    return target_path


def save_onnx_zip(target_path, model_proto, external_tensor_storage):
    """Write model_proto plus its external tensors into a single zip file."""
    with zipfile.ZipFile(target_path, 'w') as z:
        z.writestr("__MODEL_PROTO.onnx", model_proto.SerializeToString())
        for k, v in external_tensor_storage.name_to_tensor_data.items():
            z.writestr(k, v)


def make_sure(bool_val, error_msg, *args):
    """Raise ValueError with a %-formatted message unless bool_val is truthy."""
    if not bool_val:
        raise ValueError("make_sure failure: " + error_msg % args)


def construct_graph_from_nodes(parent_g, nodes, outputs, shapes, dtypes):
    """Construct Graph from nodes and outputs with specified shapes and dtypes."""
    # pylint: disable=protected-access
    g = parent_g.create_new_graph_with_same_config()
    g.parent_graph = parent_g
    nodes = set(nodes)
    all_outputs = set()
    for op in nodes:
        all_outputs |= set(op.output)

        # move any body graphs of this node under the new graph
        branches = {}
        body_graphs = op.graph.contained_graphs.pop(op.name, None)
        if body_graphs:
            for attr_name, body_graph in body_graphs.items():
                body_graph.parent_graph = g
                branches[attr_name] = body_graph

        _ = g.make_node(op.type, op.input, outputs=op.output, attr=op.attr, name=op.name,
                        skip_conversion=op.skip_conversion, infer_shape_dtype=False, branches=branches)

    # inherit shapes/dtypes from the parent for outputs we did not re-infer
    for i in all_outputs:
        if i not in g._output_shapes:
            g._output_shapes[i] = parent_g._output_shapes[i]
        if i not in g._dtypes:
            g._dtypes[i] = parent_g._dtypes[i]

    # handle cell graph: insert identity node, since sometimes we need output same output_id
    # as state_output and scan_out, but ONNX don't allow the same output_id to appear more
    # than once as output node.
    new_output_names = []
    for output, shape, dtype in zip(outputs, shapes, dtypes):
        node = g.make_node("Identity", inputs=[output], op_name_scope="sub_graph_ending_node",
                           shapes=[shape], dtypes=[dtype], infer_shape_dtype=False)
        new_output_names.append(node.output[0])
    g.outputs = new_output_names
    return g


def tf_name_scope(name):
    """Return the tf name scope of a node name (everything before the last '/')."""
    return '/'.join(name.split('/')[:-1])


def get_temp_directory():
    """Return TF2ONNX_TEMP_DIRECTORY if set, otherwise a fresh mkdtemp directory.

    BUGFIX: the old `os.environ.get(var, tempfile.mkdtemp())` evaluated
    mkdtemp() eagerly, creating (and leaking) a brand-new temp directory on
    every call even when the environment variable was set.
    """
    path = os.environ.get("TF2ONNX_TEMP_DIRECTORY")
    if path is not None:
        return path
    return tempfile.mkdtemp()


def delete_directory(path):
    """Recursively delete path if it exists."""
    if os.path.exists(path):
        shutil.rmtree(path)


def save_protobuf(path, message, as_text=False):
    """Serialize a protobuf message to path (text format when as_text)."""
    dir_name = os.path.dirname(path)
    if dir_name:
        os.makedirs(dir_name, exist_ok=True)
    if as_text:
        with open(path, "w") as f:
            f.write(text_format.MessageToString(message))
    else:
        with open(path, "wb") as f:
            f.write(message.SerializeToString())


def model_proto_from_file(model_path):
    """Load a ModelProto from a .onnx file."""
    model_proto = ModelProto()
    with open(model_path, "rb") as f:
        model_proto.ParseFromString(f.read())
    return model_proto


def model_proto_from_zip(zip_path, external_tensor_storage):
    """Load a ModelProto plus external tensor data from a zip written by save_onnx_zip."""
    model_proto = ModelProto()
    with zipfile.ZipFile(zip_path, 'r') as z:
        for n in z.namelist():
            # BUGFIX: member file handles were opened without ever being closed
            with z.open(n) as f:
                if n.endswith(".onnx"):
                    model_proto.ParseFromString(f.read())
                else:
                    external_tensor_storage.name_to_tensor_data[n] = f.read()
    return model_proto


def is_list_or_tuple(obj):
    """True for list/tuple containers (not arbitrary iterables)."""
    return isinstance(obj, (list, tuple))
def is_unknown_dimension(dim):
    """ Return true if dim is not a positive integer value. """
    if dim is None or not isinstance(dim, int):
        return True
    return dim <= 0


def merge_shapes(shape1, shape2):
    """
    Merge 2 shapes, return merged shape, choose more specific dimension value from either side.
    Raise exception for mismatch.
    """
    if shape1 is None:
        return shape2
    if shape2 is None:
        return shape1

    make_sure(is_list_or_tuple(shape1), "invalid type for shape1")
    make_sure(is_list_or_tuple(shape2), "invalid type for shape2")
    make_sure(len(shape1) == len(shape2), "shapes rank mismatch: shape1=%s, shape2=%s", shape1, shape2)

    merged = []
    for d1, d2 in zip(shape1, shape2):
        # prefer the known (positive) dimension; if both are known they must agree
        d = d1
        if is_unknown_dimension(d1):
            d = d2
        elif not is_unknown_dimension(d2):
            make_sure(d1 == d2, "shapes dimension mismatch: shape1=%s, shape2=%s", shape1, shape2)
        merged.append(d)
    return merged


def are_shapes_compatible(src, dest):
    """
    Returns True iff src is compatible with dest.
    None is compatible with all shapes, different ranks are not considered as compatible
    """
    # BUGFIX: only catch the ValueError raised by make_sure inside merge_shapes;
    # the previous bare `except:` also swallowed genuine programming errors.
    try:
        merge_shapes(src, dest)
        return True
    except ValueError:
        return False
""" + if src is None: + return dest is None + if dest is None: + return src is None + + make_sure(is_list_or_tuple(src), "invalid type for src") + make_sure(is_list_or_tuple(dest), "invalid type for dest") + + if len(src) != len(dest): + return False + return all(i == j for i, j in zip(src, dest)) + + +def create_vague_shape_like(shape): + make_sure(len(shape) >= 0, "rank should be >= 0") + return [-1 for i in enumerate(shape)] + + +def get_onnx_version(): + return __version__ + + +def make_opsetid(domain, version): + make_sure(isinstance(version, int), "version must be an integer") + return helper.make_opsetid(domain, version) + + +def is_onnx_domain(domain): + if domain is None or domain == "": + return True + return False + + +def parse_bool(val): + if val is None: + return False + return val.lower() in ("yes", "true", "t", "y", "1") + + +_is_debug_mode = parse_bool(os.environ.get(constants.ENV_TF2ONNX_DEBUG_MODE)) + + +def is_debug_mode(): + return _is_debug_mode + + +def set_debug_mode(enabled): + global _is_debug_mode + _is_debug_mode = enabled + + +def get_max_value(np_dtype): + return np.iinfo(np_dtype).max + + +def get_min_value(np_dtype): + return np.iinfo(np_dtype).min + + +def get_url(url, path, max_retries=5): + """ Download url and save to path. """ + retries = Retry(total=max_retries, backoff_factor=0.1, status_forcelist=[500, 502, 503, 504]) + adapter = HTTPAdapter(max_retries=retries) + session = requests.Session() + session.mount("http://", adapter) + session.mount("https://", adapter) + + response = session.get(url, allow_redirects=True) + if response.status_code not in [200]: + response.raise_for_status() + + dir_name = os.path.dirname(path) + if dir_name: + os.makedirs(dir_name, exist_ok=True) + + with open(path, "wb") as f: + f.write(response.content) + + +def have_same_inference_value(g, output_1, output_2): + """ + If two outputs have the same value in inference. 
def have_same_inference_value(g, output_1, output_2):
    """
    If two outputs have the same value in inference.
    Check whether they come from the same subgraph and the same subgraphs
    contain nodes with the same attributes and share the same ancestors.
    """

    def is_same(node_1, node_2):
        # identical node objects are trivially the same
        if node_1 == node_2:
            return True
        # check body graph
        if node_1.get_body_graphs() or node_2.get_body_graphs():
            logger.warning("Comparing two nodes containing body graph isn't supported.")
            return False
        # check domain
        if node_1.domain != node_2.domain:
            return False
        # check type
        if node_1.type != node_2.type:
            return False
        # check onnx attributes
        if node_1.get_onnx_attrs().keys() != node_2.get_onnx_attrs().keys():
            return False
        for name in node_1.get_onnx_attrs().keys():  # pylint: disable=consider-iterating-dictionary
            if node_1.get_attr_value(name) != node_2.get_attr_value(name):
                return False
        return True

    if output_1 == output_2:
        return True
    node_1 = g.get_node_by_output(output_1)
    node_2 = g.get_node_by_output(output_2)
    # compare their domain, attr, etc. see __eq__ in Node class
    if not is_same(node_1, node_2):
        return False

    # BUGFIX: zip() silently truncates to the shorter list, so two variadic
    # nodes (e.g. Concat) with a different number of inputs could wrongly
    # compare equal; require matching arity before the pairwise recursion.
    if len(node_1.input) != len(node_2.input):
        return False

    for inp_1, inp_2 in zip(node_1.input, node_2.input):
        if not have_same_inference_value(g, inp_1, inp_2):
            return False
    return True


def is_tf_reverse_op(op):
    """True for tf reverse ops."""
    return op.type in ("ReverseV2", "ReverseSequence")


def is_tf_concat_op(op):
    """True for tf concat ops."""
    return op.type in ("Concat", "ConcatV2", "ConcatV3")


def is_tf_tensor_array_gather_op(op):
    """True for tf TensorArrayGather ops."""
    return op.type in ("TensorArrayGatherV2", "TensorArrayGatherV3")


def is_tf_tensor_array_write_op(op):
    """True for tf TensorArrayWrite ops."""
    return op.type in ("TensorArrayWriteV2", "TensorArrayWriteV3")


def is_tf_tensor_array_op(op):
    """True for tf TensorArray ops."""
    return op.type in ("TensorArrayV2", "TensorArrayV3")


def is_tf_loopcond_op(op):
    """True for the tf LoopCond op."""
    return op.type == "LoopCond"


def is_tf_select_op(op):
    """True for tf Select ops."""
    return op.type in ("Select", "SelectV2")


def is_tf_slice_op(op):
    """True for the tf Slice op."""
    return op.type == "Slice"


def is_tf_const_op(op):
    """True for tf constant ops."""
    return op.type in ["Const", "ConstV2"]
import constants # pylint: disable=wrong-import-position + +VERBOSE = 15 + +_logging.addLevelName(VERBOSE, "VERBOSE") + + +def _verbose(self, message, *args, **kwargs): + if self.isEnabledFor(VERBOSE): + self._log(VERBOSE, message, args, **kwargs) # pylint: disable=protected-access + + +def getLogger(name=None): # pylint: disable=invalid-name, function-redefined + logger = _logging.getLogger(name) + # Inject verbose method to logger object instead logging module + logger.verbose = types.MethodType(_verbose, logger) + return logger + + +_BASIC_LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s" +_VERBOSE_LOG_FORMAT = "%(asctime)s - %(levelname)s - %(name)s: %(message)s" + + +def basicConfig(**kwargs): # pylint: disable=invalid-name, function-redefined + """ Do basic configuration for the logging system. tf verbosity is updated accordingly. """ + # Choose pre-defined format if format argument is not specified + if "format" not in kwargs: + level = kwargs.get("level", _logging.root.level) + kwargs["format"] = _BASIC_LOG_FORMAT if level >= INFO else _VERBOSE_LOG_FORMAT + # config will make effect only when root.handlers is empty, so add the following statement to make sure it + _logging.root.handlers = [] + _logging.basicConfig(**kwargs) + set_tf_verbosity(_logging.getLogger().getEffectiveLevel()) + + +_LOG_LEVELS = [FATAL, ERROR, WARNING, INFO, VERBOSE, DEBUG] + + +def get_verbosity_level(verbosity, base_level=INFO): + """ If verbosity is specified, return corresponding level, otherwise, return default_level. """ + if verbosity is None: + return base_level + verbosity = min(max(0, verbosity) + _LOG_LEVELS.index(base_level), len(_LOG_LEVELS) - 1) + return _LOG_LEVELS[verbosity] + + +def set_level(level): + """ Set logging level for tf2onnx package. tf verbosity is updated accordingly. 
""" + _logging.getLogger(constants.TF2ONNX_PACKAGE_NAME).setLevel(level) + set_tf_verbosity(level) + + +def set_tf_verbosity(level): + """ Set TF logging verbosity.""" + # TF log is too verbose, adjust it + if TF2: + return + + level = ERROR if level >= INFO else level + tf.logging.set_verbosity(level) + + # TF_CPP_MIN_LOG_LEVEL: + # 0 = all messages are logged (default behavior) + # 1 = INFO messages are not printed + # 2 = INFO and WARNING messages are not printed + # 3 = INFO, WARNING, and ERROR messages are not printed + if level <= INFO: + tf_cpp_min_log_level = "0" + elif level <= WARNING: + tf_cpp_min_log_level = "1" + elif level <= ERROR: + tf_cpp_min_log_level = "2" + else: + tf_cpp_min_log_level = "3" + os.environ["TF_CPP_MIN_LOG_LEVEL"] = tf_cpp_min_log_level + + +@contextmanager +def set_scope_level(level, logger=None): + """ + Set logging level to logger within context, reset level to previous value when exit context. + TF verbosity is NOT affected. + """ + if logger is None: + logger = getLogger() + + current_level = logger.level + logger.setLevel(level) + + try: + yield logger + finally: + logger.setLevel(current_level) diff --git a/lib/python3.10/site-packages/tf2onnx/version.py b/lib/python3.10/site-packages/tf2onnx/version.py new file mode 100644 index 0000000000000000000000000000000000000000..a55e6d19c6dd99b59bd5d130d9efc40b17826f13 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/version.py @@ -0,0 +1,3 @@ + +version = '1.8.4' +git_version = 'cd55bf602db65862f7ce77430ac90ae3e8ae0218'