Dataset columns: python_code | repo_name | file_path
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Thread-local context managers for AutoGraph."""
import enum
import inspect
import threading
from nvidia.dali._autograph.utils import ag_logging
from nvidia.dali._autograph.utils.all_utils import export_symbol
stacks = threading.local()
def _control_ctx():
if not hasattr(stacks, 'control_status'):
stacks.control_status = [_default_control_status_ctx()]
return stacks.control_status
@export_symbol('__internal__.autograph.control_status_ctx', v1=[])
def control_status_ctx():
"""Returns the current control context for autograph.
This method is useful when calling `tf.__internal__.autograph.tf_convert`.
The context will be used by tf_convert to determine whether it should convert
the input function. Sample usage:
```
def foo(func):
  return tf.__internal__.autograph.tf_convert(
      func, ctx=tf.__internal__.autograph.control_status_ctx())()
```
Returns:
The current control context of autograph.
"""
ret = _control_ctx()[-1]
return ret
class Status(enum.Enum):
UNSPECIFIED = 0
ENABLED = 1
DISABLED = 2
class ControlStatusCtx(object):
"""A context that tracks whether autograph is enabled by the user."""
def __init__(self, status, options=None):
self.status = status
self.options = options
def __enter__(self):
_control_ctx().append(self)
return self
def __repr__(self):
return '{}[status={}, options={}]'.format(
self.__class__.__name__, self.status, self.options)
def __exit__(self, unused_type, unused_value, unused_traceback):
assert _control_ctx()[-1] is self
_control_ctx().pop()
class NullCtx(object):
"""Helper substitute for contextlib.nullcontext."""
def __enter__(self):
pass
def __exit__(self, unused_type, unused_value, unused_traceback):
pass
def _default_control_status_ctx():
return ControlStatusCtx(status=Status.UNSPECIFIED)
INSPECT_SOURCE_SUPPORTED = True
try:
inspect.getsource(ag_logging.log)
except OSError:
INSPECT_SOURCE_SUPPORTED = False
ag_logging.warning(
'AutoGraph is not available in this environment: functions lack code'
' information. This is typical of some environments like the interactive'
' Python shell, functions with native bindings or functions created'
' dynamically using `exec` or `eval`. Use `inspect.findsource` to check'
' if the source code is available for the function you are trying to convert.')
--- DALI-main: dali/python/nvidia/dali/_autograph/core/ag_ctx.py ---
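Below is a minimal usage sketch for the ag_ctx module above (illustrative only, not part of the repository); it assumes the import path shown and simply exercises the documented thread-local stack behavior:
```
from nvidia.dali._autograph.core.ag_ctx import ControlStatusCtx, Status, control_status_ctx

# With nothing on the stack, the default context reports UNSPECIFIED.
assert control_status_ctx().status is Status.UNSPECIFIED

# Entering a ControlStatusCtx pushes it onto the thread-local stack;
# contexts nest, and the innermost one wins.
with ControlStatusCtx(Status.ENABLED):
    assert control_status_ctx().status is Status.ENABLED
    with ControlStatusCtx(Status.DISABLED):
        assert control_status_ctx().status is Status.DISABLED

# On exit the previous context is restored.
assert control_status_ctx().status is Status.UNSPECIFIED
```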
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for tests in this module."""
import contextlib
import imp
import inspect
import sys
import unittest
import six
from nvidia.dali._autograph.core import config
from nvidia.dali._autograph.core import converter
from nvidia.dali._autograph.impl import api
from nvidia.dali._autograph.utils import hooks
def allowlist(f):
"""Helper that marks a callable as allowlisted."""
if 'allowlisted_module_for_testing' not in sys.modules:
allowlisted_mod = imp.new_module('allowlisted_module_for_testing')
sys.modules['allowlisted_module_for_testing'] = allowlisted_mod
config.CONVERSION_RULES = (
(config.DoNotConvert('allowlisted_module_for_testing'),) +
config.CONVERSION_RULES)
f.__module__ = 'allowlisted_module_for_testing'
def is_inside_generated_code():
"""Tests whether the caller is generated code. Implementation-specific."""
frame = inspect.currentframe()
try:
frame = frame.f_back
internal_stack_functions = ('converted_call', '_call_unconverted')
# Walk up the stack until we're out of the internal functions.
while (frame is not None and
frame.f_code.co_name in internal_stack_functions):
frame = frame.f_back
if frame is None:
return False
return 'ag__' in frame.f_locals
finally:
del frame
class TestingTranspiler(api.PyToLib):
"""Testing version that only applies given transformations."""
def __init__(self, converters, ag_overrides, operator_overload=hooks.OperatorBase()):
super(TestingTranspiler, self).__init__(name="autograph", operator_overload=operator_overload)
if isinstance(converters, (list, tuple)):
self._converters = converters
else:
self._converters = (converters,)
self.transformed_ast = None
self._ag_overrides = ag_overrides
def get_extra_locals(self):
retval = super(TestingTranspiler, self).get_extra_locals()
if self._ag_overrides:
modified_ag = imp.new_module('fake_autograph')
modified_ag.__dict__.update(retval['ag__'].__dict__)
modified_ag.__dict__.update(self._ag_overrides)
retval['ag__'] = modified_ag
return retval
def transform_ast(self, node, ctx):
node = self.initial_analysis(node, ctx)
for c in self._converters:
node = c.transform(node, ctx)
self.transformed_ast = node
self.transform_ctx = ctx
return node
class TestCase(unittest.TestCase):
"""Base class for unit tests in this module. Contains relevant utilities."""
@contextlib.contextmanager
def assertPrints(self, expected_result):
try:
out_capturer = six.StringIO()
sys.stdout = out_capturer
yield
self.assertEqual(out_capturer.getvalue(), expected_result)
finally:
sys.stdout = sys.__stdout__
def transform(self, f, converter_module, include_ast=False, ag_overrides=None,
operator_overload=hooks.OperatorBase()):
program_ctx = converter.ProgramContext(
options=converter.ConversionOptions(recursive=True),
autograph_module=api)
tr = TestingTranspiler(converter_module, ag_overrides, operator_overload=operator_overload)
transformed, _, _ = tr.transform_function(f, program_ctx)
if include_ast:
return transformed, tr.transformed_ast, tr.transform_ctx
return transformed
--- DALI-main: dali/python/nvidia/dali/_autograph/core/converter_testing.py ---
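A small sketch of how the assertPrints helper above might be used (illustrative; the test class and expected output are hypothetical):
```
from nvidia.dali._autograph.core.converter_testing import TestCase

class PrintingTest(TestCase):  # hypothetical test case
    def test_captures_stdout(self):
        # assertPrints swaps sys.stdout for a StringIO and compares on exit.
        with self.assertPrints('hello\n'):
            print('hello')
```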
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Logical boolean operators: not, and, or."""
from nvidia.dali._autograph.utils import hooks
def not_(a):
"""Functional form of "not"."""
if hooks._DISPATCH.detect_overload_not_(a):
return hooks._DISPATCH.not_(a)
return _py_not(a)
def _py_not(a):
"""Default Python implementation of the "not_" operator."""
return not a
def and_(a, b):
"""Functional form of "and". Uses lazy evaluation semantics."""
a_val = a()
if hooks._DISPATCH.detect_overload_lazy_and(a_val):
return hooks._DISPATCH.lazy_and(a_val, b)
return _py_lazy_and(a_val, b)
def _py_lazy_and(cond, b):
"""Lazy-eval equivalent of "and" in Python."""
return cond and b()
def or_(a, b):
"""Functional form of "or". Uses lazy evaluation semantics."""
a_val = a()
if hooks._DISPATCH.detect_overload_lazy_or(a_val):
return hooks._DISPATCH.lazy_or(a_val, b)
return _py_lazy_or(a_val, b)
def _py_lazy_or(cond, b):
"""Lazy-eval equivalent of "or" in Python."""
return cond or b()
def eq(a, b):
"""Functional form of "equal"."""
if hooks._DISPATCH.detect_overload_equal(a) or hooks._DISPATCH.detect_overload_equal(b):
return hooks._DISPATCH.equal(a, b)
return _py_equal(a, b)
def _py_equal(a, b):
"""Overload of "equal" that falls back to Python's default implementation."""
return a == b
def not_eq(a, b):
"""Functional form of "not-equal"."""
return not_(eq(a, b))
--- DALI-main: dali/python/nvidia/dali/_autograph/operators/logical.py ---
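The lazy operators above take zero-argument callables so the right-hand side is only evaluated when needed. A short sketch of the plain-Python path (illustrative; assumes the default dispatch hooks, which do not overload ordinary Python values):
```
from nvidia.dali._autograph.operators.logical import and_, or_, not_, eq

evaluated = []
def thunk(name, value):
    def call():
        evaluated.append(name)
        return value
    return call

# `a and b`: the right operand runs only when the left is truthy.
assert and_(thunk('a', True), thunk('b', 5)) == 5
# `a or b`: the right operand is skipped when the left is truthy.
assert or_(thunk('c', 7), thunk('d', 0)) == 7
assert evaluated == ['a', 'b', 'c']

assert not_(0) is True
assert eq(2, 2) is True
```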
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities used to capture Python idioms."""
from nvidia.dali._autograph.utils import hooks
def ld(v):
"""Load variable operator."""
if isinstance(v, Undefined):
return v.read()
if hooks._DISPATCH.detect_overload_ld(v):
return hooks._DISPATCH.ld(v)
return v
def ldu(load_v, name):
"""Load variable operator that returns Undefined when failing to evaluate.
Note: the name ("load or return undefined") is abbreviated to minimize
the amount of clutter in generated code.
This variant of `ld` is useful when loading symbols that may be undefined at
runtime, such as composite symbols, and whether they are defined or not cannot
be determined statically. For example `d['a']` is undefined when `d` is an
empty dict.
Args:
load_v: Lambda that executes the actual read.
name: Human-readable name of the symbol being read.
Returns:
Either the value of the symbol, or Undefined, if the symbol is not fully
defined.
"""
try:
# TODO(mdan): Use locals()/globals() here.
return load_v()
except (KeyError, AttributeError, NameError):
return Undefined(name)
class Undefined(object):
"""Represents an undefined symbol in Python.
This is used to reify undefined symbols, which is required to use the
functional form of loops.
Example:
while n > 0:
n = n - 1
s = n
return s # Runtime error if n == 0
This is valid Python code and will not result in an error as long as n
is positive. The use of this class is to stay as close to Python semantics
as possible for staged code of this nature.
Converted version of the above showing the possible usage of this class:
s = Undefined('s')
init_state = (s,)
s = while_loop(cond, body, init_state)
return s # s is an instance of Undefined if the loop never runs
Attributes:
symbol_name: Text, identifier for the undefined symbol
"""
__slots__ = ('symbol_name',)
def __init__(self, symbol_name):
self.symbol_name = symbol_name
def read(self):
raise UnboundLocalError("'{}' is used before assignment".format(
self.symbol_name))
def __repr__(self):
return self.symbol_name
def __getattribute__(self, name):
try:
# If it's an existing attribute, return it.
return object.__getattribute__(self, name)
except AttributeError:
# Otherwise return Undefined.
return self
def __getitem__(self, i):
return self
# TODO(mdan): Refactor as a RetVal object, aggregating the value and do_return.
class UndefinedReturnValue(object):
"""Represents a return value that is undefined."""
pass
--- DALI-main: dali/python/nvidia/dali/_autograph/operators/variables.py ---
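A brief sketch of how ldu and Undefined behave for plain Python values (illustrative; the dict below is a made-up example):
```
from nvidia.dali._autograph.operators.variables import Undefined, ld, ldu

d = {}  # 'a' is undefined at runtime, as in the ldu docstring
maybe_a = ldu(lambda: d['a'], 'a')
assert isinstance(maybe_a, Undefined)  # the KeyError became Undefined('a')

try:
    ld(maybe_a)  # reading an Undefined raises, mirroring Python's own error
except UnboundLocalError as e:
    assert "'a' is used before assignment" in str(e)

d['a'] = 42
assert ldu(lambda: d['a'], 'a') == 42  # defined symbols load normally
```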
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Structures that allow uniform control over the dispatch process."""
import collections
# TODO(mdan): This is where macro override controls fit.
class DispatchContext(collections.namedtuple(
'DispatchContext',
('options',))):
"""Allows passing additional parameters to the specific implementations.
Attributes:
options: Optional dict of extra arguments that may be required by specific
implementations.
"""
def option(self, name):
return self.options[name]
NO_CTX = DispatchContext(options={})
--- DALI-main: dali/python/nvidia/dali/_autograph/operators/dispatch_context.py ---
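DispatchContext is just a named tuple that carries per-call options to specific operator implementations; a tiny sketch (illustrative; the option name is hypothetical):
```
from nvidia.dali._autograph.operators.dispatch_context import DispatchContext, NO_CTX

ctx = DispatchContext(options={'maximum_iterations': 10})  # hypothetical option
assert ctx.option('maximum_iterations') == 10
assert NO_CTX.options == {}
```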
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Control flow statements: loops, conditionals, etc.
Note: most of these operators accept pairs of get_state/set_state functions, to
capture mutations that the corresponding code blocks might make. These
mutations only need to be captured when staging the control flow, and they just
work when reverting to Python behavior.
__Examples__
```
while cond:
self.x += i
```
When the functionalized version is executed as a Python loop, it just works:
```
def loop_body():
self.x += i # works as expected for Python loops
```
But it won't work for TF loops:
```
def loop_body():
self.x += i # self.x has the wrong value!
```
get_state/set_state allow piping the mutations through the loop variables as
well, in effect changing the loop body:
```
def loop_body(self_x):
self.x = self_x # self.x now has the proper value
self.x += i # the original block
self_x = self.x # write self.x back into the loop vars
return self_x
self_x = tf.while_loop(...)
self.x = self_x # the result is now properly captured
```
"""
from nvidia.dali._autograph.utils import hooks
# TODO(mdan): Use the custom operator pattern instead of type dispatch.
# An example of this pattern is found in the implementation of distributed
# datasets. Before it can be used though, we need to standardize the interface.
def for_stmt(iter_, extra_test, body, get_state, set_state, symbol_names, opts):
"""Functional form of a for statement.
The loop operates on a state, which includes all symbols that are
variant across loop iterations, excluding the variables local to the loop.
For example, given the loop below that calculates the geometric and
arithmetic means of some numbers:
```
geo_mean = 1
arith_mean = 0
for i in range(n):
a = numbers[i]
geo_mean *= a
arith_mean += a
```
The state is represented by the variables geo_mean and arith_mean. The
`extra_test`, `body`, `get_state` and `set_state` functions must bind to the
original `geo_mean` and `arith_mean` symbols, using `nonlocal`.
The inputs and outputs of the callables representing the loop blocks are not
explicit - instead, these functions must use nonlocal/global for side effects.
The inputs and outputs are instead controlled by the set_state/get_state
functions.
Args:
iter_: The entity being iterated over.
extra_test: Callable with boolean return type. An additional loop condition.
body: Callable representing the actual loop body.
get_state: Additional callable which can capture additional state (such as
the values of composite symbols). This is only useful when staging the
loop.
set_state: Additional callable which saves values captured by get_state back
into the Python environment. This is only useful when staging the loop.
symbol_names: Tuple containing names of the loop variables returned by
get_state.
opts: Optional dict of extra loop parameters.
"""
if hooks._DISPATCH.detect_overload_for_stmt(iter_):
hooks._DISPATCH.for_stmt(iter_, extra_test, body, get_state, set_state, symbol_names, opts)
else:
_py_for_stmt(iter_, extra_test, body, None, None)
def _py_for_stmt(iter_, extra_test, body, get_state, set_state):
"""Overload of for_stmt that executes a Python for loop."""
del get_state, set_state
if extra_test is not None:
if extra_test():
for target in iter_:
body(target)
if not extra_test():
break
else:
for target in iter_:
body(target)
def while_stmt(test, body, get_state, set_state, symbol_names, opts):
"""Functional form of a while statement.
The loop operates on a so-called state, which includes all symbols that are
variant across loop iterations. In what follows we refer to state as either
a tuple of entities that represent an actual state, or a list of arguments
of the corresponding types.
The inputs and outputs of the callables representing the loop blocks are not
explicit - instead, these functions must use nonlocal/global for side effects.
The inputs and outputs are instead controlled by the set_state/get_state
functions.
Args:
test: Callable with boolean return type. The loop condition.
body: Callable representing the actual loop body.
get_state: Additional callable which can capture additional state (such as
the values of composite symbols). This is only useful when staging the
loop.
set_state: Additional callable which saves values captured by get_state back
into the Python environment. This is only useful when staging the loop.
symbol_names: Tuple containing the names of all loop variables.
opts: Optional dict of extra loop parameters.
Returns:
Tuple containing the final state.
"""
# Evaluate the initial test once in order to do the dispatch. The evaluation
# is isolated to minimize unwanted side effects.
# TODO(mdan): Do a full iteration - some state types might lower to Tensor.
# with func_graph.FuncGraph('tmp').as_default():
# init_test = test()
init_test = test()
# TensorFlow: Multiple evaluations are acceptable in this case, so we're fine
# with the re-evaluation of `test` that `_tf_while_stmt` will make.
if hooks._DISPATCH.detect_overload_while_stmt(test):
hooks._DISPATCH.while_stmt(test, body, get_state, set_state, symbol_names, opts)
return
# Normal Python: We already consumed one evaluation of `test`; consistently,
# unroll one iteration before dispatching to a normal loop.
# TODO(mdan): Push the "init_test" value via opts into _py_while_stmt?
if not init_test:
return
body()
_py_while_stmt(test, body, get_state, set_state, opts)
def _py_while_stmt(test, body, get_state, set_state, opts):
"""Overload of while_stmt that executes a Python while loop."""
del opts, get_state, set_state
while test():
body()
def if_stmt(cond, body, orelse, get_state, set_state, symbol_names, nouts):
"""Functional form of an if statement.
The conditional operates on a state, which includes all symbols whose values
are a function of the branch taken.
For example, given the code below that calculates the abs function:
```
x = 1
if x > 0:
x = -x
```
The state is represented by the variable `x`. The `body`, `orelse` and
`set_state` functions must bind to the original `x` symbol, using `nonlocal`.
The inputs and outputs of the callables representing the loop blocks are not
explicit - instead, these functions must use nonlocal/global for side effects.
The inputs and outputs are instead controlled by the set_state/get_state
functions.
Args:
cond: Boolean.
body: Callable representing the main block of the conditional.
orelse: Callable representing the else block of the conditional.
get_state: Function that returns a tuple containing the values of all
composite symbols modified within the conditional. This allows access to
state that branches may mutate through side effects. This function is not
needed and should not be called when dispatching to code matching Python's
default semantics. This is useful for checkpointing to avoid unintended
side-effects when staging requires evaluating all code-paths.
set_state: Function to set the values of all composite symbols modified
within the conditional. This is the complement to get_state, used to
restore checkpointed values. The single argument is a tuple containing values
for each composite symbol that may be modified in a branch of the
conditional. This is usually the result of a call to get_state.
symbol_names: Tuple containing basic loop var names.
nouts: Number of variables output by the statement. Vars which are not
outputs will not be passed through staged control flow such as tf.cond.
This includes variables that are defined before the conditional, but are
not used after it.
"""
# Note: tf.cond doesn't support SparseTensor.
if hooks._DISPATCH.detect_overload_if_stmt(cond):
return hooks._DISPATCH.if_stmt(cond, body, orelse, get_state, set_state, symbol_names, nouts)
else:
_py_if_stmt(cond, body, orelse)
def _py_if_stmt(cond, body, orelse):
"""Overload of if_stmt that executes a Python if statement."""
return body() if cond else orelse()
--- DALI-main: dali/python/nvidia/dali/_autograph/operators/control_flow.py ---
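A sketch of the functional loop and conditional forms above applied to plain Python values (illustrative; assumes the default dispatch hooks so the pure-Python paths run, with state piped through `nonlocal` exactly as the docstrings describe):
```
from nvidia.dali._autograph.operators.control_flow import for_stmt, while_stmt, if_stmt

def sum_with_for_stmt(n):
    total = 0
    def body(i):
        nonlocal total
        total += i
    # extra_test=None means no extra condition; get_state/set_state are unused
    # on the plain-Python path.
    for_stmt(range(n), None, body, None, None, ('total',), {})
    return total

def countdown_with_while_stmt(n):
    i = n
    def test():
        return i > 0
    def body():
        nonlocal i
        i -= 1
    while_stmt(test, body, None, None, ('i',), {})
    return i

def abs_with_if_stmt(x):
    def body():
        nonlocal x
        x = -x
    def orelse():
        pass
    if_stmt(x < 0, body, orelse, None, None, ('x',), 1)
    return x

assert sum_with_for_stmt(5) == 10
assert countdown_with_while_stmt(3) == 0
assert abs_with_if_stmt(-4) == 4
```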
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This module implements operators that AutoGraph overloads.
Note that "operator" is used loosely here, and includes control structures like
conditionals and loops, implemented in functional form, using for example
closures for the body.
"""
# Naming conventions:
# * operator names match the name usually used for the respective Python
# idiom; examples: for_stmt, list_append
# * operator arguments match either of:
# - the corresponding Python AST attribute (e.g. the condition of an if
# statement is called test) if the operator represents an AST construct
# - the names used in the Python docs, if the operator is a function (e.g.
# list_ and x for append, see
# https://docs.python.org/3.7/tutorial/datastructures.html)
#
# All operators may accept a final argument named "opts", of a type that
# subclasses namedtuple and contains any arguments that are only required
# for some specializations of the operator.
from nvidia.dali._autograph.operators.conditional_expressions import if_exp
from nvidia.dali._autograph.operators.control_flow import for_stmt
from nvidia.dali._autograph.operators.control_flow import if_stmt
from nvidia.dali._autograph.operators.control_flow import while_stmt
from nvidia.dali._autograph.operators.data_structures import list_append
from nvidia.dali._autograph.operators.data_structures import list_pop
from nvidia.dali._autograph.operators.data_structures import list_stack
from nvidia.dali._autograph.operators.data_structures import ListPopOpts
from nvidia.dali._autograph.operators.data_structures import ListStackOpts
from nvidia.dali._autograph.operators.data_structures import new_list
from nvidia.dali._autograph.operators.exceptions import assert_stmt
from nvidia.dali._autograph.operators.logical import and_
from nvidia.dali._autograph.operators.logical import eq
from nvidia.dali._autograph.operators.logical import not_
from nvidia.dali._autograph.operators.logical import not_eq
from nvidia.dali._autograph.operators.logical import or_
from nvidia.dali._autograph.operators.py_builtins import float_
from nvidia.dali._autograph.operators.py_builtins import int_
from nvidia.dali._autograph.operators.py_builtins import len_
from nvidia.dali._autograph.operators.py_builtins import print_
from nvidia.dali._autograph.operators.py_builtins import range_
from nvidia.dali._autograph.operators.slices import get_item
from nvidia.dali._autograph.operators.slices import GetItemOpts
from nvidia.dali._autograph.operators.slices import set_item
from nvidia.dali._autograph.operators.variables import ld
from nvidia.dali._autograph.operators.variables import ldu
from nvidia.dali._autograph.operators.variables import Undefined
from nvidia.dali._autograph.operators.variables import UndefinedReturnValue
--- DALI-main: dali/python/nvidia/dali/_autograph/operators/__init__.py ---
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operators corresponding to Python builtin functions.
List of built-in functions: https://docs.python.org/3/library/functions.html
"""
import inspect
from nvidia.dali._autograph.utils import hooks
UNSPECIFIED = object()
def overload_of(f):
if f in SUPPORTED_BUILTINS:
return BUILTIN_FUNCTIONS_MAP[f.__name__]
return f
def _find_originating_frame(caller_fn_scope, innermost=True):
"""Locates the frame in which `caller_fn_scope` was defined."""
ctx_frame = inspect.currentframe()
result = None
while ctx_frame is not None:
# Note it should not be normally possible to get false positives this way
# because the function scope object is not accessible to user code (barring
# call stack introspection).
if ctx_frame.f_locals.get(caller_fn_scope.name, None) is caller_fn_scope:
result = ctx_frame
if innermost:
break
ctx_frame = ctx_frame.f_back
assert result is not None, (
'the conversion process should ensure the caller_fn_scope is always'
' found somewhere on the call stack')
return result
def locals_in_original_context(caller_fn_scope):
"""Executes the locals function in the context of a specified function."""
return _find_originating_frame(caller_fn_scope, innermost=True).f_locals
def globals_in_original_context(caller_fn_scope):
"""Executes the locals function in the context of a specified function."""
return _find_originating_frame(caller_fn_scope, innermost=True).f_globals
def eval_in_original_context(f, args, caller_fn_scope):
"""Executes the eval function in the context of a specified function."""
# When control flow is rewritten using functions, eval should use the
# variables found in the same block where it was called. That is equivalent
# to the innermost function call.
ctx_frame = _find_originating_frame(caller_fn_scope, innermost=True)
args = (
args[0],
ctx_frame.f_globals if len(args) < 2 else args[1],
ctx_frame.f_locals if len(args) < 3 else args[2],
)
return f(*args)
def super_in_original_context(f, args, caller_fn_scope):
"""Executes the super function in the context of a specified function.
See https://docs.python.org/3/library/functions.html#super for the exact
details
Args:
f: Callable, typically the super builtin
args: List[Any], the original call arguments
caller_fn_scope: Optional[function_wrappers.FunctionScope], the function
scope of the converted function in which this call was originally made
Returns:
The result of calling `f` as if it was called in the frame indicated by
`caller_fn_scope`.
"""
# Only the no-arg call is desugared.
if args:
return f(*args)
# Inner functions seem to include their closure in f_locals, so we need
# to find the outermost frame.
ctx_frame = _find_originating_frame(caller_fn_scope, innermost=False)
# When super(..) is called without arguments, it looks for __class__ cell
# variable and the first argument passed in the enclosing function according
# to the spec https://www.python.org/dev/peps/pep-3135/ .
#
# We couldn't verify if `inspect.currentframe().f_code.co_varnames[0]` is
# guaranteed to be the first argument from an official doc or PEP, however,
# it's fairly stable and well established:
# - An unofficial community doc mentions it.
# https://python-reference.readthedocs.io/en/latest/docs/code/varnames.html
# - CPython has tests checking that order, which was merged in 2008, and
# unchanged since then.
# https://github.com/python/cpython/blame/2f224a077a83ac9de8a12bb7dcc516642b8176d8/Lib/lib2to3/tests/data/py2_test_grammar.py#L157
# https://github.com/python/cpython/blame/2f224a077a83ac9de8a12bb7dcc516642b8176d8/Lib/lib2to3/tests/data/py3_test_grammar.py#L192
#
# Note: the name can be more reliably obtained by inspecting the calling
# function's argspec.
#
# Even though methods can be declared using *args (def method(*args)),
# that pattern is disallowed by super() -- it raises 'super(): no arguments'.
# Method definitions using **kwargs are not allowed at all.
# In other words, we can always assume that self is on the first positional
# argument (for correct code).
#
# TODO(mdan): Consider additional checks in case the input code is incorrect.
# For example, the error might be cryptic compared to what super() regularly
# raises.
type_arg = ctx_frame.f_locals['__class__']
self_arg_name = ctx_frame.f_code.co_varnames[0]
self_arg = ctx_frame.f_locals[self_arg_name]
return f(type_arg, self_arg)
def abs_(x):
if hooks._DISPATCH.detect_overload_abs_(x):
return hooks._DISPATCH.abs_(x)
return _py_abs(x)
def _py_abs(x):
return abs(x)
def float_(x=0):
if hooks._DISPATCH.detect_overload_float_(x):
return hooks._DISPATCH.float_(x)
return _py_float(x)
def _py_float(x):
return float(x)
def int_(x=0, base=UNSPECIFIED):
if hooks._DISPATCH.detect_overload_int_(x):
return hooks._DISPATCH.int_(x, base)
return _py_int(x, base)
def _py_int(x, base):
if base is UNSPECIFIED:
return int(x)
return int(x, base)
def len_(s):
if hooks._DISPATCH.detect_overload_len_(s):
return hooks._DISPATCH.len_(s)
return _py_len(s)
def _py_len(s):
return len(s)
def print_(*objects, **kwargs):
"""Overload of the print builtin."""
# Note: Python 2.6 doesn't support explicit keywords after starargs.
unknown_kwargs = tuple(
set(kwargs.keys()) - set(('sep', 'end', 'file', 'flush')))
if unknown_kwargs:
raise ValueError('invalid keyword arguments: {}'.format(unknown_kwargs))
if hooks._DISPATCH.detect_overload_print_(objects):
return hooks._DISPATCH.print_(objects, kwargs)
else:
_py_print(*objects, **kwargs)
def _py_print(*objects, **kwargs):
print(*objects, **kwargs)
def min_(*args, **kwargs):
if hooks._DISPATCH.detect_overload_min_(args):
return hooks._DISPATCH.min_(*args, **kwargs)
return _py_min(*args, **kwargs)
def _py_min(*args, **kwargs):
return min(*args, **kwargs)
def max_(*args, **kwargs):
if hooks._DISPATCH.detect_overload_max_(args):
return hooks._DISPATCH.max_(*args, **kwargs)
return _py_max(*args, **kwargs)
def _py_max(*args, **kwargs):
return max(*args, **kwargs)
def range_(start_or_stop, stop=UNSPECIFIED, step=UNSPECIFIED):
if hooks._DISPATCH.detect_overload_range_(start_or_stop, stop, step):
return hooks._DISPATCH.range_(start_or_stop, stop, step)
return _py_range(start_or_stop, stop, step)
def _py_range(start_or_stop, stop, step):
if step is not UNSPECIFIED:
return range(start_or_stop, stop, step)
if stop is not UNSPECIFIED:
return range(start_or_stop, stop)
return range(start_or_stop)
def enumerate_(s, start=0):
if hooks._DISPATCH.detect_overload_enumerate_(s):
return hooks._DISPATCH.enumerate_(s, start)
return _py_enumerate(s, start)
def _py_enumerate(s, start=0):
return enumerate(s, start)
def zip_(*iterables):
if hooks._DISPATCH.detect_overload_zip_(iterables):
return hooks._DISPATCH.zip_(*iterables)
return _py_zip(*iterables)
def _py_zip(*iterables):
return zip(*iterables)
def map_(fn, *iterables):
if hooks._DISPATCH.detect_overload_map_(iterables):
return hooks._DISPATCH.map_(fn, *iterables)
return _py_map(fn, *iterables)
def _py_map(fn, *iterables):
return map(fn, *iterables)
def next_(iterator, default=UNSPECIFIED):
if hooks._DISPATCH.detect_overload_next_(iterator):
return hooks._DISPATCH.next_(iterator, default)
return next_py(iterator, default)
def next_py(iterator, default=UNSPECIFIED):
if default is UNSPECIFIED:
return next(iterator)
return next(iterator, default)
def filter_(function, iterable):
if hooks._DISPATCH.detect_overload_filter_(iterable):
return hooks._DISPATCH.filter_(function, iterable)
return _py_filter(function, iterable)
def _py_filter(function, iterable):
return filter(function, iterable)
def any_(iterable):
if hooks._DISPATCH.detect_overload_any_(iterable):
return hooks._DISPATCH.any_(iterable)
return _py_any(iterable)
def _py_any(iterable):
return any(iterable)
def all_(iterable):
if hooks._DISPATCH.detect_overload_all_(iterable):
return hooks._DISPATCH.all_(iterable)
return _py_all(iterable)
def _py_all(iterable):
return all(iterable)
def sorted_(iterable, key=UNSPECIFIED, reverse=UNSPECIFIED):
if hooks._DISPATCH.detect_overload_sorted_(iterable):
return hooks._DISPATCH.sorted_(iterable, key, reverse)
return _py_sorted(iterable, key, reverse)
def _py_sorted(iterable, key, reverse):
if key is not UNSPECIFIED and reverse is UNSPECIFIED:
return sorted(iterable, key=key)
if key is UNSPECIFIED and reverse is not UNSPECIFIED:
return sorted(iterable, reverse=reverse)
if key is not UNSPECIFIED and reverse is not UNSPECIFIED:
return sorted(iterable, key=key, reverse=reverse)
return sorted(iterable)
SUPPORTED_BUILTINS = (abs, float, int, len, print, range, enumerate, zip, map,
filter, any, all, sorted)
BUILTIN_FUNCTIONS_MAP = {
'abs': abs_,
'any': any_,
'all': all_,
'enumerate': enumerate_,
'filter': filter_,
'float': float_,
'int': int_,
'len': len_,
'map': map_,
'next': next_,
'print': print_,
'range': range_,
'sorted': sorted_,
'zip': zip_,
}
--- DALI-main: dali/python/nvidia/dali/_autograph/operators/py_builtins.py ---
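A sketch exercising a few of the builtin overloads above on plain Python values (illustrative; assumes the default dispatch hooks, so the `_py_*` fall-backs run):
```
from nvidia.dali._autograph.operators import py_builtins

# overload_of maps a supported builtin to its functional counterpart and
# passes anything else through untouched.
assert py_builtins.overload_of(len) is py_builtins.len_
assert py_builtins.overload_of(dict) is dict

assert py_builtins.len_([1, 2, 3]) == 3
assert list(py_builtins.range_(1, 7, 2)) == [1, 3, 5]
assert py_builtins.int_('ff', 16) == 255
assert py_builtins.sorted_([3, 1, 2], py_builtins.UNSPECIFIED, True) == [3, 2, 1]
```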
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operators specific to data structures: list append, subscripts, etc."""
import collections
from nvidia.dali._autograph.utils import hooks
# TODO(mdan): Once control flow supports objects, repackage as a class.
def new_list(iterable=None):
"""The list constructor.
Args:
iterable: Optional elements to fill the list with.
Returns:
A list-like object. The exact return value depends on the initial elements.
"""
# TODO(klecki): DALI would fail here, as DataNode is explicitly non-convertible to boolean.
# We probably need to revert this idiom.
if hooks._DISPATCH.detect_overload_list_new(iterable):
return hooks._DISPATCH.list_new(iterable)
if iterable:
elements = tuple(iterable)
else:
elements = ()
if elements:
# When the list contains elements, it is assumed to be a "Python" lvalue
# list.
return _py_list_new(elements)
# Empty list creation
return hooks._DISPATCH.list_new(elements)
def _py_list_new(elements):
"""Overload of new_list that creates a Python list."""
return list(elements)
def list_append(list_, x):
"""The list append function.
Note: it is unspecified whether list_ will be mutated or not. If list_ is
a TensorFlow entity, it will typically not be mutated. If list_ is a plain
list, it will be. In general, if the list is mutated then the return value
should point to the original entity.
Args:
list_: An entity that supports append semantics.
x: The element to append.
Returns:
Same as list_, after the append was performed.
Raises:
ValueError: if list_ is not of a known list-like type.
"""
if hooks._DISPATCH.detect_overload_list_append(list_):
return hooks._DISPATCH.list_append(list_, x)
else:
return _py_list_append(list_, x)
def _py_list_append(list_, x):
"""Overload of list_append that executes a Python list append."""
# Revert to the original call.
list_.append(x)
return list_
class ListPopOpts(
collections.namedtuple('ListPopOpts', ('element_dtype', 'element_shape'))):
pass
def list_pop(list_, i, opts):
"""The list pop function.
Note: it is unspecified whether list_ will be mutated or not. If list_ is
a TensorFlow entity, it will typically not be mutated. If list_ is a plain
list, it will be. In general, if the list is mutated then the return value
should point to the original entity.
Args:
list_: An entity that supports pop semantics.
i: Optional index to pop from. May be None.
opts: A ListPopOpts.
Returns:
Tuple (x, out_list_):
out_list_: same as list_, after the removal was performed.
x: the removed element value.
Raises:
ValueError: if list_ is not of a known list-like type or the operation is
not supported for that type.
"""
assert isinstance(opts, ListPopOpts)
if hooks._DISPATCH.detect_overload_list_pop(list_):
return hooks._DISPATCH.list_pop(list_, i)
else:
return _py_list_pop(list_, i)
def _py_list_pop(list_, i):
"""Overload of list_pop that executes a Python list append."""
if i is None:
x = list_.pop()
else:
x = list_.pop(i)
return list_, x
# TODO(mdan): Look into reducing duplication between all these containers.
class ListStackOpts(
collections.namedtuple('ListStackOpts',
('element_dtype', 'original_call'))):
pass
# TODO(klecki): Just remove this from code generation? It's a TF-specific extension.
def list_stack(list_, opts):
"""The list stack function.
This does not have a direct correspondent in Python. The closest idiom to
this is tf.stack or np.stack. It's different from those in the sense that it
accepts a Tensor list, rather than a list of tensors. It can also accept
TensorArray. When the target is anything else, the dispatcher will rely on
ctx.original_call for fallback.
Args:
list_: An entity that supports append semantics.
opts: A ListStackOpts object.
Returns:
The output of the stack operation, typically a Tensor.
"""
assert isinstance(opts, ListStackOpts)
if hooks._DISPATCH.detect_overload_list_stack(list_):
return hooks._DISPATCH.list_stack(list_, opts)
else:
return _py_list_stack(list_, opts)
def _py_list_stack(list_, opts):
"""Overload of list_stack that executes a Python list append."""
# Revert to the original call.
return opts.original_call(list_)
--- DALI-main: dali/python/nvidia/dali/_autograph/operators/data_structures.py ---
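For plain Python lists the operators above just defer to the usual list methods; a short sketch (illustrative; assumes the default dispatch hooks for plain lists):
```
from nvidia.dali._autograph.operators.data_structures import (
    ListPopOpts, list_append, list_pop)

l = [1, 2]
l = list_append(l, 3)  # plain lists are mutated in place and returned
assert l == [1, 2, 3]

opts = ListPopOpts(element_dtype=None, element_shape=None)
l, popped = list_pop(l, None, opts)  # i=None pops the last element
assert (l, popped) == ([1, 2], 3)
```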
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exception handling statements: assert, etc."""
import inspect
from nvidia.dali._autograph.utils import hooks
def assert_stmt(expression1, expression2):
"""Functional form of an assert statement.
This follows the semantics of the Python assert statement; however, the
concrete implementations may deviate from it. See the respective
implementation for details.
In general, the assert statement should not be used for control flow.
Furthermore, it is encouraged that the assertion expressions should not have
side effects.
Args:
expression1: Any
expression2: Callable[[], Any], returns the expression to include in the
error message when expression1 evaluates to False. When expression1 is
True, the result of expression2 will not be evaluated; however,
expression2 itself may be evaluated in some implementations.
Returns:
Any, implementation-dependent.
Raises:
ValueError: if any arguments are illegal.
"""
if not callable(expression2):
raise ValueError('{} must be a callable'.format(expression2))
# Note: getfullargspec is used because inspect.getargspec was removed in Python 3.11.
args, _, keywords, *_ = inspect.getfullargspec(expression2)
if args or keywords:
raise ValueError('{} may not have any arguments'.format(expression2))
if hooks._DISPATCH.detect_overload_assert_stmt(expression1):
return hooks._DISPATCH.assert_stmt(expression1, expression2)
else:
return _py_assert_stmt(expression1, expression2)
def _py_assert_stmt(expression1, expression2):
"""Overload of assert_stmt that executes a Python assert statement."""
assert expression1, expression2()
return None
--- DALI-main: dali/python/nvidia/dali/_autograph/operators/exceptions.py ---
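assert_stmt takes the error message as a zero-argument callable so the message is only built when needed; a quick sketch (illustrative; the message text is made up):
```
from nvidia.dali._autograph.operators.exceptions import assert_stmt

assert_stmt(1 + 1 == 2, lambda: 'unused message')  # passes silently

try:
    assert_stmt(False, lambda: 'value must be positive')
except AssertionError as e:
    assert 'value must be positive' in str(e)
```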
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operators specific to slicing operations."""
import collections
from nvidia.dali._autograph.utils import hooks
# TODO(mdan): Support extended slices.
class GetItemOpts(collections.namedtuple('GetItemOpts', ('element_dtype',))):
pass
def get_item(target, i, opts):
"""The slice read operator (i.e. __getitem__).
Note: it is unspecified whether target will be mutated or not. In general,
if target is mutable (like Python lists), it will be mutated.
Args:
target: An entity that supports getitem semantics.
i: Index to read from.
opts: A GetItemOpts object.
Returns:
The read element.
Raises:
ValueError: if target is not of a supported type.
"""
assert isinstance(opts, GetItemOpts)
if hooks._DISPATCH.detect_overload_get_item(target):
return hooks._DISPATCH.get_item(target, i)
else:
return _py_get_item(target, i)
def _py_get_item(target, i):
"""Overload of get_item that executes a Python list modification."""
return target[i]
def set_item(target, i, x):
"""The slice write operator (i.e. __setitem__).
Note: it is unspecified whether target will be mutated or not. In general,
if target is mutable (like Python lists), it will be mutated.
Args:
target: An entity that supports setitem semantics.
i: Index to modify.
x: The new element value.
Returns:
Same as target, after the update was performed.
Raises:
ValueError: if target is not of a supported type.
"""
if hooks._DISPATCH.detect_overload_set_item(target):
return hooks._DISPATCH.set_item(target, i, x)
else:
return _py_set_item(target, i, x)
def _py_set_item(target, i, x):
"""Overload of set_item that executes a Python list modification."""
target[i] = x
return target
--- DALI-main: dali/python/nvidia/dali/_autograph/operators/slices.py ---
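get_item and set_item reduce to ordinary subscripting for Python containers; a short sketch (illustrative; assumes the default dispatch hooks):
```
from nvidia.dali._autograph.operators.slices import GetItemOpts, get_item, set_item

data = ['a', 'b', 'c']
assert get_item(data, 1, GetItemOpts(element_dtype=None)) == 'b'

data = set_item(data, 1, 'B')  # mutable targets are updated in place and returned
assert data == ['a', 'B', 'c']
```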
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Conditional expressions (e.g. the ternary if statement)."""
from nvidia.dali._autograph.utils import hooks
def if_exp(cond, if_true, if_false, expr_repr):
if hooks._DISPATCH.detect_overload_if_exp(cond):
return hooks._DISPATCH.if_exp(cond, if_true, if_false, expr_repr)
else:
return _py_if_exp(cond, if_true, if_false)
def _py_if_exp(cond, if_true, if_false):
return if_true() if cond else if_false()
--- DALI-main: dali/python/nvidia/dali/_autograph/operators/conditional_expressions.py ---
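if_exp mirrors Python's ternary expression; both branches are passed as thunks so only the selected one runs. A small sketch (illustrative; assumes the default dispatch hooks):
```
from nvidia.dali._autograph.operators.conditional_expressions import if_exp

def clamp_non_negative(x):
    # equivalent to: x if x >= 0 else 0
    return if_exp(x >= 0, lambda: x, lambda: 0, 'x >= 0 ? x : 0')

assert clamp_non_negative(5) == 5
assert clamp_non_negative(-3) == 0
```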
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Lowers list comprehensions into for and if statements.
Example:
result = [x * x for x in xs]
becomes
result = []
for x in xs:
elt = x * x
result.append(elt)
"""
import gast
from nvidia.dali._autograph.core import converter
from nvidia.dali._autograph.pyct import templates
# TODO(mdan): This should convert directly to operator calls.
class ListCompTransformer(converter.Base):
"""Lowers list comprehensions into standard control flow."""
def visit_Assign(self, node):
if not isinstance(node.value, gast.ListComp):
return self.generic_visit(node)
if len(node.targets) > 1:
raise NotImplementedError('multiple assignments')
target, = node.targets
list_comp_node = node.value
template = """
target = []
"""
initialization = templates.replace(template, target=target)
template = """
target.append(elt)
"""
body = templates.replace(template, target=target, elt=list_comp_node.elt)
for gen in reversed(list_comp_node.generators):
for gen_if in reversed(gen.ifs):
template = """
if test:
body
"""
body = templates.replace(template, test=gen_if, body=body)
template = """
for target in iter_:
body
"""
body = templates.replace(
template, iter_=gen.iter, target=gen.target, body=body)
return initialization + body
def transform(node, ctx):
return ListCompTransformer(ctx).visit(node)
--- DALI-main: dali/python/nvidia/dali/_autograph/converters/list_comprehensions.py ---
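A hedged sketch of how the converter above could be exercised through the converter_testing helper defined earlier in this section (illustrative; the test class is hypothetical and assumes the testing transpiler returns a callable as shown above):
```
from nvidia.dali._autograph.converters import list_comprehensions
from nvidia.dali._autograph.core.converter_testing import TestCase

class ListCompLoweringTest(TestCase):  # hypothetical test case
    def test_lowering_preserves_semantics(self):
        def f(xs):
            ys = [x * x for x in xs]
            return ys
        # Applies only the list-comprehension lowering; the result should be
        # behaviorally identical to the original function.
        tr_f = self.transform(f, list_comprehensions)
        self.assertEqual(tr_f([1, 2, 3]), f([1, 2, 3]))
```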
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts function definitions and lambdas by adding necessary boilerplate."""
import gast
from nvidia.dali._autograph.core import converter
from nvidia.dali._autograph.pyct import anno
from nvidia.dali._autograph.pyct import parser
from nvidia.dali._autograph.pyct import qual_names
from nvidia.dali._autograph.pyct import templates
from nvidia.dali._autograph.pyct.static_analysis import activity
from nvidia.dali._autograph.pyct.static_analysis import annos
class _Function(object):
def __init__(self):
self.context_name = None
class FunctionTransformer(converter.Base):
"""Wraps function bodies around autograph-specific boilerplate."""
def _function_scope_options(self, fn_scope):
"""Returns the options with which to create function scopes."""
# Top-level functions receive the options that were directly requested.
# All others receive the options corresponding to a recursive conversion.
# Note: this mainly controls the user_requested flag, which is important
# primarily because the FunctionScope context also creates a
# ControlStatusCtx(autograph=ENABLED) when user_requested is True. See
# function_wrappers.py.
if fn_scope.level == 2:
return self.ctx.user.options
return self.ctx.user.options.call_options()
def visit_Lambda(self, node):
with self.state[_Function] as fn_scope:
node = self.generic_visit(node)
# TODO(mdan): Fix the tests so that we can always add this decorator.
if fn_scope.level > 2:
return templates.replace_as_expression(
'ag__.autograph_artifact(l)', l=node)
scope = anno.getanno(node, anno.Static.SCOPE)
function_context_name = self.ctx.namer.new_symbol('lscope',
scope.referenced)
fn_scope.context_name = function_context_name
anno.setanno(node, 'function_context_name', function_context_name)
template = """
ag__.with_function_scope(
lambda function_context: body, function_context_name, options)
"""
node.body = templates.replace_as_expression(
template,
options=self._function_scope_options(fn_scope).to_ast(),
function_context=function_context_name,
function_context_name=gast.Constant(function_context_name, kind=None),
body=node.body)
return node
def visit_FunctionDef(self, node):
with self.state[_Function] as fn_scope:
scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
function_context_name = self.ctx.namer.new_symbol('fscope',
scope.referenced)
fn_scope.context_name = function_context_name
anno.setanno(node, 'function_context_name', function_context_name)
node = self.generic_visit(node)
if fn_scope.level <= 2:
# Top-level functions lose their decorator because the conversion is
# always just-in-time and by the time it happens the decorators are
# already set to be applied.
node.decorator_list = []
else:
# TODO(mdan): Fix the tests so that we can always add this decorator.
# Inner functions are converted already, so we insert a decorator to
# prevent double conversion. Double conversion would work too, but this
# saves the overhead.
node.decorator_list.append(
parser.parse_expression('ag__.autograph_artifact'))
docstring_node = None
if node.body:
first_statement = node.body[0]
if (isinstance(first_statement, gast.Expr) and
isinstance(first_statement.value, gast.Constant)):
docstring_node = first_statement
node.body = node.body[1:]
template = """
with ag__.FunctionScope(
function_name, context_name, options) as function_context:
body
"""
wrapped_body = templates.replace(
template,
function_name=gast.Constant(node.name, kind=None),
context_name=gast.Constant(function_context_name, kind=None),
options=self._function_scope_options(fn_scope).to_ast(),
function_context=function_context_name,
body=node.body)
if docstring_node is not None:
wrapped_body = [docstring_node] + wrapped_body
node.body = wrapped_body
return node
def transform(node, ctx):
node = qual_names.resolve(node)
node = activity.resolve(node, ctx, None)
return FunctionTransformer(ctx).visit(node)
--- DALI-main: dali/python/nvidia/dali/_autograph/converters/functions.py ---
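Schematically, the FunctionDef template above turns a simple function into a body wrapped in a FunctionScope (illustrative sketch; the generated context name comes from the namer and the options are built per conversion):
```
# Original:
#
#   def f(x):
#       return x + 1
#
# After FunctionTransformer (schematically):
#
#   def f(x):
#       with ag__.FunctionScope('f', 'fscope', <options>) as fscope:
#           return x + 1
```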
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converter for list operations.
This includes converting Python lists to TensorArray/TensorList.
"""
# TODO(mdan): Elaborate the logic here.
# TODO(mdan): Does it even make sense to attempt to try to use TAs?
# The current rule (always convert to TensorArray) is naive and insufficient.
# In general, a better mechanism could look like:
# * convert to TensorList by default
# * leave as Python list if the user explicitly forbids it
# * convert to TensorArray only when complete write once behavior can be
# guaranteed (e.g. list comprehensions)
import gast
from nvidia.dali._autograph.core import converter
from nvidia.dali._autograph.lang import directives
from nvidia.dali._autograph.pyct import anno
from nvidia.dali._autograph.pyct import parser
from nvidia.dali._autograph.pyct import qual_names
from nvidia.dali._autograph.pyct import templates
from nvidia.dali._autograph.pyct.static_analysis import activity
from nvidia.dali._autograph.pyct.static_analysis.annos import NodeAnno
class _Statement(object):
def __init__(self):
self.pop_uses = None
class ListTransformer(converter.Base):
"""Converts lists and related operations to their TF counterpart."""
def visit_List(self, node):
node = self.generic_visit(node)
template = """
ag__.new_list(elements)
"""
return templates.replace_as_expression(template, elements=node)
def _replace_append_call(self, node):
assert len(node.args) == 1
assert isinstance(node.func, gast.Attribute)
template = """
target = ag__.list_append(target, element)
"""
return templates.replace(
template,
target=node.func.value,
element=node.args[0])
def _replace_pop_call(self, node):
# Expressions that use pop() are converted to a statement + expression.
#
# For example:
#
# print(target.pop())
#
# ... is converted to:
#
# target, target_pop = ag__.list_pop(target)
# print(target_pop)
#
# Here, we just generate the variable name and swap it in,
# and _generate_pop_operation will handle the rest.
#
# Multiple uses of pop() are allowed:
#
# print(target.pop(), target.pop())
# print(target.pop().pop())
#
assert isinstance(node.func, gast.Attribute)
scope = anno.getanno(node, NodeAnno.ARGS_SCOPE)
target_node = node.func.value
# Attempt to use a related name if one exists. Otherwise use something
# generic.
if anno.hasanno(target_node, anno.Basic.QN):
target_name = anno.getanno(target_node, anno.Basic.QN).ssf()
else:
target_name = 'list_'
pop_var_name = self.ctx.namer.new_symbol(target_name, scope.referenced)
stmt = self.state[_Statement]
if stmt.pop_uses is None:
stmt.pop_uses = []
stmt.pop_uses.append((node, pop_var_name))
return templates.replace_as_expression('var_name', var_name=pop_var_name)
def _replace_stack_call(self, node):
assert len(node.args) == 1
dtype = self.get_definition_directive(
node.args[0],
directives.set_element_type,
'dtype',
default=templates.replace_as_expression('None'))
template = """
ag__.list_stack(
target,
opts=ag__.ListStackOpts(
element_dtype=dtype,
original_call=orig_call))
"""
return templates.replace_as_expression(
template,
dtype=dtype,
target=node.args[0],
orig_call=node.func)
def visit_Call(self, node):
node = self.generic_visit(node)
# TODO(mdan): This is insufficient if target is a function argument.
# In the case of function arguments, we need to add the list to the
# function's return value, because it is being modified.
# TODO(mdan): Checking just the name is brittle, can it be improved?
if isinstance(node.func, gast.Attribute):
func_name = node.func.attr
if func_name == 'append' and (len(node.args) == 1):
node = self._replace_append_call(node)
elif func_name == 'pop' and (len(node.args) <= 1):
node = self._replace_pop_call(node)
elif (func_name == 'stack' and (len(node.args) == 1) and
(not node.keywords or node.keywords[0].arg == 'strict')):
# This avoids false positives with keyword args.
# TODO(mdan): handle kwargs properly.
node = self._replace_stack_call(node)
return node
def _generate_pop_operation(self, original_call_node, pop_var_name):
assert isinstance(original_call_node.func, gast.Attribute)
if original_call_node.args:
pop_element = original_call_node.args[0]
else:
pop_element = parser.parse_expression('None')
# The call will be something like "target.pop()", and the dtype is hooked to
# target, hence the func.value.
# TODO(mdan): For lists of lists, this won't work.
    # It won't work because it's unclear how to annotate
# the list as a "list of lists with a certain element type" when using
# operations like `l.pop().pop()`.
dtype = self.get_definition_directive(
original_call_node.func.value,
directives.set_element_type,
'dtype',
default=templates.replace_as_expression('None'))
shape = self.get_definition_directive(
original_call_node.func.value,
directives.set_element_type,
'shape',
default=templates.replace_as_expression('None'))
template = """
target, pop_var_name = ag__.list_pop(
target, element,
opts=ag__.ListPopOpts(element_dtype=dtype, element_shape=shape))
"""
return templates.replace(
template,
target=original_call_node.func.value,
pop_var_name=pop_var_name,
element=pop_element,
dtype=dtype,
shape=shape)
def _postprocess_statement(self, node):
"""Inserts any separate pop() calls that node may use."""
pop_uses = self.state[_Statement].pop_uses
if pop_uses:
replacements = []
for original_call_node, pop_var_name in pop_uses:
replacements.extend(
self._generate_pop_operation(original_call_node, pop_var_name))
replacements.append(node)
node = replacements
self.state[_Statement].exit()
return node, None
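  # For illustration only, the combined effect of the append/pop rewrites above:
  #
  #   l.append(x)
  #   y = l.pop()
  #
  # roughly becomes (generated variable names will differ):
  #
  #   l = ag__.list_append(l, x)
  #   l, l_1 = ag__.list_pop(
  #       l, None, opts=ag__.ListPopOpts(element_dtype=None, element_shape=None))
  #   y = l_1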
def _visit_and_process_block(self, block):
return self.visit_block(
block,
before_visit=self.state[_Statement].enter,
after_visit=self._postprocess_statement)
def visit_FunctionDef(self, node):
node.args = self.generic_visit(node.args)
node.decorator_list = self.visit_block(node.decorator_list)
node.body = self._visit_and_process_block(node.body)
return node
def visit_For(self, node):
node.target = self.visit(node.target)
node.body = self._visit_and_process_block(node.body)
node.orelse = self._visit_and_process_block(node.orelse)
return node
def visit_While(self, node):
node.test = self.visit(node.test)
node.body = self._visit_and_process_block(node.body)
node.orelse = self._visit_and_process_block(node.orelse)
return node
def visit_If(self, node):
node.test = self.visit(node.test)
node.body = self._visit_and_process_block(node.body)
node.orelse = self._visit_and_process_block(node.orelse)
return node
def visit_With(self, node):
node.items = self.visit_block(node.items)
node.body = self._visit_and_process_block(node.body)
return node
def transform(node, ctx):
node = qual_names.resolve(node)
node = activity.resolve(node, ctx, None)
return ListTransformer(ctx).visit(node)
|
DALI-main
|
dali/python/nvidia/dali/_autograph/converters/lists.py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Overloads all variable read operations."""
import gast
from nvidia.dali._autograph.core import converter
from nvidia.dali._autograph.pyct import anno
from nvidia.dali._autograph.pyct import templates
class VariableAccessTransformer(converter.Base):
"""Rewrites basic symbol reads.
This transformer rewrites variable reads with a "read" operator which allows
tracking activity.
Example:
For a basic statement:
a = b + c
This is translated to:
a = ld(b) + ld(c)
  Augmented assignment operations also introduce an `ld` operator:
a += b
The assignment target also receives an operator to properly represent the
read:
a = ld(a)
a += ld(b)
"""
def visit_Name(self, node):
# Only the loads which existed in the original code are overloaded.
if not anno.hasanno(node, anno.Static.ORIG_DEFINITIONS):
return node
if isinstance(node.ctx, gast.Load):
node = templates.replace_as_expression('ag__.ld(var_)', var_=node)
return node
def visit_Delete(self, node):
node = self.generic_visit(node)
rewrite_targets = []
for tgt in node.targets:
# Don't rewrite composites like `del a[0]`.
if isinstance(tgt, gast.Name):
rewrite_targets.append(tgt)
if not rewrite_targets:
return node
results = []
for tgt in rewrite_targets:
template = """
var_ = ag__.Undefined(var_name)
"""
results.extend(templates.replace(
template, var_=tgt, var_name=gast.Constant(tgt.id, kind=None)))
remaining_targets = [n for n in node.targets if n not in rewrite_targets]
if remaining_targets:
results.append(gast.Delete(targets=remaining_targets))
return results
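  # For illustration, the effect of visit_Delete:
  #
  #   del a        ->  a = ag__.Undefined('a')
  #   del a, b[0]  ->  a = ag__.Undefined('a')
  #                    del b[0]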
def visit_AugAssign(self, node):
if isinstance(node.target, gast.Name):
template = """
var_ = ag__.ld(var_)
original
"""
node = templates.replace(template, var_=node.target, original=node)
else:
node = self.generic_visit(node)
return node
def transform(node, ctx):
return VariableAccessTransformer(ctx).visit(node)
|
DALI-main
|
dali/python/nvidia/dali/_autograph/converters/variables.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Canonicalizes functions with multiple returns to use just one."""
import gast
from nvidia.dali._autograph.core import converter
from nvidia.dali._autograph.pyct import anno
from nvidia.dali._autograph.pyct import parser
from nvidia.dali._autograph.pyct import qual_names
from nvidia.dali._autograph.pyct import templates
from nvidia.dali._autograph.pyct.static_analysis import activity
from nvidia.dali._autograph.pyct.static_analysis.annos import NodeAnno
BODY_DEFINITELY_RETURNS = 'BODY_DEFINITELY_RETURNS'
ORELSE_DEFINITELY_RETURNS = 'ORELSE_DEFINITELY_RETURNS'
STMT_DEFINITELY_RETURNS = 'STMT_DEFINITELY_RETURNS'
class _RewriteBlock(object):
def __init__(self):
self.definitely_returns = False
class ConditionalReturnRewriter(converter.Base):
"""Rewrites a pattern where it's unobvious that all paths return a value.
This rewrite allows avoiding intermediate None return values.
The following pattern:
if cond:
<block 1>
return
else:
<block 2>
<block 3>
is converted to:
if cond:
<block 1>
return
else:
<block 2>
        <block 3>
and vice-versa (if the else returns, subsequent statements are moved under the
if branch).
"""
def visit_Return(self, node):
self.state[_RewriteBlock].definitely_returns = True
return node
def _postprocess_statement(self, node):
# If the node definitely returns (e.g. it's a with statement with a
# return statement in it), then the current block also definitely returns.
if anno.getanno(node, STMT_DEFINITELY_RETURNS, default=False):
self.state[_RewriteBlock].definitely_returns = True
    # Special case: collapse a typical conditional return pattern into
    # a single conditional that may have returns on both branches. This
# reduces the use of None return values, which don't work with TF
# conditionals.
if (isinstance(node, gast.If)
and anno.getanno(node, BODY_DEFINITELY_RETURNS, default=False)):
return node, node.orelse
elif (isinstance(node, gast.If)
and anno.getanno(node, ORELSE_DEFINITELY_RETURNS, default=False)):
return node, node.body
return node, None
def _visit_statement_block(self, node, nodes):
self.state[_RewriteBlock].enter()
new_nodes = self.visit_block(nodes, after_visit=self._postprocess_statement)
block_definitely_returns = self.state[_RewriteBlock].definitely_returns
self.state[_RewriteBlock].exit()
return new_nodes, block_definitely_returns
def visit_While(self, node):
node.test = self.visit(node.test)
node.body, _ = self._visit_statement_block(node, node.body)
node.orelse, _ = self._visit_statement_block(node, node.orelse)
return node
def visit_For(self, node):
node.iter = self.visit(node.iter)
node.target = self.visit(node.target)
node.body, _ = self._visit_statement_block(node, node.body)
node.orelse, _ = self._visit_statement_block(node, node.orelse)
return node
def visit_With(self, node):
node.items = self.visit_block(node.items)
node.body, definitely_returns = self._visit_statement_block(node, node.body)
if definitely_returns:
anno.setanno(node, STMT_DEFINITELY_RETURNS, True)
return node
def visit_Try(self, node):
    # We could decide whether a 'try' DEFINITELY_RETURNS based on its components.
    # It is not clear whether we want to do anything with this, given that
    # a 'try' is likely to throw an exception in some circumstances.
node.body, _ = self._visit_statement_block(node, node.body)
node.orelse, _ = self._visit_statement_block(node, node.orelse)
node.finalbody, _ = self._visit_statement_block(node, node.finalbody)
node.handlers = self.visit_block(node.handlers)
return node
def visit_ExceptHandler(self, node):
# To determine whether `try` DEFINITELY_RETURNS we need to revisit this.
node.body, _ = self._visit_statement_block(node, node.body)
return node
def visit_If(self, node):
node.test = self.visit(node.test)
node.body, body_definitely_returns = self._visit_statement_block(
node, node.body)
if body_definitely_returns:
anno.setanno(node, BODY_DEFINITELY_RETURNS, True)
node.orelse, orelse_definitely_returns = self._visit_statement_block(
node, node.orelse)
if orelse_definitely_returns:
anno.setanno(node, ORELSE_DEFINITELY_RETURNS, True)
if body_definitely_returns and orelse_definitely_returns:
self.state[_RewriteBlock].definitely_returns = True
return node
def visit_FunctionDef(self, node):
node.args = self.visit(node.args)
node.body, _ = self._visit_statement_block(node, node.body)
return node
class _Block(object):
def __init__(self):
self.is_function = False
self.return_used = False
self.create_guard_next = False
self.create_guard_now = False
def __repr__(self):
return 'used: {}'.format(
self.return_used)
class _Function(object):
def __init__(self):
self.do_return_var_name = None
self.retval_var_name = None
def __repr__(self):
return 'return control: {}, return value: {}'.format(
self.do_return_var_name, self.retval_var_name)
class ReturnStatementsTransformer(converter.Base):
"""Lowers return statements into variables and conditionals.
Specifically, the following pattern:
<block 1>
return val
<block 2>
is converted to:
do_return = False
retval = None
<block 1>
do_return = True
retval = val
if not do_return:
<block 2>
return retval
The conversion adjusts loops as well:
<block 1>
while cond:
<block 2>
        return val
is converted to:
<block 1>
while not do_return and cond:
<block 2>
do_return = True
retval = val
"""
def __init__(self, ctx, allow_missing_return):
super(ReturnStatementsTransformer, self).__init__(ctx)
self.allow_missing_return = allow_missing_return
def visit_Return(self, node):
for block in reversed(self.state[_Block].stack):
block.return_used = True
block.create_guard_next = True
if block.is_function:
break
retval = node.value if node.value else parser.parse_expression('None')
    # Note: If `return <expr>` raises, then the return is aborted.
# The try-catch below ensures the variables remain consistent in that case.
template = """
try:
do_return_var_name = True
retval_var_name = retval
except:
do_return_var_name = False
raise
"""
node = templates.replace(
template,
do_return_var_name=self.state[_Function].do_return_var_name,
retval_var_name=self.state[_Function].retval_var_name,
retval=retval)
return node
def _postprocess_statement(self, node):
if not self.state[_Block].return_used:
return node, None
state = self.state[_Block]
if state.create_guard_now:
template = """
if not do_return_var_name:
original_node
"""
cond, = templates.replace(
template,
do_return_var_name=self.state[_Function].do_return_var_name,
original_node=node)
node, block = cond, cond.body
else:
node, block = node, None
state.create_guard_now = state.create_guard_next
state.create_guard_next = False
return node, block
def _visit_statement_block(self, node, nodes):
self.state[_Block].enter()
nodes = self.visit_block(nodes, after_visit=self._postprocess_statement)
self.state[_Block].exit()
return nodes
def visit_While(self, node):
node.test = self.visit(node.test)
# Add the check for return to the loop condition.
node.body = self._visit_statement_block(node, node.body)
if self.state[_Block].return_used:
node.test = templates.replace_as_expression(
'not control_var and test',
test=node.test,
control_var=self.state[_Function].do_return_var_name)
node.orelse = self._visit_statement_block(node, node.orelse)
return node
def visit_For(self, node):
node.iter = self.visit(node.iter)
node.target = self.visit(node.target)
# Add the check for return to the loop condition.
node.body = self._visit_statement_block(node, node.body)
if self.state[_Block].return_used:
extra_test = anno.getanno(node, anno.Basic.EXTRA_LOOP_TEST, default=None)
if extra_test is not None:
extra_test = templates.replace_as_expression(
'not control_var and extra_test',
extra_test=extra_test,
control_var=self.state[_Function].do_return_var_name)
else:
extra_test = templates.replace_as_expression(
'not control_var',
control_var=self.state[_Function].do_return_var_name)
anno.setanno(node, anno.Basic.EXTRA_LOOP_TEST, extra_test)
node.orelse = self._visit_statement_block(node, node.orelse)
return node
def visit_With(self, node):
node.items = self.visit_block(node.items)
node.body = self._visit_statement_block(node, node.body)
return node
def visit_Try(self, node):
node.body = self._visit_statement_block(node, node.body)
node.orelse = self._visit_statement_block(node, node.orelse)
node.finalbody = self._visit_statement_block(node, node.finalbody)
node.handlers = self.visit_block(node.handlers)
return node
def visit_ExceptHandler(self, node):
node.body = self._visit_statement_block(node, node.body)
return node
def visit_If(self, node):
node.test = self.visit(node.test)
node.body = self._visit_statement_block(node, node.body)
node.orelse = self._visit_statement_block(node, node.orelse)
return node
def visit_FunctionDef(self, node):
with self.state[_Function] as fn:
with self.state[_Block] as block:
block.is_function = True
scope = anno.getanno(node, NodeAnno.BODY_SCOPE)
do_return_var_name = self.ctx.namer.new_symbol('do_return',
scope.referenced)
retval_var_name = self.ctx.namer.new_symbol('retval_', scope.referenced)
fn.do_return_var_name = do_return_var_name
fn.retval_var_name = retval_var_name
node.body = self._visit_statement_block(node, node.body)
if block.return_used:
if self.allow_missing_return:
# The function would have a single `with` node that wraps the
# entire body. If the function had a docstring, the body has two
# nodes, with the `with` as the second node.
wrapper_node = node.body[-1]
assert isinstance(wrapper_node, gast.With), (
'This transformer requires the functions converter.')
template = """
do_return_var_name = False
retval_var_name = ag__.UndefinedReturnValue()
body
return function_context.ret(retval_var_name, do_return_var_name)
"""
wrapper_node.body = templates.replace(
template,
body=wrapper_node.body,
do_return_var_name=do_return_var_name,
function_context=anno.getanno(node, 'function_context_name'),
retval_var_name=retval_var_name)
else:
template = """
body
return retval_var_name
"""
node.body = templates.replace(
template,
body=node.body,
do_return_var_name=do_return_var_name,
retval_var_name=retval_var_name)
return node
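  # For illustration only (simplified; assumes the function_scopes converter
  # already wrapped the body in a `with` block whose context is named `fscope`):
  #
  #   def f(x):
  #     if x > 0:
  #       return x
  #     return -x
  #
  # roughly becomes:
  #
  #   def f(x):
  #     with ... as fscope:
  #       do_return = False
  #       retval_ = ag__.UndefinedReturnValue()
  #       # returns lowered into do_return/retval_ assignments, guarded blocks
  #       return fscope.ret(retval_, do_return)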
def transform(node, ctx, default_to_null_return=True):
"""Ensure a function has only a single return, at the end."""
node = qual_names.resolve(node)
node = activity.resolve(node, ctx, None)
# Note: Technically, these two could be merged into a single walk, but
# keeping them separate helps with readability.
node = ConditionalReturnRewriter(ctx).visit(node)
node = qual_names.resolve(node)
node = activity.resolve(node, ctx, None)
transformer = ReturnStatementsTransformer(
ctx, allow_missing_return=default_to_null_return)
node = transformer.visit(node)
return node
|
DALI-main
|
dali/python/nvidia/dali/_autograph/converters/return_statements.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converter for logical expressions, e.g. `a and b -> tf.logical_and(a, b)`."""
import gast
from nvidia.dali._autograph.core import converter
from nvidia.dali._autograph.pyct import parser
from nvidia.dali._autograph.pyct import templates
# TODO(mdan): Properly extract boolean ops according to lazy eval rules.
# Note that this isn't completely safe either, because tensors may have control
# dependencies.
# Note that for loops, this should be done after the loop has been converted to
# tf.while_loop so that the expanded conditionals are properly scoped.
# Used to signal that an operand is safe for non-lazy evaluation.
SAFE_BOOLEAN_OPERAND = 'SAFE_BOOLEAN_OPERAND'
LOGICAL_OPERATORS = {
gast.And: 'ag__.and_',
gast.Not: 'ag__.not_',
gast.Or: 'ag__.or_',
}
EQUALITY_OPERATORS = {
gast.Eq: 'ag__.eq',
gast.NotEq: 'ag__.not_eq',
}
class LogicalExpressionTransformer(converter.Base):
"""Converts logical expressions to corresponding TF calls."""
def _overload_of(self, operator):
op_type = type(operator)
if op_type in LOGICAL_OPERATORS:
return LOGICAL_OPERATORS[op_type]
if self.ctx.user.options.uses(converter.Feature.EQUALITY_OPERATORS):
if op_type in EQUALITY_OPERATORS:
return EQUALITY_OPERATORS[op_type]
return None
def _as_lambda(self, expr):
return templates.replace_as_expression('lambda: expr', expr=expr)
def _as_binary_function(self, func_name, arg1, arg2):
return templates.replace_as_expression(
'func_name(arg1, arg2)',
func_name=parser.parse_expression(func_name),
arg1=arg1,
arg2=arg2)
def _as_binary_operation(self, op, arg1, arg2):
template = templates.replace_as_expression(
'arg1 is arg2', # Note: `is` will be replaced with `op` below.
arg1=arg1,
arg2=arg2)
template.ops[0] = op
return template
def _as_unary_function(self, func_name, arg):
return templates.replace_as_expression(
'func_name(arg)', func_name=parser.parse_expression(func_name), arg=arg)
def _process_binop(self, op, left, right):
overload = self._overload_of(op)
if overload is None:
return self._as_binary_operation(op, left, right)
return self._as_binary_function(overload, left, right)
def visit_Compare(self, node):
node = self.generic_visit(node)
ops_and_comps = list(zip(node.ops, node.comparators))
left = node.left
# Repeated comparisons are converted to conjunctions:
# a < b < c -> a < b and b < c
op_tree = None
while ops_and_comps:
op, right = ops_and_comps.pop(0)
binary_comparison = self._process_binop(op, left, right)
if op_tree is not None:
op_tree = self._as_binary_function('ag__.and_',
self._as_lambda(op_tree),
self._as_lambda(binary_comparison))
else:
op_tree = binary_comparison
left = right
assert op_tree is not None
return op_tree
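  # For illustration, chained comparisons are flattened into lazy conjunctions:
  #
  #   a < b < c  ->  ag__.and_(lambda: a < b, lambda: b < c)
  #
  # and, when the EQUALITY_OPERATORS feature is enabled:
  #
  #   a == b     ->  ag__.eq(a, b)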
def visit_UnaryOp(self, node):
node = self.generic_visit(node)
overload = self._overload_of(node.op)
if overload is None:
return node
return self._as_unary_function(overload, node.operand)
def visit_BoolOp(self, node):
node = self.generic_visit(node)
node_values = node.values
right = node.values.pop()
while node_values:
left = node_values.pop()
right = self._as_binary_function(
self._overload_of(node.op), self._as_lambda(left),
self._as_lambda(right))
return right
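  # For illustration, boolean operators nest to the right as lazily-evaluated
  # calls:
  #
  #   a and b and c  ->  ag__.and_(lambda: a, lambda: ag__.and_(lambda: b, lambda: c))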
def transform(node, ctx):
transformer = LogicalExpressionTransformer(ctx)
return transformer.visit(node)
|
DALI-main
|
dali/python/nvidia/dali/_autograph/converters/logical_expressions.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Handles control flow statements: while, for, if."""
import gast
from nvidia.dali._autograph.core import converter
from nvidia.dali._autograph.lang import directives
from nvidia.dali._autograph.pyct import anno
from nvidia.dali._autograph.pyct import cfg
from nvidia.dali._autograph.pyct import origin_info
from nvidia.dali._autograph.pyct import parser
from nvidia.dali._autograph.pyct import qual_names
from nvidia.dali._autograph.pyct import templates
from nvidia.dali._autograph.pyct.static_analysis import activity
from nvidia.dali._autograph.pyct.static_analysis import annos
from nvidia.dali._autograph.pyct.static_analysis import liveness
from nvidia.dali._autograph.pyct.static_analysis import reaching_definitions
from nvidia.dali._autograph.pyct.static_analysis import reaching_fndefs
class _Function(object):
scope = None
class ControlFlowTransformer(converter.Base):
"""Transforms control flow structures like loops an conditionals."""
def visit_Lambda(self, node):
with self.state[_Function] as fn:
fn.scope = anno.getanno(node, anno.Static.SCOPE)
return self.generic_visit(node)
def visit_FunctionDef(self, node):
with self.state[_Function] as fn:
fn.scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
return self.generic_visit(node)
def _create_nonlocal_declarations(self, vars_):
vars_ = set(vars_)
results = []
global_vars = self.state[_Function].scope.globals & vars_
if global_vars:
results.append(gast.Global([str(v) for v in global_vars]))
nonlocal_vars = [
v for v in vars_ if not v.is_composite() and v not in global_vars]
if nonlocal_vars:
results.append(gast.Nonlocal([str(v) for v in nonlocal_vars]))
return results
def _create_state_functions(
self, block_vars, nonlocal_declarations, getter_name, setter_name):
if not block_vars:
template = """
def getter_name():
return ()
def setter_name(block_vars):
pass
"""
return templates.replace(
template, getter_name=getter_name, setter_name=setter_name)
guarded_block_vars = []
for v in block_vars:
if v.is_simple():
guarded_block_vars.append(v)
else:
guarded_block_vars.append(
templates.replace_as_expression(
'ag__.ldu(lambda: var_, name)',
var_=v,
name=gast.Constant(str(v), kind=None)))
template = """
def getter_name():
return guarded_state_vars,
def setter_name(vars_):
nonlocal_declarations
state_vars, = vars_
"""
return templates.replace(
template,
nonlocal_declarations=nonlocal_declarations,
getter_name=getter_name,
guarded_state_vars=guarded_block_vars,
setter_name=setter_name,
state_vars=tuple(block_vars))
def _create_loop_options(self, node):
if not anno.hasanno(node, anno.Basic.DIRECTIVES):
return gast.Dict([], [])
loop_directives = anno.getanno(node, anno.Basic.DIRECTIVES)
if directives.set_loop_options not in loop_directives:
return gast.Dict([], [])
opts_dict = loop_directives[directives.set_loop_options]
str_keys, values = zip(*opts_dict.items())
keys = [gast.Constant(s, kind=None) for s in str_keys]
values = list(values) # ast and gast don't play well with tuples.
return gast.Dict(keys, values)
def _create_undefined_assigns(self, undefined_symbols):
assignments = []
for s in undefined_symbols:
template = '''
var = ag__.Undefined(symbol_name)
'''
assignments += templates.replace(
template,
var=s,
symbol_name=gast.Constant(s.ssf(), kind=None))
return assignments
def _get_block_basic_vars(self, modified, live_in, live_out):
nonlocals = self.state[_Function].scope.nonlocals
basic_scope_vars = []
for s in modified:
if s.is_composite():
# TODO(mdan): Raise an error when this happens for a TF scope.
continue
# Variables not live into or out of the scope are considered local to the
# scope.
if s in live_in or s in live_out or s in nonlocals:
basic_scope_vars.append(s)
continue
return frozenset(basic_scope_vars)
def _get_block_composite_vars(self, modified, live_in):
# The scope variables corresponding to composite symbols (e.g. `self.x`).
composite_scope_vars = []
for s in modified:
if not s.is_composite():
continue
# Mutations made to objects created inside the scope will appear as writes
# to composite symbols. Because these mutations appear as modifications
# made to composite symbols, we check whether the composite's parent is
# actually live into the scope.
# Example:
# while cond:
# x = Foo()
# x.foo = 2 * x.foo # x.foo is live into the scope, but x is not.
#
# Note that some parents might not be symbols - for example, in x['foo'],
# 'foo' is a parent, but it's a literal, not a symbol. We don't check the
# liveness of literals.
support_set_symbols = tuple(
sss for sss in s.support_set if sss.is_symbol())
if not all(sss in live_in for sss in support_set_symbols):
continue
composite_scope_vars.append(s)
return frozenset(composite_scope_vars)
def _get_block_vars(self, node, modified):
"""Determines the variables affected inside a control flow statement."""
defined_in = anno.getanno(node, anno.Static.DEFINED_VARS_IN)
live_in = anno.getanno(node, anno.Static.LIVE_VARS_IN)
live_out = anno.getanno(node, anno.Static.LIVE_VARS_OUT)
fn_scope = self.state[_Function].scope
basic_scope_vars = self._get_block_basic_vars(
modified,
live_in,
live_out)
composite_scope_vars = self._get_block_composite_vars(modified, live_in)
scope_vars = tuple(basic_scope_vars | composite_scope_vars)
# Variables that are modified inside the scope, but not defined
# before entering it. Only simple variables must be defined. The
# composite ones will be implicitly checked at runtime.
possibly_undefined = (
modified - defined_in - fn_scope.globals - fn_scope.nonlocals)
undefined = tuple(v for v in possibly_undefined if not v.is_composite())
# Variables that are modified inside the scope, and depend on values outside
# it.
input_only = basic_scope_vars & live_in - live_out
# Place the outputs first, then sort lexicographically.
scope_vars = sorted(scope_vars, key=lambda v: (v in input_only, v))
nouts = len(scope_vars) - len(input_only)
return scope_vars, undefined, nouts
def visit_If(self, node):
node = self.generic_visit(node)
body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
orelse_scope = anno.getanno(node, annos.NodeAnno.ORELSE_SCOPE)
cond_vars, undefined, nouts = self._get_block_vars(
node, body_scope.bound | orelse_scope.bound)
undefined_assigns = self._create_undefined_assigns(undefined)
nonlocal_declarations = self._create_nonlocal_declarations(cond_vars)
reserved = body_scope.referenced | orelse_scope.referenced
state_getter_name = self.ctx.namer.new_symbol('get_state', reserved)
state_setter_name = self.ctx.namer.new_symbol('set_state', reserved)
state_functions = self._create_state_functions(
cond_vars, nonlocal_declarations, state_getter_name, state_setter_name)
orelse_body = node.orelse
if not orelse_body:
orelse_body = [gast.Pass()]
template = """
state_functions
def body_name():
nonlocal_declarations
body
def orelse_name():
nonlocal_declarations
orelse
undefined_assigns
ag__.if_stmt(
test,
body_name,
orelse_name,
state_getter_name,
state_setter_name,
(symbol_names,),
nouts)
"""
new_nodes = templates.replace(
template,
body=node.body,
body_name=self.ctx.namer.new_symbol('if_body', reserved),
orelse=orelse_body,
orelse_name=self.ctx.namer.new_symbol('else_body', reserved),
nonlocal_declarations=nonlocal_declarations,
nouts=gast.Constant(nouts, kind=None),
state_functions=state_functions,
state_getter_name=state_getter_name,
state_setter_name=state_setter_name,
symbol_names=tuple(gast.Constant(str(s), kind=None) for s in cond_vars),
test=node.test,
undefined_assigns=undefined_assigns)
origin_info.copy_origin(node, new_nodes[-1])
return new_nodes
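  # For illustration only (generated names differ; undefined-symbol guards and
  # composite-symbol handling omitted), a conditional such as:
  #
  #   if x > 0:
  #     y = 1
  #   else:
  #     y = 2
  #
  # roughly becomes:
  #
  #   def get_state():
  #     return (y,)
  #   def set_state(vars_):
  #     nonlocal y
  #     (y,) = vars_
  #   def if_body():
  #     nonlocal y
  #     y = 1
  #   def else_body():
  #     nonlocal y
  #     y = 2
  #   ag__.if_stmt(x > 0, if_body, else_body, get_state, set_state, ('y',), 1)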
def visit_While(self, node):
node = self.generic_visit(node)
body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
loop_vars, undefined, _ = self._get_block_vars(node, body_scope.bound)
undefined_assigns = self._create_undefined_assigns(undefined)
nonlocal_declarations = self._create_nonlocal_declarations(loop_vars)
reserved = body_scope.referenced
state_getter_name = self.ctx.namer.new_symbol('get_state', reserved)
state_setter_name = self.ctx.namer.new_symbol('set_state', reserved)
state_functions = self._create_state_functions(
loop_vars, nonlocal_declarations, state_getter_name, state_setter_name)
opts = self._create_loop_options(node)
template = """
state_functions
def body_name():
nonlocal_declarations
body
def test_name():
return test
undefined_assigns
ag__.while_stmt(
test_name,
body_name,
state_getter_name,
state_setter_name,
(symbol_names,),
opts)
"""
new_nodes = templates.replace(
template,
body=node.body,
body_name=self.ctx.namer.new_symbol('loop_body', reserved),
nonlocal_declarations=nonlocal_declarations,
opts=opts,
state_functions=state_functions,
state_getter_name=state_getter_name,
state_setter_name=state_setter_name,
symbol_names=tuple(gast.Constant(str(s), kind=None) for s in loop_vars),
test=node.test,
test_name=self.ctx.namer.new_symbol('loop_test', reserved),
undefined_assigns=undefined_assigns)
origin_info.copy_origin(node, new_nodes[-1])
return new_nodes
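  # For illustration only (same caveats as above), `while i < n: i += 1`
  # roughly becomes:
  #
  #   def get_state():
  #     return (i,)
  #   def set_state(vars_):
  #     nonlocal i
  #     (i,) = vars_
  #   def loop_body():
  #     nonlocal i
  #     i += 1
  #   def loop_test():
  #     return i < n
  #   ag__.while_stmt(loop_test, loop_body, get_state, set_state, ('i',), {})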
def visit_For(self, node):
node = self.generic_visit(node)
body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
iter_scope = anno.getanno(node, annos.NodeAnno.ITERATE_SCOPE)
loop_vars, undefined, _ = self._get_block_vars(
node, body_scope.bound | iter_scope.bound)
undefined_assigns = self._create_undefined_assigns(undefined)
nonlocal_declarations = self._create_nonlocal_declarations(loop_vars)
reserved = body_scope.referenced | iter_scope.referenced
state_getter_name = self.ctx.namer.new_symbol('get_state', reserved)
state_setter_name = self.ctx.namer.new_symbol('set_state', reserved)
state_functions = self._create_state_functions(
loop_vars, nonlocal_declarations, state_getter_name, state_setter_name)
opts = self._create_loop_options(node)
opts.keys.append(gast.Constant('iterate_names', kind=None))
opts.values.append(gast.Constant(
parser.unparse(node.target, include_encoding_marker=False), kind=None))
if anno.hasanno(node, anno.Basic.EXTRA_LOOP_TEST):
extra_test = anno.getanno(node, anno.Basic.EXTRA_LOOP_TEST)
extra_test_name = self.ctx.namer.new_symbol(
'extra_test', reserved)
template = """
def extra_test_name():
nonlocal_declarations
return extra_test_expr
"""
extra_test_function = templates.replace(
template,
extra_test_expr=extra_test,
extra_test_name=extra_test_name,
loop_vars=loop_vars,
nonlocal_declarations=nonlocal_declarations)
else:
extra_test_name = parser.parse_expression('None')
extra_test_function = []
# iterate_arg_name holds a single arg with the iterates, which may be a
# tuple.
iterate_arg_name = self.ctx.namer.new_symbol('itr', reserved)
template = """
iterates = iterate_arg_name
"""
iterate_expansion = templates.replace(
template, iterate_arg_name=iterate_arg_name, iterates=node.target)
origin_info.copy_origin(node, iterate_expansion)
template = """
state_functions
def body_name(iterate_arg_name):
nonlocal_declarations
iterate_expansion
body
extra_test_function
undefined_assigns
ag__.for_stmt(
iterated,
extra_test_name,
body_name,
state_getter_name,
state_setter_name,
(symbol_names,),
opts)
"""
new_nodes = templates.replace(
template,
body=node.body,
body_name=self.ctx.namer.new_symbol('loop_body', reserved),
extra_test_function=extra_test_function,
extra_test_name=extra_test_name,
iterate_arg_name=iterate_arg_name,
iterate_expansion=iterate_expansion,
iterated=node.iter,
nonlocal_declarations=nonlocal_declarations,
opts=opts,
symbol_names=tuple(gast.Constant(str(s), kind=None) for s in loop_vars),
state_functions=state_functions,
state_getter_name=state_getter_name,
state_setter_name=state_setter_name,
undefined_assigns=undefined_assigns)
origin_info.copy_origin(node, new_nodes[-1])
return new_nodes
class AnnotatedDef(reaching_definitions.Definition):
def __init__(self):
super(AnnotatedDef, self).__init__()
self.directives = {}
def transform(node, ctx):
graphs = cfg.build(node)
node = qual_names.resolve(node)
node = activity.resolve(node, ctx, None)
node = reaching_definitions.resolve(node, ctx, graphs)
node = reaching_fndefs.resolve(node, ctx, graphs)
node = liveness.resolve(node, ctx, graphs)
node = ControlFlowTransformer(ctx).visit(node)
return node
|
DALI-main
|
dali/python/nvidia/dali/_autograph/converters/control_flow.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Handles function calls, by generating compiled function names and calls.
Note: this transformer does not rename the top level object being converted;
that is the caller's responsibility.
Requires function_scopes.
"""
import gast
from nvidia.dali._autograph.core import converter
from nvidia.dali._autograph.pyct import anno
from nvidia.dali._autograph.pyct import parser
from nvidia.dali._autograph.pyct import qual_names
from nvidia.dali._autograph.pyct import templates
from nvidia.dali._autograph.utils import ag_logging
# TODO(mdan): Rename to FunctionCallsTransformer.
class _Function(object):
no_root = True
def __init__(self):
self.context_name = None
set_trace_warned = False
class _ArgTemplateBuilder(object):
"""Constructs a tuple representing the positional arguments in a call.
Example (yes, it's legal Python 3):
f(*args1, b, *args2, c, d) -> args1 + (b,) + args2 + (c, d)
"""
def __init__(self):
self._arg_accumulator = []
self._argspec = []
self._finalized = False
def _consume_args(self):
if self._arg_accumulator:
self._argspec.append(
gast.Tuple(elts=self._arg_accumulator, ctx=gast.Load()))
self._arg_accumulator = []
def add_arg(self, a):
self._arg_accumulator.append(a)
def add_stararg(self, a):
self._consume_args()
self._argspec.append(
gast.Call(
gast.Name(
'tuple', ctx=gast.Load(), annotation=None, type_comment=None),
args=[a],
keywords=()))
def finalize(self):
self._consume_args()
self._finalized = True
def to_ast(self):
assert self._finalized
if self._argspec:
result = self._argspec[0]
for i in range(1, len(self._argspec)):
result = gast.BinOp(result, gast.Add(), self._argspec[i])
return result
return gast.Tuple([], gast.Load())
class CallTreeTransformer(converter.Base):
"""Transforms the call tree by renaming transformed symbols."""
def visit_Lambda(self, node):
if not anno.hasanno(node, 'function_context_name'):
# Lambda functions created during the conversion process have no
# context manager.
return self.generic_visit(node)
with self.state[_Function] as fn_scope:
fn_scope.context_name = anno.getanno(node, 'function_context_name')
return self.generic_visit(node)
def visit_FunctionDef(self, node):
# Decorators and arg defaults are part of the outer scope.
node.decorator_list = self.visit_block(node.decorator_list)
node.args.defaults = self.visit_block(node.args.defaults)
for i, d in enumerate(node.args.kw_defaults):
if d is not None:
node.args.kw_defaults[i] = self.visit(d)
with self.state[_Function] as fn_scope:
# Note: if the conversion process ever creates helper functions, this
# assumption will no longer hold.
assert anno.hasanno(node, 'function_context_name'), (
'The function_scopes converter always creates a scope for functions.')
fn_scope.context_name = anno.getanno(node, 'function_context_name')
node.body = self.visit_block(node.body)
if node.returns:
node.returns = self.visit(node.returns)
return node
def visit_With(self, node):
# Context manager calls (in node.items) are not converted.
node.body = self.visit_block(node.body)
return node
def _args_to_tuple(self, node):
"""Ties together all positional and *arg arguments in a single tuple."""
# TODO(mdan): We could rewrite this to just a call to tuple(). Maybe better?
# For example for
# f(a, b, *args)
# instead of writing:
# (a, b) + args
# just write this?
# tuple(a, b, *args)
builder = _ArgTemplateBuilder()
for a in node.args:
if isinstance(a, gast.Starred):
builder.add_stararg(a.value)
else:
builder.add_arg(a)
builder.finalize()
return builder.to_ast()
def _kwargs_to_dict(self, node):
"""Ties together all keyword and **kwarg arguments in a single dict."""
if node.keywords:
return gast.Call(
gast.Name(
'dict', ctx=gast.Load(), annotation=None, type_comment=None),
args=(),
keywords=node.keywords)
else:
return parser.parse_expression('None')
def visit_Call(self, node):
full_name = str(anno.getanno(node.func, anno.Basic.QN, default=''))
function_context_name = self.state[_Function].context_name
node = self.generic_visit(node)
# TODO(mdan): Refactor converted_call as a 'Call' operator.
# Calls to the internal 'ag__' module are never converted (though their
# arguments might be).
if full_name.startswith('ag__.'):
return node
# Calls to the function context manager (inserted by function_scopes) are
# also safe.
if full_name.startswith(function_context_name + '.'):
return node
# Calls to pdb.set_trace or ipdb.set_trace are never converted. We don't use
# the normal mechanisms to bypass these literals because they are sensitive
# to the frame they are being called from.
# TODO(mdan): Generalize this to a "static allowlist" config.
if full_name in ('pdb.set_trace', 'ipdb.set_trace', 'breakpoint'):
global set_trace_warned
if not set_trace_warned:
# TODO(klecki): Point to a DALI-specific documentation here.
ag_logging.warning(
'Detected `pdb.set_trace()` in user code. The code'
' generated by AutoGraph is not optimized for step-by-step'
' debugging.')
set_trace_warned = True
return node
if (full_name == 'print' and
not self.ctx.user.options.uses(converter.Feature.BUILTIN_FUNCTIONS)):
return node
template = """
ag__.converted_call(func, args, kwargs, function_ctx)
"""
new_call = templates.replace_as_expression(
template,
func=node.func,
args=self._args_to_tuple(node),
kwargs=self._kwargs_to_dict(node),
function_ctx=function_context_name)
return new_call
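  # For illustration, inside a converted function whose context is named
  # `fscope`, a call such as:
  #
  #   f(x, *rest, k=3)
  #
  # is roughly rewritten as:
  #
  #   ag__.converted_call(f, (x,) + tuple(rest), dict(k=3), fscope)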
def transform(node, ctx):
"""Transform function call to the compiled counterparts.
Args:
node: AST
ctx: EntityContext
Returns:
A tuple (node, new_names):
node: The transformed AST
new_names: set(string), containing any newly-generated names
"""
node = qual_names.resolve(node)
node = CallTreeTransformer(ctx).visit(node)
return node
|
DALI-main
|
dali/python/nvidia/dali/_autograph/converters/call_trees.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code converters used by Autograph."""
# Naming conventions:
# * each converter should specialize on a single idiom; be consistent with
# the Python reference for naming
# * all converters inherit core.converter.Base
# * module names describe the idiom that the converter covers, plural
# * the converter class is named consistent with the module, singular and
# includes the word Transformer
#
# Example:
#
# lists.py
# class ListTransformer(converter.Base)
|
DALI-main
|
dali/python/nvidia/dali/_autograph/converters/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Lowers break statements to conditionals."""
from nvidia.dali._autograph.core import converter
from nvidia.dali._autograph.pyct import anno
from nvidia.dali._autograph.pyct import qual_names
from nvidia.dali._autograph.pyct import templates
from nvidia.dali._autograph.pyct.static_analysis import activity
from nvidia.dali._autograph.pyct.static_analysis.annos import NodeAnno
class _Break(object):
def __init__(self):
self.used = False
self.control_var_name = None
def __repr__(self):
return 'used: %s, var: %s' % (self.used, self.control_var_name)
class BreakTransformer(converter.Base):
"""Canonicalizes break statements into additional conditionals."""
def visit_Break(self, node):
self.state[_Break].used = True
var_name = self.state[_Break].control_var_name
# TODO(mdan): This will fail when expanded inside a top-level else block.
template = """
var_name = True
continue
"""
return templates.replace(template, var_name=var_name)
def _guard_if_present(self, block, var_name):
"""Prevents the block from executing if var_name is set."""
if not block:
return block
template = """
if not var_name:
block
"""
node = templates.replace(
template,
var_name=var_name,
block=block)
return node
def _process_body(self, nodes, break_var):
self.state[_Break].enter()
self.state[_Break].control_var_name = break_var
nodes = self.visit_block(nodes)
break_used = self.state[_Break].used
self.state[_Break].exit()
return nodes, break_used
def visit_While(self, node):
original_node = node
scope = anno.getanno(node, NodeAnno.BODY_SCOPE)
break_var = self.ctx.namer.new_symbol('break_', scope.referenced)
node.test = self.visit(node.test)
node.body, break_used = self._process_body(node.body, break_var)
# A break in the else clause applies to the containing scope.
node.orelse = self.visit_block(node.orelse)
if not break_used:
template = """
while test:
body
orelse
"""
node = templates.replace(
template, test=node.test, body=node.body, orelse=node.orelse)
new_while_node = node[0]
anno.copyanno(original_node, new_while_node, anno.Basic.DIRECTIVES)
return node
# Python's else clause only triggers if the loop exited cleanly (e.g.
# break did not trigger).
guarded_orelse = self._guard_if_present(node.orelse, break_var)
template = """
var_name = False
while not var_name and test:
body
orelse
"""
node = templates.replace(
template,
var_name=break_var,
test=node.test,
body=node.body,
orelse=guarded_orelse)
new_while_node = node[1]
anno.copyanno(original_node, new_while_node, anno.Basic.DIRECTIVES)
return node
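  # For illustration (generated names will differ), a loop such as:
  #
  #   while cond:
  #     if x:
  #       break
  #     f()
  #
  # roughly becomes:
  #
  #   break_ = False
  #   while not break_ and cond:
  #     if x:
  #       break_ = True
  #       continue
  #     f()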
def visit_For(self, node):
original_node = node
scope = anno.getanno(node, NodeAnno.BODY_SCOPE)
break_var = self.ctx.namer.new_symbol('break_', scope.referenced)
node.target = self.visit(node.target)
node.iter = self.visit(node.iter)
node.body, break_used = self._process_body(node.body, break_var)
# A break in the else clause applies to the containing scope.
node.orelse = self.visit_block(node.orelse)
if not break_used:
template = """
for target in iter_:
body
orelse
"""
node = templates.replace(
template,
iter_=node.iter,
target=node.target,
body=node.body,
orelse=node.orelse)
new_for_node = node[0]
anno.copyanno(original_node, new_for_node, anno.Basic.EXTRA_LOOP_TEST)
anno.copyanno(original_node, new_for_node, anno.Basic.DIRECTIVES)
return node
# Python's else clause only triggers if the loop exited cleanly (e.g.
# break did not trigger).
guarded_orelse = self._guard_if_present(node.orelse, break_var)
extra_test = templates.replace_as_expression(
'not var_name', var_name=break_var)
# The extra test is hidden in the AST, which will confuse the static
# analysis. To mitigate that, we insert a no-op statement that ensures
# the control variable is marked as used.
# TODO(mdan): Use a marker instead, e.g. ag__.condition_loop_on(var_name)
template = """
var_name = False
for target in iter_:
(var_name,)
body
orelse
"""
node = templates.replace(
template,
var_name=break_var,
iter_=node.iter,
target=node.target,
body=node.body,
orelse=guarded_orelse)
new_for_node = node[1]
anno.setanno(new_for_node, anno.Basic.EXTRA_LOOP_TEST, extra_test)
anno.copyanno(original_node, new_for_node, anno.Basic.DIRECTIVES)
return node
def transform(node, ctx):
node = qual_names.resolve(node)
node = activity.resolve(node, ctx, None)
transformer = BreakTransformer(ctx)
node = transformer.visit(node)
return node
|
DALI-main
|
dali/python/nvidia/dali/_autograph/converters/break_statements.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Handles directives.
This converter removes the directive functions from the code and moves the
information they specify into AST annotations. It is a specialized form of
static analysis, one that is specific to AutoGraph.
Note that this requires that the actual directive functions are static - that
is, they do not change at runtime. So if you do something like this:
tf.autograph.set_loop_options = <new function>
Then the directive may no longer be recognized. Furthermore, if the
converted function is cached, such an action may be irreversible.
"""
import inspect
import gast
from nvidia.dali._autograph.core import converter
from nvidia.dali._autograph.lang import directives
from nvidia.dali._autograph.pyct import anno
STATIC_VALUE = 'static_value'
"""Used for AST annotations, see visit_Name."""
class _LoopScope(object):
def __init__(self):
self.ast_node = None
self.statements_visited = 0
def _map_args(call_node, function):
"""Maps AST call nodes to the actual function's arguments.
Args:
call_node: ast.Call
function: Callable[..., Any], the actual function matching call_node
Returns:
Dict[Text, ast.AST], mapping each of the function's argument names to
the respective AST node.
Raises:
ValueError: if the default arguments are not correctly set
"""
args = call_node.args
kwds = {kwd.arg: kwd.value for kwd in call_node.keywords}
call_args = inspect.getcallargs(function, *args, **kwds)
# Keyword arguments not specified in kwds will be mapped to their defaults,
# which are Python values. Since we don't currently have a way to transform
# those into AST references, we simply remove them. By convention, directives
# use UNSPECIFIED as default value for optional arguments. No other
# defaults should be present.
unexpected_defaults = []
for k in call_args:
if (k not in kwds
and call_args[k] not in args
and call_args[k] is not directives.UNSPECIFIED):
unexpected_defaults.append(k)
if unexpected_defaults:
raise ValueError('Unexpected keyword argument values, %s, for function %s'
% (zip(unexpected_defaults,
[call_args[k] for k in unexpected_defaults]),
function))
return {k: v for k, v in call_args.items() if v is not directives.UNSPECIFIED}
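# For illustration (the directive and argument names below are just an example):
# given a call node for `directives.set_loop_options(parallel_iterations=10)`,
# _map_args returns {'parallel_iterations': <AST node for the literal 10>};
# arguments left at their UNSPECIFIED defaults are omitted from the result.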
class DirectivesTransformer(converter.Base):
"""Parses compiler directives and converts them into AST annotations."""
def _process_symbol_directive(self, call_node, directive):
if len(call_node.args) < 1:
raise ValueError('"%s" requires a positional first argument'
' as the target' % directive.__name__)
target = call_node.args[0]
defs = anno.getanno(target, anno.Static.ORIG_DEFINITIONS)
for def_ in defs:
def_.directives[directive] = _map_args(call_node, directive)
return call_node
def _process_statement_directive(self, call_node, directive):
if self.state[_LoopScope].statements_visited > 1:
raise ValueError(
'"%s" must be the first statement in the loop block' % (
directive.__name__))
if self.state[_LoopScope].level < 2:
raise ValueError(
'"%s" must be used inside a statement' % directive.__name__)
target = self.state[_LoopScope].ast_node
node_anno = anno.getanno(target, anno.Basic.DIRECTIVES, {})
node_anno[directive] = _map_args(call_node, directive)
anno.setanno(target, anno.Basic.DIRECTIVES, node_anno)
return call_node
def visit_Name(self, node):
node = self.generic_visit(node)
if isinstance(node.ctx, gast.Load):
defs = anno.getanno(node, anno.Static.DEFINITIONS, ())
is_defined = bool(defs)
if not is_defined and node.id in self.ctx.info.namespace:
anno.setanno(node, STATIC_VALUE, self.ctx.info.namespace[node.id])
return node
def visit_Attribute(self, node):
node = self.generic_visit(node)
parent_val = anno.getanno(node.value, STATIC_VALUE, default=None)
if parent_val is not None and inspect.ismodule(parent_val):
if hasattr(parent_val, node.attr):
anno.setanno(node, STATIC_VALUE, getattr(parent_val, node.attr))
return node
def visit_Assign(self, node):
self.state[_LoopScope].statements_visited += 1
return self.generic_visit(node)
def visit_AugAssign(self, node):
self.state[_LoopScope].statements_visited += 1
return self.generic_visit(node)
def visit_Expr(self, node):
self.state[_LoopScope].statements_visited += 1
node = self.generic_visit(node)
if isinstance(node.value, gast.Call):
call_node = node.value
static_val = anno.getanno(call_node.func, STATIC_VALUE, default=None)
if static_val is not None:
# Note: directive calls are not output in the generated code, hence
# the removal from the code by returning None.
if static_val is directives.set_element_type:
self._process_symbol_directive(call_node, static_val)
return None
elif static_val is directives.set_loop_options:
self._process_statement_directive(call_node, static_val)
return None
return node
# TODO(mdan): This will be insufficient for other control flow.
# That means that if we ever have a directive that affects things other than
# loops, we'll need support for parallel scopes, or have multiple converters.
def _track_and_visit_loop(self, node):
self.state[_LoopScope].enter()
self.state[_LoopScope].ast_node = node
node = self.generic_visit(node)
# Edge case: a loop with just one directive statement would become empty.
if not node.body:
node.body = [gast.Pass()]
self.state[_LoopScope].exit()
return node
def visit_While(self, node):
return self._track_and_visit_loop(node)
def visit_For(self, node):
return self._track_and_visit_loop(node)
def transform(node, ctx):
return DirectivesTransformer(ctx).visit(node)
|
DALI-main
|
dali/python/nvidia/dali/_autograph/converters/directives.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Canonicalizes continue statements by de-sugaring into a control boolean."""
from nvidia.dali._autograph.core import converter
from nvidia.dali._autograph.pyct import anno
from nvidia.dali._autograph.pyct import qual_names
from nvidia.dali._autograph.pyct import templates
from nvidia.dali._autograph.pyct.static_analysis import activity
from nvidia.dali._autograph.pyct.static_analysis.annos import NodeAnno
class _Continue(object):
def __init__(self):
self.used = False
self.control_var_name = None
def __repr__(self):
return '<_Continue(used: {}, var: {})>'.format(self.used,
self.control_var_name)
class _Block(object):
"""Tracks information about lexical blocks as they are visited in the AST.
Mainly, this object tracks the creation of block guards that replace
`continue` statements (e.g. `if not continue_:`).
Attributes:
create_guard_current: bool, whether to create a guard for the current
statement.
create_guard_next: bool, whether to create a guard for the next
statement.
is_loop_type: bool, whether this block is the body of a loop.
"""
def __init__(self):
self.is_loop_type = False
self.create_guard_current = False
self.create_guard_next = False
class ContinueCanonicalizationTransformer(converter.Base):
"""Canonicalizes continue statements into additional conditionals."""
def visit_Continue(self, node):
self.state[_Continue].used = True
for block in reversed(self.state[_Block].stack):
      # See ContinueCanonicalizationTest.test_multiple_continues for an example of
      # why it's necessary to create guards for all enclosing affected blocks, not
# just that of the current block.
block.create_guard_next = True
if block.is_loop_type:
# continue only affects the innermost loop
break
template = """
var_name = True
"""
return templates.replace(
template, var_name=self.state[_Continue].control_var_name)
def _postprocess_statement(self, node):
if self.state[_Continue].used:
block = self.state[_Block]
should_wrap_current = block.create_guard_current
# After processing propagate whether to guard the next statement
block.create_guard_current = block.create_guard_next
block.create_guard_next = False
if should_wrap_current:
template = """
if not var_name:
original_node
"""
cond, = templates.replace(
template,
var_name=self.state[_Continue].control_var_name,
original_node=node)
return cond, cond.body
return node, None
def _visit_loop_body(self, node, nodes):
self.state[_Continue].enter()
self.state[_Block].enter()
self.state[_Block].is_loop_type = True
scope = anno.getanno(node, NodeAnno.BODY_SCOPE)
continue_var = self.ctx.namer.new_symbol('continue_', scope.referenced)
self.state[_Continue].control_var_name = continue_var
nodes = self.visit_block(nodes, after_visit=self._postprocess_statement)
if self.state[_Continue].used:
template = """
var_name = False
"""
control_var_init = templates.replace(template, var_name=continue_var)
nodes = control_var_init + nodes
self.state[_Block].exit()
self.state[_Continue].exit()
return nodes
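  # For illustration (generated names will differ), a loop body such as:
  #
  #   while c:
  #     if x:
  #       continue
  #     f()
  #
  # roughly becomes:
  #
  #   while c:
  #     continue_ = False
  #     if x:
  #       continue_ = True
  #     if not continue_:
  #       f()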
def _visit_non_loop_body(self, nodes):
self.state[_Block].enter()
nodes = self.visit_block(nodes, after_visit=self._postprocess_statement)
self.state[_Block].exit()
return nodes
def visit_While(self, node):
node.test = self.visit(node.test)
node.body = self._visit_loop_body(node, node.body)
# A continue in the else clause applies to the containing scope.
node.orelse = self._visit_non_loop_body(node.orelse)
return node
def visit_For(self, node):
node.target = self.generic_visit(node.target)
node.iter = self.generic_visit(node.iter)
node.body = self._visit_loop_body(node, node.body)
# A continue in the else clause applies to the containing scope.
node.orelse = self._visit_non_loop_body(node.orelse)
return node
def visit_If(self, node):
node.body = self._visit_non_loop_body(node.body)
node.orelse = self._visit_non_loop_body(node.orelse)
return node
def visit_With(self, node):
node.items = self.visit_block(node.items)
node.body = self._visit_non_loop_body(node.body)
return node
def visit_Try(self, node):
node.body = self._visit_non_loop_body(node.body)
node.orelse = self._visit_non_loop_body(node.orelse)
# In Python 3.8 and later continue is allowed in finally blocks
node.finalbody = self._visit_non_loop_body(node.finalbody)
node.handlers = self.visit_block(node.handlers)
return node
def visit_ExceptHandler(self, node):
node.body = self._visit_non_loop_body(node.body)
return node
def transform(node, ctx):
node = qual_names.resolve(node)
node = activity.resolve(node, ctx, None)
node = ContinueCanonicalizationTransformer(ctx).visit(node)
return node
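# A hand-written sketch of the rewrite performed by the converter above. The variable
# name `continue_` is illustrative; the real name is generated by ctx.namer. The shape
# follows the templates used above: `var_name = False`, `var_name = True` and
# `if not var_name:` guards around the statements that follow the original `continue`.
def _continue_canonicalization_example(items):
    out = []
    for x in items:
        continue_ = False          # control_var_init template
        if x % 2 == 0:
            continue_ = True       # replaces the original `continue`
        if not continue_:          # guard created by _postprocess_statement
            out.append(x)
    return out                     # _continue_canonicalization_example([1, 2, 3, 4]) == [1, 3]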
|
DALI-main
|
dali/python/nvidia/dali/_autograph/converters/continue_statements.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converter for slice operations."""
import gast
from nvidia.dali._autograph.core import converter
from nvidia.dali._autograph.lang import directives
from nvidia.dali._autograph.pyct import templates
class SliceTransformer(converter.Base):
"""Converts slicing operations to their TF counterpart.
Currently, relying on the default slice operator that Tensor uses is
insufficient, because TensorArray and tensor lists use dedicated index read
and write functions.
"""
def _process_single_assignment(self, target, value):
if not isinstance(target, gast.Subscript):
return None
s = target.slice
if isinstance(s, (gast.Tuple, gast.Slice)):
return None
template = """
target = ag__.set_item(target, key, item)
"""
return templates.replace(
template, target=target.value, key=target.slice, item=value)
def visit_Assign(self, node):
node = self.generic_visit(node)
# TODO(mdan): Support unpackings and multiple assignments.
if len(node.targets) != 1:
raise NotImplementedError('multiple assignment')
replacement = self._process_single_assignment(node.targets[0], node.value)
if replacement is not None:
return replacement
return node
def visit_Subscript(self, node):
node = self.generic_visit(node)
s = node.slice
if isinstance(s, (gast.Tuple, gast.Slice)):
return node
if not isinstance(node.ctx, gast.Load):
# Index writes are handled at a higher level, one at which the rvalue is
# also available.
return node
dtype = self.get_definition_directive(
node.value,
directives.set_element_type,
'dtype',
default=templates.replace_as_expression('None'))
template = """
ag__.get_item(
target,
key,
opts=ag__.GetItemOpts(element_dtype=dtype))
"""
return templates.replace_as_expression(
template, target=node.value, key=s, dtype=dtype)
def transform(node, ctx):
return SliceTransformer(ctx).visit(node)
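# A minimal sketch of the rewrite above. `ag__.get_item`/`ag__.set_item` are the runtime
# helpers referenced by the templates; the stubs below are hypothetical stand-ins with
# plain Python semantics, used only to illustrate the rewritten shape of index reads
# and writes. Tuple and slice subscripts are intentionally left untouched by the converter.
def _get_item_stub(target, key, opts=None):
    return target[key]
def _set_item_stub(target, key, item):
    target[key] = item
    return target
def _slice_rewrite_example():
    l = [10, 20, 30]
    x = _get_item_stub(l, 1)        # original: x = l[1]
    l = _set_item_stub(l, 2, 99)    # original: l[2] = 99
    return x, l                     # (20, [10, 20, 99])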
|
DALI-main
|
dali/python/nvidia/dali/_autograph/converters/slices.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts the ternary conditional operator."""
import gast
from nvidia.dali._autograph.core import converter
from nvidia.dali._autograph.pyct import parser
from nvidia.dali._autograph.pyct import templates
class ConditionalExpressionTransformer(converter.Base):
"""Converts conditional expressions to functional form."""
def visit_IfExp(self, node):
template = '''
ag__.if_exp(
test,
lambda: true_expr,
lambda: false_expr,
expr_repr)
'''
expr_repr = parser.unparse(node.test, include_encoding_marker=False).strip()
return templates.replace_as_expression(
template,
test=node.test,
true_expr=node.body,
false_expr=node.orelse,
expr_repr=gast.Constant(expr_repr, kind=None))
def transform(node, ctx):
node = ConditionalExpressionTransformer(ctx).visit(node)
return node
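# A minimal sketch of the rewrite above. `ag__.if_exp` is the runtime helper named in the
# template; `_if_exp_stub` below is a hypothetical stand-in with plain Python semantics,
# used only to show the rewritten form of `a if cond else b`. The last argument is the
# unparsed source text of the test expression (expr_repr).
def _if_exp_stub(test, if_true, if_false, expr_repr):
    return if_true() if test else if_false()
def _if_exp_rewrite_example(cond, a, b):
    # original: return a if cond else b
    return _if_exp_stub(cond, lambda: a, lambda: b, 'cond')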
|
DALI-main
|
dali/python/nvidia/dali/_autograph/converters/conditional_expressions.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts assert statements to their corresponding TF calls."""
import gast
from nvidia.dali._autograph.core import converter
from nvidia.dali._autograph.pyct import templates
class AssertTransformer(converter.Base):
"""Transforms Assert nodes to Call so they can be handled as functions."""
def visit_Assert(self, node):
self.generic_visit(node)
# Note: The lone tf.Assert call will be wrapped with control_dependencies
# by side_effect_guards.
template = """
ag__.assert_stmt(test, lambda: msg)
"""
if node.msg is None:
return templates.replace(
template,
test=node.test,
msg=gast.Constant('Assertion error', kind=None))
elif isinstance(node.msg, gast.Constant):
return templates.replace(template, test=node.test, msg=node.msg)
else:
raise NotImplementedError('can only convert string messages for now.')
def transform(node, ctx):
node = AssertTransformer(ctx).visit(node)
return node
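# A minimal sketch of the rewrite above. `ag__.assert_stmt` is the runtime helper named
# in the template; `_assert_stmt_stub` is a hypothetical stand-in with plain semantics.
# When the assert has no message, the converter substitutes the constant 'Assertion error';
# non-constant messages raise NotImplementedError.
def _assert_stmt_stub(test, message_factory):
    if not test:
        raise AssertionError(message_factory())
def _assert_rewrite_example(x):
    # original: assert x > 0, 'x must be positive'
    _assert_stmt_stub(x > 0, lambda: 'x must be positive')
    # original: assert x < 100   (message defaults to 'Assertion error')
    _assert_stmt_stub(x < 100, lambda: 'Assertion error')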
|
DALI-main
|
dali/python/nvidia/dali/_autograph/converters/asserts.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class OperatorBase:
"""User may implement hooks for detection of overloads that are inserted by autograph
and their implementation.
In TF's AutoGraph, the AST is transformed to insert AutoGraph operators (operators/ dir),
for example if and for statements are replaced by ag__.if_stmt and ag__.for_stmt function
calls.
In such operator AutoGraph checks if one of the arguments is the user-defined type of interest
(by default a tf.Tensor of tf.data.Dataset) and if so, appropriate graph operations are inserted.
Otherwise a _py... fallback like _py_if_stmt or _py_for_stmt is invoked, providing default
Python semantics.
The user that wants to customize AutoGraph can do so, by overloading OperatorBase.
Any `detect_overload_x` function is used to detect the objects of user-defined type,
corresponding `x` function is supposed to implement the particular overload.
If `detect_overload_x` returns True, the `x` is called.
For example, `detect_overload_for_stmt` can be used to recognize that the iter_ in for statement
is a custom user-defined type and the iteration should have a custom implementation,
that is provided in `for_stmt`.
See the documentation in operators/ for description of the arguments used in those overloads,
the names of the functions are matching, so OperatorBase.for_stmt <-> control_flow.for_stmt.
"""
def detect_overload(self, object):
"""Generic detection of custom user-defined type used for all overloads.
Parameters
----------
object
Custom object detected by user or regular Python type.
Returns
-------
bool
True if the custom operator implementation should always be used, False to fall back
to Python behavior.
"""
return False
def detect_overload_ld(self, v):
return self.detect_overload(v)
def ld(self, v):
pass
def detect_overload_if_exp(self, cond):
return self.detect_overload(cond)
def if_exp(self, cond, if_true, if_false, expr_repr):
pass
def detect_overload_for_stmt(self, iter_):
return self.detect_overload(iter_)
def for_stmt(self, iter_, extra_test, body, get_state, set_state, symbol_names, opts):
pass
def detect_overload_while_stmt(self, test):
return self.detect_overload(test)
def while_stmt(self, test, body, get_state, set_state, symbol_names, opts):
pass
def detect_overload_if_stmt(self, cond):
return self.detect_overload(cond)
def if_stmt(self, cond, body, orelse, get_state, set_state, symbol_names, nouts):
pass
def detect_overload_assert_stmt(self, expression1):
return self.detect_overload(expression1)
def assert_stmt(self, expression1, expression2):
pass
def detect_overload_not_(self, a):
return self.detect_overload(a)
def not_(self, a):
pass
def detect_overload_lazy_and(self, a):
return self.detect_overload(a)
def lazy_and(self, a_val, b):
pass
def detect_overload_lazy_or(self, a):
return self.detect_overload(a)
def lazy_or(self, a_val, b):
pass
def detect_overload_equal(self, a):
return self.detect_overload(a)
def equal(self, a, b):
pass
def detect_overload_abs_(self, a):
return self.detect_overload(a)
def abs_(self, x):
pass
def detect_overload_float_(self, x):
return self.detect_overload(x)
def float_(self, x):
pass
def detect_overload_int_(self, x):
return self.detect_overload(x)
def int_(self, x, base):
pass
def detect_overload_len_(self, x):
return self.detect_overload(x)
def len_(self, s):
pass
def detect_overload_print_(self, objects):
return any(self.detect_overload(x) for x in objects)
def print_(self, objects, kwargs):
pass
def detect_overload_min_(self, args):
return any(self.detect_overload(x) for x in args)
def min_(self, *args, **kwargs):
pass
def detect_overload_max_(self, args):
return any(self.detect_overload(x) for x in args)
def max_(self, *args, **kwargs):
pass
def detect_overload_range_(self, start_or_stop, stop, step):
return any(self.detect_overload(x) for x in (start_or_stop, stop, step))
def range_(self, start_or_stop, stop, step):
pass
def detect_overload_enumerate_(self, s):
return self.detect_overload(s)
def enumerate_(self, s, start):
pass
def detect_overload_zip_(self, iterables):
return all(self.detect_overload(x) for x in iterables)
def zip_(self, *iterables):
pass
def detect_overload_map_(self, iterables):
return all(self.detect_overload(x) for x in iterables)
def map_(self, fn, *iterables):
pass
def detect_overload_next_(self, iterator):
return self.detect_overload(iterator)
def next_(self, iterator, default):
pass
def detect_overload_filter_(self, iterable):
return self.detect_overload(iterable)
def filter_(self, function, iterable):
pass
def detect_overload_any_(self, iterable):
return self.detect_overload(iterable)
def any_(self, iterable):
pass
def detect_overload_all_(self, iterable):
return self.detect_overload(iterable)
def all_(self, iterable):
pass
def detect_overload_sorted_(self, iterable):
return self.detect_overload(iterable)
def sorted_(self, iterable, key, reverse):
pass
def detect_overload_get_item(self, target):
return self.detect_overload(target)
def get_item(self, target, i):
pass
def detect_overload_set_item(self, target):
return self.detect_overload(target)
def set_item(self, target, i, x):
pass
def detect_overload_list_new(self, iterable):
return self.detect_overload(iterable)
def list_new(self, iterable):
pass
def detect_overload_list_append(self, list_):
return self.detect_overload(list_)
def list_append(self, list_, x):
pass
def detect_overload_list_pop(self, list_):
return self.detect_overload(list_)
def list_pop(self, list_, i):
pass
def detect_overload_list_stack(self, list_):
return self.detect_overload(list_)
def list_stack(self, list_, opts):
pass
_DISPATCH = OperatorBase()
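# A minimal customization sketch: a hypothetical subclass that recognizes objects carrying
# an `is_my_tensor` attribute and overrides a single operator. Per the class docstring
# above, an overload `x` is invoked only when the matching `detect_overload_x` hook
# returned True; everything else keeps the default Python behavior.
class _ExampleOperators(OperatorBase):
    def detect_overload(self, object):
        # Treat any object with this (hypothetical) marker attribute as "ours".
        return getattr(object, 'is_my_tensor', False)
    def not_(self, a):
        # Invoked when detect_overload_not_ returned True for `a`; `logical_not` is a
        # hypothetical method of the custom type.
        return a.logical_not()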
|
DALI-main
|
dali/python/nvidia/dali/_autograph/utils/hooks.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility module that contains APIs usable in the generated code."""
|
DALI-main
|
dali/python/nvidia/dali/_autograph/utils/__init__.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic utilities not strictly related to autograph that are moved here or just implemented
as no-op placeholders if the actual functionality doesn't matter - for example the scope of API
export and it's management is not important if we import the autograph as internal symbol.
"""
import inspect
def _remove_undocumented(module_name, allowed_exception_list=None, doc_string_modules=None):
pass
def export_symbol(*args, **kwargs):
"""No-op replacement for @tf_export. This is decorator factory that accepts arguments"""
def actual_decorator(function):
return function
return actual_decorator
def make_decorator(
target, decorator_func, decorator_name=None, decorator_doc='', decorator_argspec=None
):
"""Make a decorator from a wrapper and a target.
Args:
target: The final callable to be wrapped.
decorator_func: The wrapper function.
decorator_name: The name of the decorator. If `None`, the name of the
function calling make_decorator.
decorator_doc: Documentation specific to this application of
`decorator_func` to `target`.
decorator_argspec: The new callable signature of this decorator.
Returns:
The `decorator_func` argument with new metadata attached.
Note that we just wrap the function and adjust the members but do not insert the special
member TFDecorator
"""
if decorator_name is None:
decorator_name = inspect.currentframe().f_back.f_code.co_name
# Objects that are callables (e.g., a functools.partial object) may not have
# the following attributes.
if hasattr(target, '__name__'):
decorator_func.__name__ = target.__name__
if hasattr(target, '__qualname__'):
decorator_func.__qualname__ = target.__qualname__
if hasattr(target, '__module__'):
decorator_func.__module__ = target.__module__
if hasattr(target, '__dict__'):
# Copy dict entries from target which are not overridden by decorator_func.
for name in target.__dict__:
if name not in decorator_func.__dict__:
decorator_func.__dict__[name] = target.__dict__[name]
decorator_func.__wrapped__ = target
# Keeping a second handle to `target` allows callers to detect whether the
# decorator was modified using `rewrap`.
decorator_func.__original_wrapped__ = target
return decorator_func
# TODO(klecki): Introduce tests for control flow of integrated library (DALI)
def custom_constant(val, shape=None, dtype=None):
"""Customization point to introduce library-specific argument to the control flow.
Currently those tests fallback to Python implementation"""
return val
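# A usage sketch for make_decorator; the names `_traced` and `wrapper` are illustrative.
# Metadata such as __name__, __qualname__, __module__ and __dict__ entries are copied from
# the wrapped function, and __wrapped__/__original_wrapped__ point back at it.
def _traced(fn):
    def wrapper(*args, **kwargs):
        print('calling', getattr(fn, '__name__', fn))
        return fn(*args, **kwargs)
    return make_decorator(fn, wrapper, decorator_name='traced')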
|
DALI-main
|
dali/python/nvidia/dali/_autograph/utils/all_utils.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Logging and debugging utilities."""
import os
import sys
import traceback
import logging
# TODO(mdan): Use a custom logger class.
from nvidia.dali._autograph.utils.all_utils import export_symbol
VERBOSITY_VAR_NAME = 'AUTOGRAPH_VERBOSITY'
DEFAULT_VERBOSITY = 0
verbosity_level = None # vlog-like. Takes precedence over the env variable.
echo_log_to_stdout = False
# In interactive Python, logging echo is enabled by default.
if hasattr(sys, 'ps1') or hasattr(sys, 'ps2'):
echo_log_to_stdout = True
@export_symbol('autograph.set_verbosity')
def set_verbosity(level, alsologtostdout=False):
"""Sets the AutoGraph verbosity level.
_Debug logging in AutoGraph_
More verbose logging is useful to enable when filing bug reports or doing
more in-depth debugging.
There are two means to control the logging verbosity:
* The `set_verbosity` function
* The `AUTOGRAPH_VERBOSITY` environment variable
`set_verbosity` takes precedence over the environment variable.
For example:
```python
import os
import tensorflow as tf
os.environ['AUTOGRAPH_VERBOSITY'] = '5'
# Verbosity is now 5
tf.autograph.set_verbosity(0)
# Verbosity is now 0
os.environ['AUTOGRAPH_VERBOSITY'] = '1'
# No effect, because set_verbosity was already called.
```
Logs entries are output to [absl](https://abseil.io)'s
[default output](https://abseil.io/docs/python/guides/logging),
with `INFO` level.
Logs can be mirrored to stdout by using the `alsologtostdout` argument.
Mirroring is enabled by default when Python runs in interactive mode.
Args:
level: int, the verbosity level; larger values specify increased verbosity;
0 means no logging. When reporting bugs, it is recommended to set this
value to a larger number, like 10.
alsologtostdout: bool, whether to also output log messages to `sys.stdout`.
"""
global verbosity_level
global echo_log_to_stdout
verbosity_level = level
echo_log_to_stdout = alsologtostdout
@export_symbol('autograph.trace')
def trace(*args):
"""Traces argument information at compilation time.
`trace` is useful when debugging, and it always executes during the tracing
phase, that is, when the TF graph is constructed.
_Example usage_
```python
import tensorflow as tf
for i in tf.range(10):
tf.autograph.trace(i)
# Output: <Tensor ...>
```
Args:
*args: Arguments to print to `sys.stdout`.
"""
print(*args)
def get_verbosity():
global verbosity_level
if verbosity_level is not None:
return verbosity_level
return int(os.getenv(VERBOSITY_VAR_NAME, DEFAULT_VERBOSITY))
def has_verbosity(level):
return get_verbosity() >= level
def _output_to_stdout(msg, *args, **kwargs):
print(msg % args)
if kwargs.get('exc_info', False):
traceback.print_exc()
def error(level, msg, *args, **kwargs):
if has_verbosity(level):
logging.error(msg, *args, **kwargs)
if echo_log_to_stdout:
_output_to_stdout('ERROR: ' + msg, *args, **kwargs)
def log(level, msg, *args, **kwargs):
if has_verbosity(level):
logging.info(msg, *args, **kwargs)
if echo_log_to_stdout:
_output_to_stdout(msg, *args, **kwargs)
def warning(msg, *args, **kwargs):
logging.warning(msg, *args, **kwargs)
if echo_log_to_stdout:
_output_to_stdout('WARNING: ' + msg, *args, **kwargs)
sys.stdout.flush()
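# A usage sketch (not executed on import): `set_verbosity` takes precedence over the
# AUTOGRAPH_VERBOSITY environment variable, and `log` only emits messages whose level is
# at or below the current verbosity. The message strings below are illustrative.
def _logging_example():
    set_verbosity(2, alsologtostdout=True)
    log(1, 'converting %s', 'user_fn')          # emitted: level 1 <= verbosity 2
    log(5, 'detailed trace of %s', 'user_fn')   # suppressed at verbosity 2
    warning('falling back to Python for %s', 'user_fn')  # warnings are always emitted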
|
DALI-main
|
dali/python/nvidia/dali/_autograph/utils/ag_logging.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
DALI-main
|
dali/python/nvidia/dali/_autograph/lang/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Directives are special no-op functions that serve as compilation markers.
They provide static information like type hints, compilation and TensorFlow
overrides.
These serve as annotations in the compiled code, allowing the user some control
over the compilation process. They have no functional role at runtime.
"""
from nvidia.dali._autograph.utils.all_utils import export_symbol
UNSPECIFIED = object()
def set_element_type(entity, dtype, shape=UNSPECIFIED):
"""Indicates that the entity is expected hold items of specified type/shape.
The staged TensorFlow ops will reflect and assert this data type. Ignored
otherwise.
Args:
entity: The entity to annotate.
dtype: TensorFlow dtype value to assert for entity.
shape: Optional shape to assert for entity.
"""
del entity
del dtype
del shape
@export_symbol('autograph.experimental.set_loop_options')
def set_loop_options(
parallel_iterations=UNSPECIFIED,
swap_memory=UNSPECIFIED,
maximum_iterations=UNSPECIFIED,
shape_invariants=UNSPECIFIED):
"""Specifies additional arguments to be passed to the enclosing while_loop.
The parameters apply to and only to the immediately enclosing loop. It only
has effect if the loop is staged as a TF while_loop; otherwise the parameters
have no effect.
Usage:
>>> @tf.function(autograph=True)
... def f():
... n = 0
... for i in tf.range(10):
... tf.autograph.experimental.set_loop_options(maximum_iterations=3)
... n += 1
... return n
>>> @tf.function(autograph=True)
... def f():
... v = tf.constant((0,))
... for i in tf.range(3):
... tf.autograph.experimental.set_loop_options(
... shape_invariants=[(v, tf.TensorShape([None]))]
... )
... v = tf.concat((v, [i]), 0)
... return v
Also see tf.while_loop.
Args:
parallel_iterations: The maximum number of iterations allowed to run in
parallel at any given time. Note that this does not guarantee parallel
execution.
swap_memory: Whether to store intermediate values needed for
gradients on the CPU instead of GPU.
maximum_iterations: Allows limiting the total number of iterations executed
by the loop.
shape_invariants: Allows controlling the argument with the same name passed
to tf.while_loop. Unlike tf.while_loop, this is a list of
`(tensor, shape)` pairs.
"""
del parallel_iterations
del swap_memory
del maximum_iterations
del shape_invariants
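# A usage sketch for set_element_type (the function and its helper are illustrative).
# Like the other directives in this module, the call itself is a no-op marker that the
# static analysis reads from the source; the dtype/shape values below are only examples.
def _element_type_example(l):
    set_element_type(l, 'float32', shape=(None,))
    return l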
|
DALI-main
|
dali/python/nvidia/dali/_autograph/lang/directives.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Control flow graph (CFG) structure for Python AST representation.
The CFG is a digraph with edges representing valid control flow. Each
node is associated with exactly one AST node, but not all AST nodes may have
a corresponding CFG counterpart.
Once built, the CFG itself is immutable, but the values it holds need not be;
they are usually annotated with information extracted by walking the graph.
Tip: Use `Graph.as_dot` to visualize the CFG using any DOT viewer.
Note: the CFG tries to include all code paths that MAY be taken, with a single
notable exception:
* function calls do not generate edges corresponding to exceptions they may
raise (i.e. a function call in the middle of a block does not return or jump
to any except or finally block)
TODO(mdan): Consider adding the edges above. They'd only add ~O(n) edges.
TODO(mdan): Alternatively, consider adding an edge from try to all its excepts.
"""
# TODO(mdan): The notion of 'statements' below is inaccurate.
# They should rather be called 'block statements', because they include
# statements that may have a body, e.g. if and while.
import collections
import enum
import weakref
from typing import Dict, Set, Tuple
import astunparse
import gast
from nvidia.dali._autograph.pyct import anno
class Node(object):
"""A node in the CFG.
Although new instances of this class are mutable, the objects that a user
finds in the CFG are typically not.
The nodes hold the graph's edges via their next/prev pointers, which allows
efficient walking in both forward and reverse order. The following property
holds for all nodes: "child in node.next" iff "node in child.prev".
Attributes:
next: FrozenSet[Node, ...], the nodes that follow this node, in control flow
order
prev: FrozenSet[Node, ...], the nodes that precede this node, in reverse
control flow order
ast_node: ast.AST, the AST node corresponding to this CFG node
"""
def __init__(self, next_, prev, ast_node):
self.next = next_
self.prev = prev
self.ast_node = ast_node
def freeze(self):
self.next = frozenset(self.next)
# Assumption: All CFG nodes have identical life spans, because the graph
# owns them. Nodes should never be used outside the context of an existing
# graph.
self.prev = weakref.WeakSet(self.prev)
def __repr__(self):
if isinstance(self.ast_node, gast.FunctionDef):
return 'def %s' % self.ast_node.name
elif isinstance(self.ast_node, gast.ClassDef):
return 'class %s' % self.ast_node.name
elif isinstance(self.ast_node, gast.withitem):
# TODO(xjun): remove use of astunparse
return astunparse.unparse(self.ast_node.context_expr).strip()
return astunparse.unparse(self.ast_node).strip()
class Graph(
collections.namedtuple(
'Graph',
['entry', 'exit', 'error', 'index', 'stmt_prev', 'stmt_next'])):
"""A Control Flow Graph.
The CFG maintains an index to allow looking up a CFG node by the AST node to
which it is associated. The index can also be enumerated in top-down, depth
first order.
Walking the graph in forward or reverse order is supported by double
parent-child links.
Note: the error nodes are not wired to their corresponding finally guards,
because these are shared, and wiring them would create a reverse path from
normal control flow into the error nodes, which we want to avoid.
The graph also maintains edges corresponding to higher level statements
like for-else loops. A node is considered successor of a statement if there
is an edge from a node that is lexically a child of that statement to a node
that is not. Statement predecessors are analogously defined.
Attributes:
entry: Node, the entry node
exit: FrozenSet[Node, ...], the exit nodes
error: FrozenSet[Node, ...], nodes that exit due to an explicitly raised
error (errors propagated from function calls are not accounted)
index: Dict[ast.Node, Node], mapping AST nodes to the respective CFG node
stmt_prev: Dict[ast.Node, FrozenSet[Node, ...]], mapping statement AST nodes
to their predecessor CFG nodes
stmt_next: Dict[ast.Node, FrozenSet[Node, ...]], mapping statement AST nodes
to their successor CFG nodes
"""
def __repr__(self):
return self.as_dot()
def as_dot(self):
"""Print CFG in DOT format."""
result = 'digraph CFG {\n'
for node in self.index.values():
result += ' %s [label="%s"];\n' % (id(node), node)
for node in self.index.values():
for next_ in node.next:
result += ' %s -> %s;\n' % (id(node), id(next_))
result += '}'
return result
class _WalkMode(enum.Enum):
FORWARD = 1
REVERSE = 2
# TODO(mdan): Rename to DataFlowAnalyzer.
# TODO(mdan): Consider specializations that use gen/kill/transfer abstractions.
class GraphVisitor(object):
"""Base class for a CFG visitors.
This implementation is not thread safe.
The visitor has some facilities to simplify dataflow analyses. In particular,
it allows revisiting the nodes at the decision of the subclass. This can be
used to visit the graph until the state reaches a fixed point.
For more details on dataflow analysis, see
https://www.seas.harvard.edu/courses/cs252/2011sp/slides/Lec02-Dataflow.pdf
Note: the literature generally suggests visiting successor nodes only when the
state of the current node changed, regardless of whether that successor has
ever been visited. This implementation visits every successor at least once.
Attributes:
graph: Graph
in_: Dict[Node, Any], stores node-keyed state during a visit
out: Dict[Node, Any], stores node-keyed state during a visit
"""
def __init__(self, graph):
self.graph = graph
self.reset()
def init_state(self, node):
"""State initialization function.
Optional to overload.
An in/out state slot will be created for each node in the graph. Subclasses
must overload this to control what that is initialized to.
Args:
node: Node
"""
raise NotImplementedError('Subclasses must implement this.')
# TODO(mdan): Rename to flow?
def visit_node(self, node):
"""Visitor function.
Args:
node: Node
Returns:
bool, whether the node should be revisited; subclasses can visit every
reachable node exactly once by always returning False
"""
raise NotImplementedError('Subclasses must implement this.')
def reset(self):
self.in_ = {
node: self.init_state(node) for node in self.graph.index.values()
}
self.out = {
node: self.init_state(node) for node in self.graph.index.values()
}
def can_ignore(self, node):
"""Returns True if the node can safely be assumed not to touch variables."""
ast_node = node.ast_node
if anno.hasanno(ast_node, anno.Basic.SKIP_PROCESSING):
return True
return isinstance(ast_node,
(gast.Break, gast.Continue, gast.Raise, gast.Pass))
def _visit_internal(self, mode):
"""Visits the CFG, breadth-first."""
assert mode in (_WalkMode.FORWARD, _WalkMode.REVERSE)
if mode == _WalkMode.FORWARD:
open_ = [self.graph.entry]
elif mode == _WalkMode.REVERSE:
open_ = list(self.graph.exit)
closed = set()
while open_:
node = open_.pop(0)
closed.add(node)
should_revisit = self.visit_node(node)
if mode == _WalkMode.FORWARD:
children = node.next
elif mode == _WalkMode.REVERSE:
children = node.prev
for next_ in children:
if should_revisit or next_ not in closed:
open_.append(next_)
def visit_forward(self):
self._visit_internal(_WalkMode.FORWARD)
def visit_reverse(self):
self._visit_internal(_WalkMode.REVERSE)
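# A minimal GraphVisitor subclass sketch (hypothetical, for illustration): a forward pass
# that records the CFG nodes reachable from the entry node. Returning False from
# visit_node requests no revisits, per the contract documented in the base class.
class _ReachableNodeCollector(GraphVisitor):
    def __init__(self, graph):
        self.seen = set()
        super().__init__(graph)
    def init_state(self, node):
        return None
    def visit_node(self, node):
        self.seen.add(node)
        return False   # do not request a revisit of this node's successors
# usage sketch: v = _ReachableNodeCollector(graph); v.visit_forward(); len(v.seen)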
class GraphBuilder(object):
"""Builder that constructs a CFG from a given AST.
This GraphBuilder facilitates constructing the DAG that forms the CFG when
nodes
are supplied in lexical order (i.e., top-down, depth first). Under these
conditions, it supports building patterns found in typical structured
programs.
This builder ignores the flow generated by exceptions, which are assumed to
always be catastrophic and present purely for diagnostic purposes (e.g. to
print debug information). Statements like raise and try/catch sections are
allowed and will generate control flow edges, but ordinary statements are
assumed not to raise exceptions.
Finally sections are also correctly interleaved between break/continue/return
nodes and their subsequent statements.
Important concepts:
* nodes - nodes refer to CFG nodes; AST nodes are qualified explicitly
* leaf set - since the graph is constructed gradually, a leaf set maintains
the CFG nodes that will precede the node that the builder expects to
receive next; when an ordinary node is added, it is connected to the
existing leaves and it in turn becomes the new leaf
* jump nodes - nodes that should generate edges other than what
ordinary nodes would; these correspond to break, continue and return
statements
* sections - logical delimiters for subgraphs that require special
edges; there are various types of sections, each admitting various
types of jump nodes; sections are identified by their corresponding AST
node
"""
# TODO(mdan): Perhaps detail this in a markdown doc.
# TODO(mdan): Add exception support.
def __init__(self, parent_ast_node):
self.reset()
self.parent = parent_ast_node
def reset(self):
"""Resets the state of this factory."""
self.head = None
self.errors = set()
self.node_index = {}
# TODO(mdan): Too many primitives. Use classes.
self.leaves = set()
# Note: This mechanism requires that nodes are added in lexical order (top
# to bottom, depth first).
self.active_stmts = set()
self.owners = {} # type: Set[any]
self.forward_edges = set() # type: Tuple[Node, Node] # (from, to)
self.finally_sections = {}
# Dict values represent (entry, exits)
self.finally_section_subgraphs = {
} # type: Dict[ast.AST, Tuple[Node, Set[Node]]] # noqa: F821
# Whether the guard section can be reached from the statement that precedes
# it.
self.finally_section_has_direct_flow = {}
# Finally sections that await their first node.
self.pending_finally_sections = set()
# Exit jumps keyed by the section they affect.
self.exits = {}
# The entry of loop sections, keyed by the section.
self.section_entry = {}
# Continue jumps keyed by the section they affect.
self.continues = {}
# Raise jumps keyed by the except section guarding them.
self.raises = {}
# The entry of conditional sections, keyed by the section.
self.cond_entry = {}
# Lists of leaf nodes corresponding to each branch in the section.
self.cond_leaves = {}
def _connect_nodes(self, first, second):
"""Connects nodes to signify that control flows from first to second.
Args:
first: Union[Set[Node, ...], Node]
second: Node
"""
if isinstance(first, Node):
first.next.add(second)
second.prev.add(first)
self.forward_edges.add((first, second))
else:
for node in first:
self._connect_nodes(node, second)
def _add_new_node(self, ast_node):
"""Grows the graph by adding a CFG node following the current leaves."""
if ast_node in self.node_index:
raise ValueError('%s added twice' % ast_node)
# Assumption: All CFG nodes have identical life spans, because the graph
# owns them. Nodes should never be used outside the context of an existing
# graph.
node = Node(next_=set(), prev=weakref.WeakSet(), ast_node=ast_node)
self.node_index[ast_node] = node
self.owners[node] = frozenset(self.active_stmts)
if self.head is None:
self.head = node
for leaf in self.leaves:
self._connect_nodes(leaf, node)
# If any finally section awaits its first node, populate it.
for section_id in self.pending_finally_sections:
self.finally_section_subgraphs[section_id][0] = node
self.pending_finally_sections = set()
return node
def begin_statement(self, stmt):
"""Marks the beginning of a statement.
Args:
stmt: Hashable, a key by which the statement can be identified in the
CFG's stmt_prev and stmt_next attributes
"""
self.active_stmts.add(stmt)
def end_statement(self, stmt):
"""Marks the end of a statement.
Args:
stmt: Hashable, a key by which the statement can be identified in the
CFG's stmt_prev and stmt_next attributes; must match a key previously
passed to begin_statement.
"""
self.active_stmts.remove(stmt)
def add_ordinary_node(self, ast_node):
"""Grows the graph by adding an ordinary CFG node.
Ordinary nodes are followed by the next node, in lexical order, that is,
they become the new leaf set.
Args:
ast_node: ast.AST
Returns:
Node
"""
node = self._add_new_node(ast_node)
self.leaves = set((node,))
return node
def _add_jump_node(self, ast_node, guards):
"""Grows the graph by adding a jump node.
Jump nodes are added to the current leaf set, and the leaf set becomes
empty. If the jump node is the last in a cond section, then it may be added
back to the leaf set by a separate mechanism.
Args:
ast_node: ast.AST
guards: Tuple[ast.AST, ...], the finally sections active for this node
Returns:
Node
"""
node = self._add_new_node(ast_node)
self.leaves = set()
# The guards themselves may not yet be complete, and will be wired later.
self.finally_sections[node] = guards
return node
def _connect_jump_to_finally_sections(self, node):
"""Connects a jump node to the finally sections protecting it."""
cursor = set((node,))
if node not in self.finally_sections:
return cursor
for guard_section_id in self.finally_sections[node]:
guard_begin, guard_ends = self.finally_section_subgraphs[guard_section_id]
self._connect_nodes(cursor, guard_begin)
cursor = guard_ends
del self.finally_sections[node]
# TODO(mdan): Should garbage-collect finally_section_subgraphs.
return cursor
def add_exit_node(self, ast_node, section_id, guards):
"""Grows the graph by adding an exit node.
This node becomes an exit for the current section.
Args:
ast_node: ast.AST
section_id: Hashable, the node for which ast_node should be considered to
be an exit node
guards: Tuple[ast.AST, ...], the finally sections that guard ast_node
Returns:
Node
"""
node = self._add_jump_node(ast_node, guards)
self.exits[section_id].add(node)
return node
def add_continue_node(self, ast_node, section_id, guards):
"""Grows the graph by adding a reentry node.
This node causes control flow to go back to the loop section's entry.
Args:
ast_node: ast.AST
section_id: Hashable, the node for which ast_node should be considered to
be an exit node
guards: Tuple[ast.AST, ...], the finally sections that guard ast_node
"""
node = self._add_jump_node(ast_node, guards)
self.continues[section_id].add(node)
def connect_raise_node(self, node, except_guards):
"""Adds extra connection between a raise node and containing except guards.
The node is a graph node, not an ast node.
Args:
node: Node
except_guards: Tuple[ast.AST, ...], the except sections that guard node
"""
for guard in except_guards:
if guard in self.raises:
self.raises[guard].append(node)
else:
self.raises[guard] = [node]
def enter_section(self, section_id):
"""Enters a regular section.
Regular sections admit exit jumps, which end the section.
Args:
section_id: Hashable, the same node that will be used in calls to the
ast_node arg passed to add_exit_node
"""
assert section_id not in self.exits
self.exits[section_id] = set()
def exit_section(self, section_id):
"""Exits a regular section."""
# Exits are jump nodes, which may be protected.
for exit_ in self.exits[section_id]:
self.leaves |= self._connect_jump_to_finally_sections(exit_)
del self.exits[section_id]
def enter_loop_section(self, section_id, entry_node):
"""Enters a loop section.
Loop sections define an entry node. The end of the section always flows back
to the entry node. These admit continue jump nodes which also flow to the
entry node.
Args:
section_id: Hashable, the same node that will be used in calls to the
ast_node arg passed to add_continue_node
entry_node: ast.AST, the entry node into the loop (e.g. the test node for
while loops)
"""
assert section_id not in self.section_entry
assert section_id not in self.continues
self.continues[section_id] = set()
node = self.add_ordinary_node(entry_node)
self.section_entry[section_id] = node
def exit_loop_section(self, section_id):
"""Exits a loop section."""
self._connect_nodes(self.leaves, self.section_entry[section_id])
# continues are jump nodes, which may be protected.
for reentry in self.continues[section_id]:
guard_ends = self._connect_jump_to_finally_sections(reentry)
self._connect_nodes(guard_ends, self.section_entry[section_id])
# Loop nodes always loop back.
self.leaves = set((self.section_entry[section_id],))
del self.continues[section_id]
del self.section_entry[section_id]
def enter_cond_section(self, section_id):
"""Enters a conditional section.
Conditional sections define an entry node, and one or more branches.
Args:
section_id: Hashable, the same node that will be used in calls to the
section_id arg passed to new_cond_branch
"""
assert section_id not in self.cond_entry
assert section_id not in self.cond_leaves
self.cond_leaves[section_id] = []
def new_cond_branch(self, section_id):
"""Begins a new branch in a cond section."""
assert section_id in self.cond_leaves
if section_id in self.cond_entry:
# Subsequent splits move back to the split point, and memorize the
# current leaves.
self.cond_leaves[section_id].append(self.leaves)
self.leaves = self.cond_entry[section_id]
else:
# If this is the first time we split a section, just remember the split
# point.
self.cond_entry[section_id] = self.leaves
def exit_cond_section(self, section_id):
"""Exits a conditional section."""
for split in self.cond_leaves[section_id]:
self.leaves |= split
del self.cond_entry[section_id]
del self.cond_leaves[section_id]
def enter_except_section(self, section_id):
"""Enters an except section."""
if section_id in self.raises:
self.leaves.update(self.raises[section_id])
def enter_finally_section(self, section_id):
"""Enters a finally section."""
# TODO(mdan): This, not the caller, should track the active sections.
self.finally_section_subgraphs[section_id] = [None, None]
if self.leaves:
self.finally_section_has_direct_flow[section_id] = True
else:
self.finally_section_has_direct_flow[section_id] = False
self.pending_finally_sections.add(section_id)
def exit_finally_section(self, section_id):
"""Exits a finally section."""
assert section_id not in self.pending_finally_sections, 'Empty finally?'
self.finally_section_subgraphs[section_id][1] = self.leaves
# If the guard can only be reached by a jump, then it will not flow
# into the statement that follows it.
if not self.finally_section_has_direct_flow[section_id]:
self.leaves = set()
del self.finally_section_has_direct_flow[section_id]
def build(self):
"""Returns the CFG accumulated so far and resets the builder.
Returns:
Graph
"""
# Freeze the nodes.
for node in self.node_index.values():
node.freeze()
# Build the statement edges.
stmt_next = {}
stmt_prev = {}
for node in self.node_index.values():
for stmt in self.owners[node]:
if stmt not in stmt_prev:
stmt_prev[stmt] = set()
if stmt not in stmt_next:
stmt_next[stmt] = set()
for first, second in self.forward_edges:
stmts_exited = self.owners[first] - self.owners[second]
for stmt in stmts_exited:
stmt_next[stmt].add(second)
stmts_entered = self.owners[second] - self.owners[first]
for stmt in stmts_entered:
stmt_prev[stmt].add(first)
for stmt in stmt_next:
stmt_next[stmt] = frozenset(stmt_next[stmt])
for stmt in stmt_prev:
stmt_prev[stmt] = frozenset(stmt_prev[stmt])
# Construct the final graph object.
result = Graph(
entry=self.head,
exit=self.leaves,
error=self.errors,
index=self.node_index,
stmt_prev=stmt_prev,
stmt_next=stmt_next)
# Reset the state.
self.reset()
return result
class AstToCfg(gast.NodeVisitor):
"""Converts an AST to CFGs.
A separate CFG will be constructed for each function.
"""
def __init__(self):
super(AstToCfg, self).__init__()
self.builder_stack = []
self.builder = None
self.cfgs = {}
self.lexical_scopes = []
def _enter_lexical_scope(self, node):
self.lexical_scopes.append(node)
def _exit_lexical_scope(self, node):
leaving_node = self.lexical_scopes.pop()
assert node == leaving_node
def _get_enclosing_finally_scopes(self, stop_at):
included = []
for node in reversed(self.lexical_scopes):
if isinstance(node, gast.Try) and node.finalbody:
included.append(node)
if isinstance(node, stop_at):
return node, included
return None, included
def _get_enclosing_except_scopes(self, stop_at):
included = []
for node in reversed(self.lexical_scopes):
if isinstance(node, gast.Try) and node.handlers:
included.extend(node.handlers)
if isinstance(node, stop_at):
break
return included
def _process_basic_statement(self, node):
self.generic_visit(node)
self.builder.add_ordinary_node(node)
def _process_exit_statement(self,
node,
exits_nodes_of_type,
may_exit_via_except=False):
self.generic_visit(node)
# Note: this is safe because we process functions separately.
try_node, guards = self._get_enclosing_finally_scopes(exits_nodes_of_type)
assert try_node is not None, '{} that is not enclosed by any of {}'.format(
node, exits_nodes_of_type)
node = self.builder.add_exit_node(node, try_node, guards)
if may_exit_via_except:
except_guards = self._get_enclosing_except_scopes(exits_nodes_of_type)
self.builder.connect_raise_node(node, except_guards)
def _process_continue_statement(self, node, *loops_to_nodes_of_type):
# Note: this is safe because we process functions separately.
try_node, guards = self._get_enclosing_finally_scopes(
tuple(loops_to_nodes_of_type))
if try_node is None:
raise ValueError('%s that is not enclosed by any of %s' %
(node, loops_to_nodes_of_type))
self.builder.add_continue_node(node, try_node, guards)
def visit_ClassDef(self, node):
# We also keep the ClassDef node in the CFG, since it technically is a
# statement.
# For example, this is legal and allows executing user code:
#
# class Foo(bar()):
# pass
#
# It also has a scope:
#
# class Bar(object):
# a = 1
if self.builder is None:
self.generic_visit(node)
return
self.builder.add_ordinary_node(node)
self.builder_stack.append(self.builder)
self.builder = GraphBuilder(node)
self._enter_lexical_scope(node)
self._process_basic_statement(node)
self._exit_lexical_scope(node)
# TODO(mdan): Track the CFG local to the class definition as well?
self.builder = self.builder_stack.pop()
def _process_function_def(self, node, is_lambda):
# The function body is stored in a separate graph, because function
# definitions have effects very different from function calls.
if self.builder is not None:
self.builder.add_ordinary_node(node)
self.builder_stack.append(self.builder)
self.builder = GraphBuilder(node)
self._enter_lexical_scope(node)
self.builder.enter_section(node)
self._process_basic_statement(node.args)
if is_lambda:
self._process_exit_statement(node.body, (gast.Lambda,))
else:
for stmt in node.body:
self.visit(stmt)
self.builder.exit_section(node)
self._exit_lexical_scope(node)
self.cfgs[node] = self.builder.build()
self.builder = self.builder_stack.pop()
def visit_FunctionDef(self, node):
self._process_function_def(node, is_lambda=False)
def visit_Lambda(self, node):
self._process_function_def(node, is_lambda=True)
def visit_Return(self, node):
self._process_exit_statement(node, (gast.FunctionDef,))
def visit_Import(self, node):
self._process_basic_statement(node)
def visit_ImportFrom(self, node):
self._process_basic_statement(node)
def visit_Expr(self, node):
self._process_basic_statement(node)
def visit_Assign(self, node):
self._process_basic_statement(node)
def visit_AnnAssign(self, node):
self._process_basic_statement(node)
def visit_AugAssign(self, node):
self._process_basic_statement(node)
def visit_Pass(self, node):
self._process_basic_statement(node)
def visit_Global(self, node):
self._process_basic_statement(node)
def visit_Nonlocal(self, node):
self._process_basic_statement(node)
def visit_Print(self, node):
self._process_basic_statement(node)
def visit_Raise(self, node):
self._process_exit_statement(
node, (gast.FunctionDef,), may_exit_via_except=True)
self.builder.errors.add(node)
def visit_Assert(self, node):
# Ignoring the effect of exceptions.
self._process_basic_statement(node)
def visit_Delete(self, node):
self._process_basic_statement(node)
def visit_If(self, node):
# No need to track ifs as lexical scopes, for now.
# Lexical scopes are generally tracked in order to be able to resolve the
# targets of jump statements like break/continue/etc. Since there is no
# statement that can interrupt a conditional, we don't need to track their
# lexical scope. That may change in the future.
self.builder.begin_statement(node)
self.builder.enter_cond_section(node)
self._process_basic_statement(node.test)
self.builder.new_cond_branch(node)
for stmt in node.body:
self.visit(stmt)
self.builder.new_cond_branch(node)
for stmt in node.orelse:
self.visit(stmt)
self.builder.exit_cond_section(node)
self.builder.end_statement(node)
def visit_While(self, node):
self.builder.begin_statement(node)
self._enter_lexical_scope(node)
self.builder.enter_section(node)
self.generic_visit(node.test)
self.builder.enter_loop_section(node, node.test)
for stmt in node.body:
self.visit(stmt)
self.builder.exit_loop_section(node)
# Note: although the orelse is technically part of the loop node,
# the statements inside it don't affect the loop itself. For example, a
# break in the loop's orelse will not affect the loop itself.
self._exit_lexical_scope(node)
for stmt in node.orelse:
self.visit(stmt)
self.builder.exit_section(node)
self.builder.end_statement(node)
def visit_For(self, node):
self.builder.begin_statement(node)
self._enter_lexical_scope(node)
self.builder.enter_section(node)
# Note: Strictly speaking, this should be node.target + node.iter.
# However, the activity analysis accounts for this inconsistency,
# so dataflow analysis produces the correct values.
self.generic_visit(node.iter)
self.builder.enter_loop_section(node, node.iter)
# Also include the "extra loop test" annotation, to capture things like the
# control variable for return and break in for loops.
if anno.hasanno(node, anno.Basic.EXTRA_LOOP_TEST):
self._process_basic_statement(
anno.getanno(node, anno.Basic.EXTRA_LOOP_TEST))
for stmt in node.body:
self.visit(stmt)
self.builder.exit_loop_section(node)
# Note: although the orelse is technically part of the loop node,
# they don't count as loop bodies. For example, a break in the loop's
# orelse will affect the parent loop, not the current one.
self._exit_lexical_scope(node)
for stmt in node.orelse:
self.visit(stmt)
self.builder.exit_section(node)
self.builder.end_statement(node)
def visit_Break(self, node):
self._process_exit_statement(node, (
gast.While,
gast.For,
))
def visit_Continue(self, node):
self._process_continue_statement(node, (
gast.While,
gast.For,
))
def visit_ExceptHandler(self, node):
self.builder.begin_statement(node)
self.builder.enter_except_section(node)
if node.type is not None:
self.visit(node.type)
if node.name is not None:
self.visit(node.name)
for stmt in node.body:
self.visit(stmt)
self.builder.end_statement(node)
def visit_Try(self, node):
self.builder.begin_statement(node)
self._enter_lexical_scope(node)
# Note: the current simplification is that the try block fully executes
# regardless of whether an exception triggers or not. This is consistent
# with blocks free of try/except, which also don't account for the
# possibility of an exception being raised mid-block.
for stmt in node.body:
self.visit(stmt)
# The orelse is an optional continuation of the body.
if node.orelse:
block_representative = node.orelse[0]
self.builder.enter_cond_section(block_representative)
self.builder.new_cond_branch(block_representative)
for stmt in node.orelse:
self.visit(stmt)
self.builder.new_cond_branch(block_representative)
self.builder.exit_cond_section(block_representative)
self._exit_lexical_scope(node)
if node.handlers:
# Using node would be inconsistent. Using the first handler node is also
# inconsistent, but less so.
block_representative = node.handlers[0]
self.builder.enter_cond_section(block_representative)
for block in node.handlers:
self.builder.new_cond_branch(block_representative)
self.visit(block)
self.builder.new_cond_branch(block_representative)
self.builder.exit_cond_section(block_representative)
if node.finalbody:
self.builder.enter_finally_section(node)
for stmt in node.finalbody:
self.visit(stmt)
self.builder.exit_finally_section(node)
self.builder.end_statement(node)
def visit_With(self, node):
# TODO(mdan): Mark the context manager's exit call as exit guard.
for item in node.items:
self._process_basic_statement(item)
for stmt in node.body:
self.visit(stmt)
def build(node):
visitor = AstToCfg()
visitor.visit(node)
return visitor.cfgs
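# A usage sketch: build CFGs for every function in a source string and render them as DOT
# (per the tip in the module docstring). The source snippet below is illustrative.
def _cfg_example():
    src = ('def f(x):\n'
           '    while x > 0:\n'
           '        x -= 1\n'
           '    return x\n')
    tree = gast.parse(src)
    for fn_node, graph in build(tree).items():
        print(fn_node.name)
        print(graph.as_dot())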
|
DALI-main
|
dali/python/nvidia/dali/_autograph/pyct/cfg.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code transformation exceptions."""
import collections
from nvidia.dali._autograph.pyct import origin_info
class FrameInfo(
collections.namedtuple('FrameInfo',
('filename', 'lineno', 'function_name', 'code',
'is_converted', 'is_allowlisted'))):
__slots__ = ()
def _stack_trace_inside_mapped_code(tb, source_map, converter_filename):
"""Summarizes inner traceback frames up to the call to a given function.
This function locates the innermost (i.e. most recent) frame that corresponds
to code that can be mapped by source_map, and returns a translated stack trace
ending at that frame. If no such frame is found, the entire stack trace is
summarized.
For example, the following code:
def f():
for i in tf.range(1):
z = y + i # z only defined here
Would generate this traceback:
<converted code>
ag__.for_stmt(...)
<for_stmt>
return _known_len_tf_for_stmt(iter_, extra_test, body, init_state)
<_known_len_tf_for_stmt>
_disallow_undefs_into_loop(*init_state)
<_disallow_undefs_into_loop>
raise ...
Which is then processed into:
<f>
for i in tf.range(1):
<for_stmt>
return _known_len_tf_for_stmt(iter_, extra_test, body, init_state)
<_known_len_tf_for_stmt>
_disallow_undefs_into_loop(*init_state)
<_disallow_undefs_into_loop>
raise ...
Args:
tb: traceback.StackSummary, the traceback corresponding to an error.
Typically, the output of traceback.StackSummary.extract(capture_locals=True).
source_map: Dict[LineLocation, OriginInfo], a source map as created by
origin_info.create_source_map.
converter_filename: str, the file path of the converted module. Call frames
corresponding to this module are elided and their preceding frames are
marked as allowlisted. Note that frames enclosing converted code are
dropped using a different mechanism.
Returns:
List[FrameInfo]
"""
result_frames = []
for filename, line_number, function_name, text in reversed(tb):
loc = origin_info.LineLocation(filename=filename, lineno=line_number)
if loc in source_map:
origin = source_map[loc]
fi = FrameInfo(
filename=origin.loc.filename,
lineno=origin.loc.lineno,
function_name=origin.function_name,
code=origin.source_code_line,
is_converted=True,
is_allowlisted=False)
result_frames.append(fi)
break
if filename == converter_filename:
if result_frames:
prev = result_frames[-1]
assert not prev.is_converted # See the if above.
fi = FrameInfo(
filename=prev.filename,
lineno=prev.lineno,
function_name=prev.function_name,
code=prev.code,
is_converted=False,
is_allowlisted=True)
result_frames[-1] = fi
continue
fi = FrameInfo(
filename=filename,
lineno=line_number,
function_name=function_name,
code=text,
is_converted=False,
is_allowlisted=False)
result_frames.append(fi)
return tuple(result_frames)
KNOWN_STRING_CONSTRUCTOR_ERRORS = (
AssertionError,
AttributeError,
NameError,
NotImplementedError,
RuntimeError,
StopIteration,
TypeError,
UnboundLocalError,
ValueError,
)
# KeyError escapes newlines in strings. We create a special subclass
# that doesn't do that. Overriding the name for display purposes; hopefully
# that won't create too many surprises.
class MultilineMessageKeyError(KeyError):
def __init__(self, message, original_key):
super(MultilineMessageKeyError, self).__init__(original_key)
self.__message = message
def __str__(self):
return self.__message
MultilineMessageKeyError.__name__ = KeyError.__name__
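# A quick illustration of the difference (a sketch, not part of the module API):
#
#   str(KeyError('line one\nline two'))
#   # -> "'line one\\nline two'" - the repr of the key, with the newline escaped
#   str(MultilineMessageKeyError('line one\nline two', 'original key'))
#   # -> the message itself, with the newline preserved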
class ErrorMetadataBase(object):
"""Container objects attached to exceptions raised in user code.
This metadata allows re-raising exceptions that occur in generated code, with
a custom error message that includes a stack trace relative to user-readable
code from which the generated code originated.
"""
__slots__ = ('translated_stack', 'cause_message')
def __init__(self, callsite_tb, cause_metadata, cause_message, source_map,
converter_filename):
translated_stack = _stack_trace_inside_mapped_code(
callsite_tb, source_map, converter_filename)
if cause_metadata is None:
self.translated_stack = translated_stack
self.cause_message = cause_message
else:
# Daisy chain the translated stacks.
self.translated_stack = (
cause_metadata.translated_stack + (translated_stack[-1],))
self.cause_message = cause_metadata.cause_message
def get_message(self):
"""Returns the message for the underlying exception."""
lines = []
lines.append('in user code:')
lines.append('')
for frame_info in reversed(self.translated_stack):
# Same format with Python traceback.
formatted_line = (f' File "{frame_info.filename}", line '
f'{frame_info.lineno}, in {frame_info.function_name}')
if frame_info.is_converted:
formatted_line += ' *'
elif frame_info.is_allowlisted:
formatted_line += ' **'
lines.append(formatted_line)
if frame_info.code is None:
code_snippet = '<source unavailable>'
else:
code_snippet = frame_info.code.strip()
lines.append(' {}'.format(code_snippet))
lines.append('')
message_lines = self.cause_message.split('\n')
for i in range(len(message_lines)):
message_lines[i] = ' ' + message_lines[i]
lines.extend(message_lines)
lines.append('')
return '\n'.join(lines)
def create_exception(self, source_error):
"""Creates exception from source_error."""
preferred_type = type(source_error)
to_ret = None
if preferred_type.__init__ is Exception.__init__:
to_ret = preferred_type(self.get_message())
if preferred_type in KNOWN_STRING_CONSTRUCTOR_ERRORS:
to_ret = preferred_type(self.get_message())
elif preferred_type is KeyError:
to_ret = MultilineMessageKeyError(self.get_message(), self.cause_message)
if to_ret is not None:
return to_ret.with_traceback(source_error.__traceback__)
def to_exception(self, source_error):
exc = self.create_exception(source_error)
exc.__suppress_context__ = True
exc.ag_error_metadata = self
return exc
|
DALI-main
|
dali/python/nvidia/dali/_autograph/pyct/error_utils.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Symbol naming utilities."""
from nvidia.dali._autograph.pyct import qual_names
class Namer(object):
"""Symbol name generator."""
def __init__(self, global_namespace):
self.global_namespace = global_namespace
self.generated_names = set()
def new_symbol(self, name_root, reserved_locals):
"""See control_flow.SymbolNamer.new_symbol."""
# reserved_locals may contain QNs.
all_reserved_locals = set()
for s in reserved_locals:
if isinstance(s, qual_names.QN):
all_reserved_locals.update(s.qn)
elif isinstance(s, str):
all_reserved_locals.add(s)
else:
raise ValueError('Unexpected symbol type "%s"' % type(s))
pieces = name_root.split('_')
if pieces[-1].isdigit():
name_root = '_'.join(pieces[:-1])
n = int(pieces[-1])
else:
n = 0
new_name = name_root
while (new_name in self.global_namespace or
new_name in all_reserved_locals or new_name in self.generated_names):
n += 1
new_name = '%s_%d' % (name_root, n)
self.generated_names.add(new_name)
return new_name
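# A minimal usage sketch (illustrative only):
#
#   namer = Namer(global_namespace={'foo': 1})
#   namer.new_symbol('tmp', reserved_locals=set())   # -> 'tmp'
#   namer.new_symbol('tmp', reserved_locals=set())   # -> 'tmp_1', avoiding the first result
#   namer.new_symbol('foo', reserved_locals=set())   # -> 'foo_1', avoiding the global namespace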
|
DALI-main
|
dali/python/nvidia/dali/_autograph/pyct/naming.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Caching utilities."""
import inspect
import weakref
# TODO(mdan): Add a garbage collection hook for cleaning up modules.
class _TransformedFnCache(object):
"""Generic hierarchical cache for transformed functions.
The keys are soft references (i.e. they are discarded when the key is
destroyed) created from the source function by `_get_key`. The subkeys are
strong references and can be any value. Typically they identify different
kinds of transformation.
"""
__slots__ = ('_cache',)
def __init__(self):
self._cache = weakref.WeakKeyDictionary()
def _get_key(self, entity):
raise NotImplementedError('subclasses must override')
def has(self, entity, subkey):
key = self._get_key(entity)
parent = self._cache.get(key, None)
if parent is None:
return False
return subkey in parent
def __getitem__(self, entity):
key = self._get_key(entity)
parent = self._cache.get(key, None)
if parent is None:
# The bucket is initialized to support this usage:
# cache[key][subkey] = value
self._cache[key] = parent = {}
return parent
def __len__(self):
return len(self._cache)
class CodeObjectCache(_TransformedFnCache):
"""A function cache based on code objects.
Code objects are good proxies for the source code of a function.
This cache efficiently handles functions that share code objects, such as
functions defined in a loop, bound methods, etc.
The cache falls back to the function object, if it doesn't have a code object.
"""
def _get_key(self, entity):
if hasattr(entity, '__code__'):
return entity.__code__
else:
return entity
class UnboundInstanceCache(_TransformedFnCache):
"""A function cache based on unbound function objects.
Using the function for the cache key allows efficient handling of object
methods.
Unlike the _CodeObjectCache, this discriminates between different functions
even if they have the same code. This is needed for decorators that may
masquerade as another function.
"""
def _get_key(self, entity):
if inspect.ismethod(entity):
return entity.__func__
return entity
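# A minimal usage sketch (illustrative only):
#
#   fn_cache = CodeObjectCache()
#
#   def fn():
#     pass
#
#   fn_cache[fn]['converted'] = object()   # any transformed artifact
#   fn_cache.has(fn, 'converted')          # -> True
#   fn_cache.has(fn, 'other')              # -> False
#
# Because the key is fn.__code__ held through a WeakKeyDictionary, the entry
# is dropped automatically once the code object is garbage collected.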
|
DALI-main
|
dali/python/nvidia/dali/_autograph/pyct/cache.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python source code transformation library."""
|
DALI-main
|
dali/python/nvidia/dali/_autograph/pyct/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Live entity inspection utilities.
This module contains whatever inspect doesn't offer out of the box.
"""
import inspect
import itertools
import linecache
import sys
import threading
import types
import six
# This lock seems to help avoid linecache concurrency errors.
_linecache_lock = threading.Lock()
# These functions test negative for isinstance(*, types.BuiltinFunctionType)
# and inspect.isbuiltin, and are generally not visible in globals().
# TODO(mdan): Remove this.
SPECIAL_BUILTINS = {
'dict': dict,
'enumerate': enumerate,
'float': float,
'int': int,
'len': len,
'list': list,
'print': print,
'range': range,
'tuple': tuple,
'type': type,
'zip': zip
}
def islambda(f):
if not inspect.isfunction(f):
return False
# TODO(mdan): Look into checking only the code object.
if not (hasattr(f, '__name__') and hasattr(f, '__code__')):
return False
# Some wrappers can rename the function, but changing the name of the
# code object is harder.
return ((f.__name__ == '<lambda>') or (f.__code__.co_name == '<lambda>'))
def isnamedtuple(f):
"""Returns True if the argument is a namedtuple-like."""
if not (inspect.isclass(f) and issubclass(f, tuple)):
return False
if not hasattr(f, '_fields'):
return False
fields = getattr(f, '_fields')
if not isinstance(fields, tuple):
return False
if not all(isinstance(f, str) for f in fields):
return False
return True
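# A quick sanity sketch (illustrative only):
#
#   import collections
#   Point = collections.namedtuple('Point', ('x', 'y'))
#   isnamedtuple(Point)        # -> True
#   isnamedtuple(tuple)        # -> False: no _fields attribute
#   islambda(lambda x: x)      # -> True
#   islambda(isnamedtuple)     # -> False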
def isbuiltin(f):
"""Returns True if the argument is a built-in function."""
if any(f is builtin for builtin in six.moves.builtins.__dict__.values()):
return True
elif isinstance(f, types.BuiltinFunctionType):
return True
elif inspect.isbuiltin(f):
return True
elif f is eval:
return True
else:
return False
def isconstructor(cls):
"""Returns True if the argument is an object constructor.
In general, any object of type class is a constructor, with the exception
of classes created using a callable metaclass.
See below for why a callable metaclass is not a trivial combination:
https://docs.python.org/2.7/reference/datamodel.html#customizing-class-creation
Args:
cls: Any
Returns:
Bool
"""
return (inspect.isclass(cls) and
not (issubclass(cls.__class__, type) and
hasattr(cls.__class__, '__call__') and
cls.__class__.__call__ is not type.__call__))
def _fix_linecache_record(obj):
"""Fixes potential corruption of linecache in the presence of functools.wraps.
functools.wraps modifies the target object's __module__ field, which seems
to confuse linecache in special instances, for example when the source is
loaded from a .par file (see https://google.github.io/subpar/subpar.html).
This function simply triggers a call to linecache.updatecache when a mismatch
was detected between the object's __module__ property and the object's source
file.
Args:
obj: Any
"""
if hasattr(obj, '__module__'):
obj_file = inspect.getfile(obj)
obj_module = obj.__module__
# A snapshot of the loaded modules helps avoid "dict changed size during
# iteration" errors.
loaded_modules = tuple(sys.modules.values())
for m in loaded_modules:
if hasattr(m, '__file__') and m.__file__ == obj_file:
if obj_module is not m:
linecache.updatecache(obj_file, m.__dict__)
def getimmediatesource(obj):
"""A variant of inspect.getsource that ignores the __wrapped__ property."""
with _linecache_lock:
_fix_linecache_record(obj)
lines, lnum = inspect.findsource(obj)
return ''.join(inspect.getblock(lines[lnum:]))
def getnamespace(f):
"""Returns the complete namespace of a function.
Namespace is defined here as the mapping of all non-local variables to values.
This includes the globals and the closure variables. Note that this captures
the entire globals collection of the function, and may contain extra symbols
that it does not actually use.
Args:
f: User defined function.
Returns:
A dict mapping symbol names to values.
"""
namespace = dict(six.get_function_globals(f))
closure = six.get_function_closure(f)
freevars = six.get_function_code(f).co_freevars
if freevars and closure:
for name, cell in zip(freevars, closure):
try:
namespace[name] = cell.cell_contents
except ValueError:
# Cell contains undefined variable, omit it from the namespace.
pass
return namespace
def getqualifiedname(namespace, object_, max_depth=5, visited=None):
"""Returns the name by which a value can be referred to in a given namespace.
If the object defines a parent module, the function attempts to use it to
locate the object.
This function will recurse inside modules, but it will not search objects for
attributes. The recursion depth is controlled by max_depth.
Args:
namespace: Dict[str, Any], the namespace to search into.
object_: Any, the value to search.
max_depth: Optional[int], a limit to the recursion depth when searching
inside modules.
visited: Optional[Set[int]], ID of modules to avoid visiting.
Returns: Union[str, None], the fully-qualified name that resolves to the value
object_, or None if it couldn't be found.
"""
if visited is None:
visited = set()
# Copy the dict to avoid "changed size error" during concurrent invocations.
# TODO(mdan): This is on the hot path. Can we avoid the copy?
namespace = dict(namespace)
for name in namespace:
# The value may be referenced by more than one symbol, case in which
# any symbol will be fine. If the program contains symbol aliases that
# change over time, this may capture a symbol that will later point to
# something else.
# TODO(mdan): Prefer the symbol that matches the value type name.
if object_ is namespace[name]:
return name
# If an object is not found, try to search its parent modules.
parent = inspect.getmodule(object_)
if (parent is not None and parent is not object_ and parent is not namespace):
# No limit to recursion depth because of the guard above.
parent_name = getqualifiedname(
namespace, parent, max_depth=0, visited=visited)
if parent_name is not None:
name_in_parent = getqualifiedname(
parent.__dict__, object_, max_depth=0, visited=visited)
assert name_in_parent is not None, (
'An object should always be found in its owner module')
return '{}.{}'.format(parent_name, name_in_parent)
if max_depth:
# Iterating over a copy prevents "changed size due to iteration" errors.
# It's unclear why those occur - suspecting new modules may load during
# iteration.
for name in namespace.keys():
value = namespace[name]
if inspect.ismodule(value) and id(value) not in visited:
visited.add(id(value))
name_in_module = getqualifiedname(value.__dict__, object_,
max_depth - 1, visited)
if name_in_module is not None:
return '{}.{}'.format(name, name_in_module)
return None
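# A minimal usage sketch (illustrative only):
#
#   import math
#   getqualifiedname({'math': math}, math.pi)      # -> 'math.pi'
#   getqualifiedname({'math': math}, 'not there')  # -> None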
def _get_unbound_function(m):
# TODO(mdan): Figure out why six.get_unbound_function fails in some cases.
# The failure case is for tf.keras.Model.
if hasattr(m, '__func__'):
return m.__func__
if hasattr(m, 'im_func'):
return m.im_func
return m
def getdefiningclass(m, owner_class):
"""Resolves the class (e.g. one of the superclasses) that defined a method."""
# Normalize bound functions to their respective unbound versions.
m = _get_unbound_function(m)
for superclass in reversed(inspect.getmro(owner_class)):
if hasattr(superclass, m.__name__):
superclass_m = getattr(superclass, m.__name__)
if _get_unbound_function(superclass_m) is m:
return superclass
elif hasattr(m, '__self__') and m.__self__ == owner_class:
# Python 3 class methods only work this way it seems :S
return superclass
return owner_class
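# A minimal usage sketch (illustrative only):
#
#   class Base(object):
#     def f(self):
#       pass
#
#   class Child(Base):
#     pass
#
#   getdefiningclass(Child.f, Child)   # -> Base, where f was actually defined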
def getmethodclass(m):
"""Resolves a function's owner, e.g.
a method's class.
Note that this returns the object that the function was retrieved from, not
necessarily the class where it was defined.
This function relies on Python stack frame support in the interpreter, and
has the same limitations as inspect.currentframe.
Limitations: this function will only work correctly if the owning class is
visible in the caller's global or local variables.
Args:
m: A user defined function
Returns:
The class that this function was retrieved from, or None if the function
is not an object or class method, or the class that owns the object or
method is not visible to m.
Raises:
ValueError: if the class could not be resolved for any unexpected reason.
"""
# Callable objects: return their own class.
if (not hasattr(m, '__name__') and hasattr(m, '__class__') and
hasattr(m, '__call__')):
if isinstance(m.__class__, six.class_types):
return m.__class__
# Instance and class: return the class of "self".
m_self = getattr(m, '__self__', None)
if m_self is not None:
if inspect.isclass(m_self):
return m_self
return m_self.__class__
# Class, static and unbound methods: search all defined classes in any
# namespace. This is inefficient, but more robust.
owners = []
caller_frame = inspect.currentframe().f_back
try:
# TODO(mdan): This doesn't consider cell variables.
# TODO(mdan): This won't work if the owner is hidden inside a container.
# Cell variables may be pulled using co_freevars and the closure.
for v in itertools.chain(caller_frame.f_locals.values(),
caller_frame.f_globals.values()):
if hasattr(v, m.__name__):
candidate = getattr(v, m.__name__)
# Py2 methods may be bound or unbound, extract im_func to get the
# underlying function.
if hasattr(candidate, 'im_func'):
candidate = candidate.im_func
if hasattr(m, 'im_func'):
m = m.im_func
if candidate is m:
owners.append(v)
finally:
del caller_frame
if owners:
if len(owners) == 1:
return owners[0]
# If multiple owners are found, and are not subclasses, raise an error.
owner_types = tuple(o if inspect.isclass(o) else type(o) for o in owners)
for o in owner_types:
if inspect.isclass(o) and issubclass(o, tuple(owner_types)):
return o
raise ValueError('Found too many owners of %s: %s' % (m, owners))
return None
def getfutureimports(entity):
"""Detects what future imports are necessary to safely execute entity source.
Args:
entity: Any object
Returns:
A tuple of future strings
"""
if not (inspect.isfunction(entity) or inspect.ismethod(entity)):
return tuple()
return tuple(
sorted(name for name, value in entity.__globals__.items()
if getattr(value, '__module__', None) == '__future__'))
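# A minimal usage sketch (illustrative only; `fn` is a hypothetical function
# defined in a module that starts with `from __future__ import annotations`):
#
#   getfutureimports(fn)   # -> ('annotations',)
#
# The names are recovered by scanning fn.__globals__ for values whose
# __module__ is '__future__'.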
|
DALI-main
|
dali/python/nvidia/dali/_autograph/pyct/inspect_utils.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AST manipulation utilities."""
import ast
import gast
from nvidia.dali._autograph.pyct import anno
from nvidia.dali._autograph.pyct import parser
from nvidia.dali._autograph.pyct import qual_names
class CleanCopier(object):
"""NodeTransformer-like visitor that copies an AST."""
def __init__(self, preserve_annos):
super(CleanCopier, self).__init__()
self.preserve_annos = preserve_annos
def copy(self, node):
"""Returns a deep copy of node (excluding some fields, see copy_clean)."""
if isinstance(node, list):
return [self.copy(n) for n in node]
elif isinstance(node, tuple):
return tuple(self.copy(n) for n in node)
elif not isinstance(node, (gast.AST, ast.AST)):
# Assuming everything that's not an AST, list or tuple is a value type
# and may simply be assigned.
return node
assert isinstance(node, (gast.AST, ast.AST))
new_fields = {}
for f in node._fields:
if not f.startswith('__') and hasattr(node, f):
new_fields[f] = self.copy(getattr(node, f))
new_node = type(node)(**new_fields)
if self.preserve_annos:
for k in self.preserve_annos:
anno.copyanno(node, new_node, k)
return new_node
def copy_clean(node, preserve_annos=None):
"""Creates a deep copy of an AST.
The copy will not include fields that are prefixed by '__', with the
exception of user-specified annotations.
Args:
node: ast.AST
preserve_annos: Optional[Set[Hashable]], annotation keys to include in the
copy
Returns:
ast.AST
"""
return CleanCopier(preserve_annos).copy(node)
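# A minimal usage sketch (illustrative only; gast is imported above):
#
#   node = gast.parse('x = 1').body[0]
#   node_copy = copy_clean(node)
#   node_copy is node               # -> False: a fresh, field-by-field copy
#   type(node_copy) is type(node)   # -> True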
class SymbolRenamer(gast.NodeTransformer):
"""Transformer that can rename symbols to a simple names."""
def __init__(self, name_map):
self.name_map = name_map
def _process_name_node(self, node):
qn = anno.getanno(node, anno.Basic.QN)
if qn in self.name_map:
new_node = gast.Name(
str(self.name_map[qn]),
ctx=node.ctx,
annotation=None,
type_comment=None)
# All annotations get carried over.
for k in anno.keys(node):
anno.copyanno(node, new_node, k)
return new_node
return self.generic_visit(node)
def _process_list_of_strings(self, names):
for i in range(len(names)):
qn = qual_names.QN(names[i])
if qn in self.name_map:
names[i] = str(self.name_map[qn])
return names
def visit_Nonlocal(self, node):
node.names = self._process_list_of_strings(node.names)
return node
def visit_Global(self, node):
node.names = self._process_list_of_strings(node.names)
return node
def visit_Name(self, node):
return self._process_name_node(node)
def visit_Attribute(self, node):
if anno.hasanno(node, anno.Basic.QN):
return self._process_name_node(node)
# Renaming attributes is not supported.
return self.generic_visit(node)
def visit_FunctionDef(self, node):
qn = qual_names.QN(node.name)
if qn in self.name_map:
node.name = str(self.name_map[qn])
return self.generic_visit(node)
def rename_symbols(node, name_map):
"""Renames symbols in an AST. Requires qual_names annotations."""
renamer = SymbolRenamer(name_map)
if isinstance(node, list):
return [renamer.visit(n) for n in node]
elif isinstance(node, tuple):
return tuple(renamer.visit(n) for n in node)
return renamer.visit(node)
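# A minimal usage sketch (illustrative only; it assumes qual_names.resolve is
# available to attach the required QN annotations, as used elsewhere in this
# package):
#
#   node = qual_names.resolve(gast.parse('x = y'))
#   node = rename_symbols(
#       node, {qual_names.QN('y'): qual_names.QN('renamed_y')})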
def keywords_to_dict(keywords):
"""Converts a list of ast.keyword objects to a dict."""
keys = []
values = []
for kw in keywords:
keys.append(gast.Constant(kw.arg, kind=None))
values.append(kw.value)
return gast.Dict(keys=keys, values=values)
class PatternMatcher(gast.NodeVisitor):
"""Matches a node against a pattern represented by a node."""
def __init__(self, pattern):
self.pattern = pattern
self.pattern_stack = []
self.matches = True
def compare_and_visit(self, node, pattern):
self.pattern_stack.append(self.pattern)
self.pattern = pattern
self.generic_visit(node)
self.pattern = self.pattern_stack.pop()
def no_match(self):
self.matches = False
return False
def is_wildcard(self, p):
if isinstance(p, (list, tuple)) and len(p) == 1:
p, = p
if isinstance(p, gast.Name) and p.id == '_':
return True
if p == '_':
return True
return False
def generic_visit(self, node):
if not self.matches:
return
pattern = self.pattern
for f in node._fields:
if f.startswith('__'):
continue
if not hasattr(node, f):
if hasattr(pattern, f) and getattr(pattern, f):
return self.no_match()
else:
continue
if not hasattr(pattern, f):
return self.no_match()
v = getattr(node, f)
p = getattr(pattern, f)
if self.is_wildcard(p):
continue
if isinstance(v, (list, tuple)):
if not isinstance(p, (list, tuple)) or len(v) != len(p):
return self.no_match()
for v_item, p_item in zip(v, p):
self.compare_and_visit(v_item, p_item)
elif isinstance(v, (gast.AST, ast.AST)):
if not isinstance(v, type(p)) and not isinstance(p, type(v)):
return self.no_match()
self.compare_and_visit(v, p)
else:
# Assume everything else is a value type.
if v != p:
return self.no_match()
def matches(node, pattern):
"""Basic pattern matcher for AST.
The pattern may contain wildcards represented by the symbol '_'. A node
matches a pattern if, for every node in the tree, there is either a
corresponding node of the same type in the pattern, or a Name node with id='_'.
Args:
node: ast.AST
pattern: ast.AST
Returns:
bool
"""
if isinstance(pattern, str):
pattern = parser.parse_str(pattern)
matcher = PatternMatcher(pattern)
matcher.visit(node)
return matcher.matches
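# A minimal usage sketch (illustrative only):
#
#   matches(parser.parse_expression('foo(1, 2)'),
#           parser.parse_expression('foo(_)'))    # -> True: '_' wildcards the argument list
#   matches(parser.parse_expression('foo(x)'),
#           parser.parse_expression('bar(_)'))    # -> False: the callee names differ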
# TODO(mdan): Once we have error tracing, we may be able to just go to SSA.
def apply_to_single_assignments(targets, values, apply_fn):
"""Applies a function to each individual assignment.
This function can process a possibly-unpacked (e.g. a, b = c, d) assignment.
It tries to break down the unpacking if possible. In effect, it is
equivalent to passing the assigned values in SSA form to apply_fn.
Examples:
The following will result in apply_fn(a, c), apply_fn(b, d):
a, b = c, d
The following will result in apply_fn(a, c[0]), apply_fn(b, c[1]):
a, b = c
The following will result in apply_fn(a, (b, c)):
a = b, c
It uses the visitor pattern to allow subclasses to process single
assignments individually.
Args:
targets: Union[List[ast.AST], Tuple[ast.AST, ...], ast.AST], should be
used with the targets field of an ast.Assign node
values: ast.AST
apply_fn: Callable[[ast.AST, ast.AST], None], called with the
respective nodes of each single assignment
"""
if not isinstance(targets, (list, tuple)):
targets = (targets,)
for target in targets:
if isinstance(target, (gast.Tuple, gast.List)):
for i in range(len(target.elts)):
target_el = target.elts[i]
if isinstance(values, (gast.Tuple, gast.List)):
value_el = values.elts[i]
else:
idx = parser.parse_expression(str(i))
value_el = gast.Subscript(values, idx, ctx=gast.Load())
apply_to_single_assignments(target_el, value_el, apply_fn)
else:
apply_fn(target, values)
def parallel_walk(node, other):
"""Walks two ASTs in parallel.
The two trees must have identical structure.
Args:
node: Union[ast.AST, Iterable[ast.AST]]
other: Union[ast.AST, Iterable[ast.AST]]
Yields:
Tuple[ast.AST, ast.AST]
Raises:
ValueError: if the two trees don't have identical structure.
"""
if isinstance(node, (list, tuple)):
node_stack = list(node)
else:
node_stack = [node]
if isinstance(other, (list, tuple)):
other_stack = list(other)
else:
other_stack = [other]
while node_stack and other_stack:
assert len(node_stack) == len(other_stack)
n = node_stack.pop()
o = other_stack.pop()
if ((not isinstance(n, (ast.AST, gast.AST, str)) and n is not None) or
(not isinstance(o, (ast.AST, gast.AST, str)) and o is not None) or
n.__class__.__name__ != o.__class__.__name__):
raise ValueError('inconsistent nodes: {} ({}) and {} ({})'.format(
n, n.__class__.__name__, o, o.__class__.__name__))
yield n, o
if isinstance(n, str):
assert isinstance(o, str), 'The check above should have ensured this'
continue
if n is None:
assert o is None, 'The check above should have ensured this'
continue
for f in n._fields:
n_child = getattr(n, f, None)
o_child = getattr(o, f, None)
if f.startswith('__') or n_child is None or o_child is None:
continue
if isinstance(n_child, (list, tuple)):
if (not isinstance(o_child, (list, tuple)) or
len(n_child) != len(o_child)):
raise ValueError(
'inconsistent values for field {}: {} and {}'.format(
f, n_child, o_child))
node_stack.extend(n_child)
other_stack.extend(o_child)
elif isinstance(n_child, (gast.AST, ast.AST)):
node_stack.append(n_child)
other_stack.append(o_child)
elif n_child != o_child:
raise ValueError(
'inconsistent values for field {}: {} and {}'.format(
f, n_child, o_child))
|
DALI-main
|
dali/python/nvidia/dali/_autograph/pyct/ast_util.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converting code to AST.
Adapted from Tangent.
"""
import ast
import inspect
import linecache
import re
import sys
import textwrap
import tokenize
import astunparse
import gast
import six
from nvidia.dali._autograph.pyct import errors
from nvidia.dali._autograph.pyct import inspect_utils
PY2_PREAMBLE = textwrap.dedent("""
""")
PY3_PREAMBLE = ''
MAX_SIZE = 0
if sys.version_info >= (3, 9):
astunparse = ast # noqa: F811
if sys.version_info >= (3,):
STANDARD_PREAMBLE = PY3_PREAMBLE
MAX_SIZE = sys.maxsize
else:
STANDARD_PREAMBLE = PY2_PREAMBLE
MAX_SIZE = sys.maxint
STANDARD_PREAMBLE_LEN = STANDARD_PREAMBLE.count('__future__')
_LEADING_WHITESPACE = re.compile(r'\s*')
def _unfold_continuations(code_string):
"""Removes any backslash line continuations from the code."""
return code_string.replace('\\\n', '')
def dedent_block(code_string):
"""Dedents a code so that its first line starts at row zero."""
code_string = _unfold_continuations(code_string)
token_gen = tokenize.generate_tokens(six.StringIO(code_string).readline)
block_indentation = None
tokens = []
try:
for tok in token_gen:
tokens.append(tok)
except tokenize.TokenError:
# Resolution of lambda functions may yield incomplete code, which can
# in turn generate this error. We silently ignore this error because the
# parser may still be able to deal with it.
pass
for tok in tokens:
tok_type, tok_string, _, _, _ = tok
if tok_type == tokenize.INDENT:
block_indentation = tok_string
block_level = len(block_indentation)
break
elif tok_type not in (
tokenize.NL, tokenize.NEWLINE, tokenize.STRING, tokenize.COMMENT):
block_indentation = ''
break
if not block_indentation:
return code_string
block_level = len(block_indentation)
first_indent_uses_tabs = '\t' in block_indentation
for i, tok in enumerate(tokens):
tok_type, tok_string, _, _, _ = tok
if tok_type == tokenize.INDENT:
if ((' ' in tok_string and first_indent_uses_tabs)
or ('\t' in tok_string and not first_indent_uses_tabs)):
# TODO(mdan): We could attempt to convert tabs to spaces by unix rule.
# See:
# https://docs.python.org/3/reference/lexical_analysis.html#indentation
raise errors.UnsupportedLanguageElementError(
'code mixing tabs and spaces for indentation is not allowed')
if len(tok_string) >= block_level:
tok_string = tok_string[block_level:]
tokens[i] = (tok_type, tok_string)
new_code = tokenize.untokenize(tokens)
# Note: untokenize respects the line structure, but not the whitespace within
# lines. For example, `def foo()` may be untokenized as `def foo ()`
# So instead of using the output of dedent, we match the leading whitespace
# on each line.
dedented_code = []
for line, new_line in zip(code_string.split('\n'), new_code.split('\n')):
original_indent = re.match(_LEADING_WHITESPACE, line).group()
new_indent = re.match(_LEADING_WHITESPACE, new_line).group()
if len(original_indent) > len(new_indent):
dedented_line = line[len(original_indent) - len(new_indent):]
else:
dedented_line = line
dedented_code.append(dedented_line)
new_code = '\n'.join(dedented_code)
return new_code
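# A minimal usage sketch (illustrative only), e.g. for a method source obtained
# via inspect.getsource, which keeps the class-level indentation:
#
#   src = "    def f(self):\n        return 1\n"
#   dedent_block(src)
#   # -> roughly "def f(self):\n    return 1\n", i.e. the block shifted to
#   #    start at column zero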
def parse_entity(entity, future_features):
"""Returns the AST and source code of given entity.
Args:
entity: Any, Python function/method/class
future_features: Iterable[Text], future features to use (e.g.
'print_statement'). See
https://docs.python.org/2/reference/simple_stmts.html#future
Returns:
gast.AST, Text: the parsed AST node; the source code that was parsed to
generate the AST (including any prefixes that this function may have added).
"""
if inspect_utils.islambda(entity):
return _parse_lambda(entity)
try:
original_source = inspect_utils.getimmediatesource(entity)
except OSError as e:
raise errors.InaccessibleSourceCodeError(
f'Unable to locate the source code of {entity}. Note that functions'
' defined in certain environments, like the interactive Python shell,'
' do not expose their source code. If that is the case, you should'
' define them in a .py source file. If you are certain the code is'
' graph-compatible, wrap the call in the do_not_convert decorator.'
f' Original error: {e}')
source = dedent_block(original_source)
future_statements = tuple(
'from __future__ import {}'.format(name) for name in future_features)
source = '\n'.join(future_statements + (source,))
return parse(source, preamble_len=len(future_features)), source
def _without_context(node, lines, minl, maxl):
"""Returns a clean node and source code without indenting and context."""
for n in gast.walk(node):
lineno = getattr(n, 'lineno', None)
if lineno is not None:
n.lineno = lineno - minl
end_lineno = getattr(n, 'end_lineno', None)
if end_lineno is not None:
n.end_lineno = end_lineno - minl
code_lines = lines[minl - 1:maxl]
# Attempt to clean up surrounding context code.
end_col_offset = getattr(node, 'end_col_offset', None)
if end_col_offset is not None:
# end_col_offset is only available in Python 3.8+.
code_lines[-1] = code_lines[-1][:end_col_offset]
col_offset = getattr(node, 'col_offset', None)
if col_offset is None:
# Older Python: try to find the "lambda" token. This is brittle.
match = re.search(r'(?<!\w)lambda(?!\w)', code_lines[0])
if match is not None:
col_offset = match.start(0)
if col_offset is not None:
code_lines[0] = code_lines[0][col_offset:]
code_block = '\n'.join([c.rstrip() for c in code_lines])
return node, code_block
def _arg_name(node):
if node is None:
return None
if isinstance(node, gast.Name):
return node.id
assert isinstance(node, str)
return node
def _node_matches_argspec(node, func):
"""Returns True is node fits the argspec of func."""
# TODO(mdan): Use just inspect once support for Python 2 is dropped.
arg_spec = inspect.getfullargspec(func)
node_args = tuple(_arg_name(arg) for arg in node.args.args)
if node_args != tuple(arg_spec.args):
return False
if arg_spec.varargs != _arg_name(node.args.vararg):
return False
if arg_spec.varkw != _arg_name(node.args.kwarg):
return False
node_kwonlyargs = tuple(_arg_name(arg) for arg in node.args.kwonlyargs)
if node_kwonlyargs != tuple(arg_spec.kwonlyargs):
return False
return True
def _parse_lambda(lam):
"""Returns the AST and source code of given lambda function.
Args:
lam: types.LambdaType, Python function/method/class
Returns:
gast.AST, Text: the parsed AST node; the source code that was parsed to
generate the AST (including any prefixes that this function may have added).
"""
# TODO(mdan): Use a fast path if the definition is not multi-line.
# We could detect that the lambda is in a multi-line expression by looking
# at the surrounding code - a surrounding set of parentheses indicates a
# potential multi-line definition.
mod = inspect.getmodule(lam)
f = inspect.getsourcefile(lam)
def_line = lam.__code__.co_firstlineno
# This method is more robust than just calling inspect.getsource(mod), as it
# works in interactive shells, where getsource would fail. This is the
# same procedure followed by inspect for non-modules:
# https://github.com/python/cpython/blob/3.8/Lib/inspect.py#L772
lines = linecache.getlines(f, mod.__dict__)
source = ''.join(lines)
# Narrow down to the last node starting before our definition node.
all_nodes = parse(source, preamble_len=0, single_node=False)
search_nodes = []
for node in all_nodes:
# Also include nodes without a line number, for safety. This is defensive -
# we don't know whether such nodes might exist, and if they do, whether
# they are not safe to skip.
# TODO(mdan): Replace this check with an assertion or skip such nodes.
if getattr(node, 'lineno', def_line) <= def_line:
search_nodes.append(node)
else:
# Found a node starting past our lambda - can stop the search.
break
# Extract all lambda nodes from the shortlist.
lambda_nodes = []
for node in search_nodes:
lambda_nodes.extend(
n for n in gast.walk(node) if isinstance(n, gast.Lambda))
# Filter down to lambda nodes which span our actual lambda.
candidates = []
for ln in lambda_nodes:
minl, maxl = MAX_SIZE, 0
for n in gast.walk(ln):
minl = min(minl, getattr(n, 'lineno', minl))
lineno = getattr(n, 'lineno', maxl)
end_lineno = getattr(n, 'end_lineno', None)
if end_lineno is not None:
# end_lineno is more precise, but lineno should almost always work too.
lineno = end_lineno
maxl = max(maxl, lineno)
if minl <= def_line <= maxl:
candidates.append((ln, minl, maxl))
# Happy path: exactly one node found.
if len(candidates) == 1:
(node, minl, maxl), = candidates # pylint:disable=unbalanced-tuple-unpacking
return _without_context(node, lines, minl, maxl)
elif not candidates:
lambda_codes = '\n'.join([unparse(l) for l in lambda_nodes])
raise errors.UnsupportedLanguageElementError(
f'could not parse the source code of {lam}:'
f' no matching AST found among candidates:\n{lambda_codes}')
# Attempt to narrow down the selection by signature if multiple nodes are found.
matches = [v for v in candidates if _node_matches_argspec(v[0], lam)]
if len(matches) == 1:
(node, minl, maxl), = matches
return _without_context(node, lines, minl, maxl)
# Give up if we could not narrow down to a single node.
matches = '\n'.join(
'Match {}:\n{}\n'.format(i, unparse(node, include_encoding_marker=False))
for i, (node, _, _) in enumerate(matches))
raise errors.UnsupportedLanguageElementError(
f'could not parse the source code of {lam}: found multiple definitions'
' with identical signatures at the location. This error'
' may be avoided by defining each lambda on a single line and with'
f' unique argument names. The matching definitions were:\n{matches}')
# TODO(mdan): This should take futures as input instead.
def parse(src, preamble_len=0, single_node=True):
"""Returns the AST of given piece of code.
Args:
src: Text
preamble_len: Int, indicates leading nodes in the parsed AST which should be
dropped.
single_node: Bool, whether `src` is assumed to be represented by exactly one
AST node.
Returns:
ast.AST
"""
module_node = gast.parse(src)
nodes = module_node.body
if preamble_len:
nodes = nodes[preamble_len:]
if single_node:
if len(nodes) != 1:
raise ValueError('expected exactly one node, got {}'.format(nodes))
return nodes[0]
return nodes
def parse_expression(src):
"""Returns the AST of given identifier.
Args:
src: A piece of code that represents a single Python expression
Returns:
A gast.AST object.
Raises:
ValueError: if src does not consist of a single Expression.
"""
src = STANDARD_PREAMBLE + src.strip()
node = parse(src, preamble_len=STANDARD_PREAMBLE_LEN, single_node=True)
if __debug__:
if not isinstance(node, gast.Expr):
raise ValueError(
'expected exactly one node of type Expr, got {}'.format(node))
return node.value
def unparse(node, indentation=None, include_encoding_marker=True):
"""Returns the source code of given AST.
Args:
node: The code to compile, as an AST object.
indentation: Unused, deprecated. The returning code will always be indented
at 4 spaces.
include_encoding_marker: Bool, whether to include a comment on the first
line to explicitly specify UTF-8 encoding.
Returns:
code: The source code generated from the AST object
source_mapping: A mapping between the user and AutoGraph generated code.
"""
del indentation # astunparse doesn't allow configuring it.
if not isinstance(node, (list, tuple)):
node = (node,)
codes = []
if include_encoding_marker:
codes.append('# coding=utf-8')
for n in node:
if isinstance(n, gast.AST):
ast_n = gast.gast_to_ast(n)
else:
ast_n = n
if astunparse is ast:
ast.fix_missing_locations(ast_n) # Only ast needs to call this.
codes.append(astunparse.unparse(ast_n).strip())
return '\n'.join(codes)
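# A minimal round-trip sketch (illustrative only):
#
#   node = parse('x = 1 + 2')
#   unparse(node, include_encoding_marker=False)
#   # -> source text equivalent to 'x = 1 + 2'; the exact formatting depends on
#   #    whether astunparse or the standard ast module is doing the unparsing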
|
DALI-main
|
dali/python/nvidia/dali/_autograph/pyct/parser.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generic source code transformation infrastructure."""
import inspect
import threading
import types
import gast
from nvidia.dali._autograph.pyct import cache
from nvidia.dali._autograph.pyct import gast_util
from nvidia.dali._autograph.pyct import inspect_utils
from nvidia.dali._autograph.pyct import loader
from nvidia.dali._autograph.pyct import naming
from nvidia.dali._autograph.pyct import origin_info
from nvidia.dali._autograph.pyct import parser
from nvidia.dali._autograph.pyct import templates
from nvidia.dali._autograph.pyct import transformer
from nvidia.dali._autograph.utils import ag_logging as logging
def _wrap_into_factory(nodes, entity_name, inner_factory_name,
outer_factory_name, closure_vars, factory_args,
future_features):
"""Wraps an AST into the body of a factory with consistent lexical context.
The AST is expected to define some symbol with a name given by `entity_name`.
This mechanism ensures that the resulting transformed entity has lexical
scoping identical to that of the source entity, while allowing extra
parametrization.
Two nested factories achieve the following:
1. The inner factory dynamically creates the entity represented by `nodes`.
2. The inner factory is parametrized by a custom set of arguments.
3. The inner factory has a closure identical to that of the transformed
entity.
4. The inner factory has local variables named like `args`, which `nodes` may
use as additional parameters.
5. The inner factory returns the variables given by `entity_name`.
6. The outer factory is niladic.
7. The outer factory has no closure.
8. The outer factory creates the necessary lexical scope for the inner
factory, so that the loaded code has the given configuration for
closure/globals.
9. The outer factory returns the inner factory.
Roughly speaking, the following code is generated:
from __future__ import future_feature_1
from __future__ import future_feature_2
...
def outer_factory():
closure_var_1 = None
closure_var_2 = None
...
def inner_factory(arg_1, arg_2, ...):
<<nodes>>
return entity
return inner_factory
The lexical scoping is created using dummy symbol declarations which create
local variables in the body of the outer factory, so that the Python parser
correctly marks them as free non-global variables upon load (that is, it
creates cell slots for each symbol). These symbols are initialized with None,
but their values are not expected to be used; instead, the caller is expected
to replace them with the cells of the source entity. For more details, see:
https://docs.python.org/3/reference/executionmodel.html#binding-of-names
Args:
nodes: Tuple[ast.AST], the source code to wrap.
entity_name: Union[Text, ast.AST], the name of the principal entity that
`nodes` define.
inner_factory_name: Text, the name of the inner factory.
outer_factory_name: Text, the name of the outer factory.
closure_vars: Iterable[Text], names of the closure variables for the inner
factory.
factory_args: Iterable[Text], names of additional arguments for the
inner factory. Useful to configure variables that the converted code can
use. Typically, these are modules.
future_features: Iterable[Text], names of future statements to associate the
code with.
Returns:
ast.AST
"""
dummy_closure_defs = []
for var_name in closure_vars:
template = """
var_name = None
"""
dummy_closure_defs.extend(templates.replace(template, var_name=var_name))
if future_features:
future_imports = gast.ImportFrom(
module='__future__',
names=[gast.alias(name=name, asname=None) for name in future_features],
level=0)
else:
future_imports = []
factory_args = [
gast.Name(name, ctx=gast.Param(), annotation=None, type_comment=None)
for name in factory_args
]
template = """
future_imports
def outer_factory_name():
dummy_closure_defs
def inner_factory_name(factory_args):
entity_defs
return entity_name
return inner_factory_name
"""
return templates.replace(
template,
dummy_closure_defs=dummy_closure_defs,
entity_defs=nodes,
entity_name=entity_name,
factory_args=factory_args,
future_imports=future_imports,
inner_factory_name=inner_factory_name,
outer_factory_name=outer_factory_name)
class _PythonFnFactory(object):
"""Helper object that wraps a Python function factory."""
def __init__(self, name, freevars, extra_locals):
"""Creates a new factory for a Python function.
Args:
name: The function name.
freevars: The list of non-global free variables for the function.
extra_locals: Dict[Text, Any], names and values for custom variables that
are accessible to the generated code as local variables.
"""
self._name = name
self._freevars = freevars
self._extra_locals = extra_locals
self._unbound_factory = None
self.module = None
self.source_map = None
def create(self,
nodes,
namer,
inner_factory_name='inner_factory',
outer_factory_name='outer_factory',
future_features=()):
"""Initializes a function."""
if self._unbound_factory is not None:
raise ValueError('double initialization; create a new object instead')
inner_factory_name = namer.new_symbol(inner_factory_name, ())
outer_factory_name = namer.new_symbol(outer_factory_name, ())
nodes = _wrap_into_factory(nodes, self._name, inner_factory_name,
outer_factory_name, self._freevars,
self._extra_locals.keys(), future_features)
module, _, source_map = loader.load_ast(
nodes, include_source_map=True)
outer_factory = getattr(module, outer_factory_name)
self._unbound_factory = outer_factory()
self.module = module
self.source_map = source_map
def instantiate(self,
globals_,
closure,
defaults=None,
kwdefaults=None):
"""Creates a new function instance."""
if self._unbound_factory is None:
raise ValueError('call create first')
factory_code = self._unbound_factory.__code__
factory_freevars = factory_code.co_freevars
closure_map = dict(zip(self._freevars, closure))
factory_closure = tuple(
closure_map[name] for name in factory_code.co_freevars)
if len(factory_closure) != len(closure):
raise ValueError(
'closure mismatch, requested {}, but source function had {}'.format(
self._freevars, factory_freevars))
bound_factory = types.FunctionType(
code=factory_code,
globals=globals_,
name=self._name,
argdefs=(),
closure=factory_closure)
# The lint override is a false positive.
new_fn = bound_factory(**self._extra_locals) # pylint:disable=not-callable
if defaults:
new_fn.__defaults__ = defaults
if kwdefaults:
new_fn.__kwdefaults__ = kwdefaults
return new_fn
class GenericTranspiler(object):
"""A generic transpiler for Python functions.
Its interface is the `transform` API, which can process Python function
objects. Internally, it handles parsing.
Users typically subclass this, customizing the `transform_ast` method. The
output of transform_ast is returned directly by `transform`. Existing
methods like `transform_function` may also be overloaded.
Example:
class MyTransformer(GenericTranspiler):
def transform_ast(self, node, ctx):
result = <<transform node>>
return result
transformer = MyTransformer()
result = transformer.transform(f, ...)
# result is the output
"""
def get_transformed_name(self, node):
"""Returns a name for the output function. Subclasses may override this."""
if isinstance(node, gast.Lambda):
return 'lam'
elif isinstance(node, gast.FunctionDef):
return node.name
raise ValueError('Unknown node type {}'.format(node))
def transform_ast(self, node, ctx):
"""Performs an actual transformation of a function's AST.
Subclasses must implement this method, and do not usually call it.
Args:
node: One or more ast.AST nodes representing the AST to be transformed.
ctx: transformer.Context.
"""
raise NotImplementedError('subclasses must override this')
def transform(self, obj, user_context):
"""Transforms a Python object.
Users typically call this method.
Args:
obj: A Python object, function, type, etc.
user_context: An opaque object (may be None) that is forwarded to
transform_ast, through the ctx.user_context argument.
Returns:
The result of calling transform_function.
Raises:
NotImplementedError: if the type of obj is not handled.
"""
if inspect.isfunction(obj) or inspect.ismethod(obj):
return self.transform_function(obj, user_context)
raise NotImplementedError('Non-function: {}'.format(type(obj)))
def _erase_arg_defaults(self, node):
"""Erase arg default expressions, which would otherwise be unbound."""
args = node.args
for i in range(len(args.defaults)):
args.defaults[i] = parser.parse_expression('None')
for i, d in enumerate(args.kw_defaults):
if d is not None:
args.kw_defaults[i] = parser.parse_expression('None')
return node
def transform_module(self, mod, user_context):
"""Transforms a module.
Subclasses may override this method. The return value is opaque.
The method receives the original AST. The result is passed as-is to the
output of `transform`.
Args:
mod: A Python module.
user_context: An opaque object (may be None) that is forwarded to
transform_ast, through the ctx.user_context argument.
Returns:
List[Tuple[Any, Any]]. By default it returns the output of transform_ast,
evaluated on each supported member, other than modules, together with a
`transformer.Context` containing information about the transformation
process.
"""
result = []
for member in mod.__dict__.values():
if inspect.ismodule(member):
continue # Not transforming modules recursively.
try:
result.append(self.transform(member, user_context))
except NotImplementedError:
pass # Skip unsupported elements.
return result
def transform_function(self, fn, user_context):
"""Transforms a function.
Subclasses may override this method. The return value is opaque.
The method receives the original AST. The result is passed as-is to the
output of `transform`.
Args:
fn: A function or lambda.
user_context: An opaque object (may be None) that is forwarded to
transform_ast, through the ctx.user_context argument.
Returns:
Tuple[Any, Any]. By default it returns the output of transform_ast,
together with a `transformer.Context` containing information about the
transformation process.
"""
future_features = inspect_utils.getfutureimports(fn)
node, source = parser.parse_entity(fn, future_features=future_features)
logging.log(3, 'Source code of %s:\n\n%s\n', fn, source)
origin_info.resolve_entity(node, source, fn)
namespace = inspect_utils.getnamespace(fn)
namer = naming.Namer(namespace)
new_name = namer.new_symbol(self.get_transformed_name(node), ())
entity_info = transformer.EntityInfo(
name=new_name,
source_code=source,
source_file='<fragment>',
future_features=future_features,
namespace=namespace)
context = transformer.Context(entity_info, namer, user_context)
node = self._erase_arg_defaults(node)
result = self.transform_ast(node, context)
return result, context
class PyToPy(GenericTranspiler):
"""A generic Python-to-Python transpiler.
Its `transform` method offers a function-in, function-out interface.
Internally, it takes care of parsing, caching and loading of the translated
code.
Users typically subclass this, overriding `transform_ast`.
Usually, instances of this class are singletons, since each instance manages
its own cache. The caching can be controlled by overriding `get_caching_key`.
Example:
class MyTransformer(PyToPy):
def transform_ast(self, node, ctx):
node = <<transform node, usually using ast.NodeTransformer classes>>
return node
transformer = MyTransformer()
new_f, module, source_map = transformer.transform_function(f, ...)
# new_f is a function with signature identical to f
The transformed function has access to the same namespace as the original
function. To allow access to internal APIs, users may inject additional
symbols by overriding `get_extra_locals`.
"""
def __init__(self):
self._cache_lock = threading.RLock()
self._cache = cache.CodeObjectCache()
def get_extra_locals(self):
"""Returns extra static local variables to be made to transformed code.
Subclasses must override this.
Returns:
extra_locals: A Dict[Text, Any] containing additional variables to make
available to the transformed code.
"""
raise NotImplementedError('subclasses must override this')
def get_caching_key(self, user_context):
"""Returns a unique key to use for caching.
Subclasses must override this.
Calls made to `transform_function` with functions that have the same code
object and caching key will return a cached instance on subsequent
invocations.
Args:
user_context: The context object which was passed to `transform`.
Returns:
A hashable object, used as part of the cache key.
"""
raise NotImplementedError('subclasses must override this')
def _cached_factory(self, fn, cache_subkey):
cached_factory = self._cache[fn][cache_subkey]
logging.log(3, 'Cache hit for %s subkey %s: %s', fn, cache_subkey,
cached_factory)
return cached_factory
def transform_function(self, fn, user_context):
"""Transforms a function. See GenericTranspiler.trasnform_function.
This overload wraps the parent's `transform_function`, adding caching and
facilities to instantiate the output as a Python object. It also
adds facilities to make new symbols available to the generated Python code,
visible as local variables - see `get_extra_locals`.
Args:
fn: A function or lambda.
user_context: An opaque object (may be None) that is forwarded to
transform_ast, through the ctx.user_context argument.
Returns:
A tuple:
* A function or lambda with the same signature and closure as `fn`
* The temporary module into which the transformed function was loaded
* The source map as a
Dict[origin_info.LineLocation, origin_info.OriginInfo]
"""
cache_subkey = self.get_caching_key(user_context)
if self._cache.has(fn, cache_subkey):
# Fast path: use a lock-free check.
factory = self._cached_factory(fn, cache_subkey)
else:
with self._cache_lock:
# Check again under lock.
if self._cache.has(fn, cache_subkey):
factory = self._cached_factory(fn, cache_subkey)
else:
logging.log(1, '%s is not cached for subkey %s', fn, cache_subkey)
# TODO(mdan): Confusing overloading pattern. Fix.
nodes, ctx = super(PyToPy, self).transform_function(fn, user_context)
if isinstance(nodes, gast.Lambda):
nodes = gast_util.compat_assign(
targets=[
gast.Name(
ctx.info.name,
ctx=gast.Store(),
annotation=None,
type_comment=None)
],
value=nodes,
type_comment=None)
else:
nodes.name = ctx.info.name
if logging.has_verbosity(2):
logging.log(2, 'Transformed %s:\n\n%s\n', fn, parser.unparse(nodes))
factory = _PythonFnFactory(
ctx.info.name, fn.__code__.co_freevars, self.get_extra_locals())
factory.create(
nodes, ctx.namer, future_features=ctx.info.future_features)
self._cache[fn][cache_subkey] = factory
transformed_fn = factory.instantiate(
globals_=fn.__globals__,
closure=fn.__closure__ or (),
defaults=fn.__defaults__,
kwdefaults=getattr(fn, '__kwdefaults__', None))
return transformed_fn, factory.module, factory.source_map
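# A minimal sketch of a concrete transpiler (illustrative only; the class and
# the identity transformation below are hypothetical, not part of this module):
#
#   class NoopTranspiler(PyToPy):
#
#     def get_caching_key(self, user_context):
#       return 0          # a single cache bucket
#
#     def get_extra_locals(self):
#       return {}         # no extra symbols injected
#
#     def transform_ast(self, node, ctx):
#       return node       # identity transformation
#
#   new_f, module, source_map = NoopTranspiler().transform(some_function, None)
#   # new_f has the same signature and closure as some_function, but was
#   # re-parsed, re-compiled and loaded from a temporary module.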
|
DALI-main
|
dali/python/nvidia/dali/_autograph/pyct/transpiler.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AST conversion templates.
Adapted from Tangent.
"""
import ast
import textwrap
import gast
from nvidia.dali._autograph.pyct import anno
from nvidia.dali._autograph.pyct import ast_util
from nvidia.dali._autograph.pyct import parser
from nvidia.dali._autograph.pyct import qual_names
class ContextAdjuster(gast.NodeTransformer):
"""Adjusts the ctx field of nodes to ensure consistency.
This transformer can change the ctx fields of a variable, tuple and other
AST elements that allow one, based on whether the element is being read or
written.
"""
def __init__(self, override_value):
self._ctx_override = override_value
def visit(self, node):
original_override = self._ctx_override
node = super(ContextAdjuster, self).visit(node)
if hasattr(node, 'ctx'):
assert node.ctx is not None, 'node {} has ctx unset'.format(node)
self._ctx_override = original_override
return node
def _apply_override(self, node):
if self._ctx_override is not None:
node.ctx = self._ctx_override()
def visit_Attribute(self, node):
self._apply_override(node)
self._ctx_override = gast.Load
node = self.generic_visit(node)
return node
def visit_Tuple(self, node):
self._apply_override(node)
return self.generic_visit(node)
def visit_List(self, node):
self._apply_override(node)
return self.generic_visit(node)
def visit_Name(self, node):
self._apply_override(node)
return self.generic_visit(node)
def visit_Call(self, node):
self._apply_override(node)
# We may be able to override these to Load(), but for now it's simpler
# to just assert that they're set.
self._ctx_override = None
return self.generic_visit(node)
def visit_Dict(self, node):
# We may be able to override these to Load(), but for now it's simpler
# to just assert that they're set.
self._ctx_override = None
return self.generic_visit(node)
def visit_Subscript(self, node):
self._apply_override(node)
self._ctx_override = gast.Load
node.value = self.visit(node.value)
return self.generic_visit(node)
def visit_comprehension(self, node):
# We may be able to override some of these, but for now it's simpler
# to just assert that they're set.
self._ctx_override = None
return self.generic_visit(node)
def visit_Lambda(self, node):
# We may be able to override some of these, but for now it's simpler
# to just assert that they're set.
self._ctx_override = None
return self.generic_visit(node)
class ReplaceTransformer(gast.NodeTransformer):
"""Replace AST nodes."""
def __init__(self, replacements):
"""Create a new ReplaceTransformer.
Args:
replacements: A mapping from placeholder names to (lists of) AST nodes
that these placeholders will be replaced by.
"""
self.replacements = replacements
self.in_replacements = False
self.preserved_annos = {
anno.Basic.DIRECTIVES,
anno.Basic.EXTRA_LOOP_TEST,
anno.Basic.ORIGIN,
anno.Basic.SKIP_PROCESSING,
anno.Static.ORIG_DEFINITIONS,
'function_context_name',
}
def _prepare_replacement(self, replaced, key):
"""Prepares a replacement AST that's safe to swap in for a node.
Args:
replaced: ast.AST, the node being replaced
key: Hashable, the key of the replacement AST
Returns:
ast.AST, the replacement AST
"""
repl = self.replacements[key]
new_nodes = ast_util.copy_clean(repl, preserve_annos=self.preserved_annos)
if isinstance(new_nodes, gast.AST):
new_nodes = [new_nodes]
return new_nodes
def visit_Expr(self, node):
# When replacing a placeholder with an entire statement, the replacement
# must stand on its own and not be wrapped in an Expr.
new_value = self.visit(node.value)
if new_value is node.value:
return node
return new_value
def visit_keyword(self, node):
if node.arg not in self.replacements:
return self.generic_visit(node)
repl = self._prepare_replacement(node, node.arg)
if isinstance(repl, gast.keyword):
return repl
elif (repl and isinstance(repl, (list, tuple)) and
all(isinstance(r, gast.keyword) for r in repl)):
return repl
# TODO(mdan): We may allow replacing with a string as well.
# For example, if one wanted to replace foo with bar in foo=baz, then
# we could allow changing just node arg, so that we end up with bar=baz.
raise ValueError(
'a keyword argument may only be replaced by another keyword or a '
'non-empty list of keywords. Found: {} for keyword {}'.format(
repl, node.arg))
def visit_FunctionDef(self, node):
node = self.generic_visit(node)
if node.name not in self.replacements:
return node
repl = self.replacements[node.name]
if not isinstance(repl, (gast.Name, ast.Name)):
raise ValueError(
'a function name can only be replaced by a Name node. Found: %s' %
repl)
node.name = repl.id
return node
def visit_Attribute(self, node):
node = self.generic_visit(node)
if node.attr not in self.replacements:
return node
repl = self.replacements[node.attr]
if not isinstance(repl, gast.Name):
raise ValueError(
'An attribute can only be replaced by a Name node. Found: %s' % repl)
node.attr = repl.id
return node
def visit_Name(self, node):
if node.id not in self.replacements:
return node
new_nodes = self._prepare_replacement(node, node.id)
if not new_nodes:
return new_nodes
# Preserve the target context.
adjuster = ContextAdjuster(type(node.ctx))
for n in new_nodes:
if hasattr(n, 'ctx'):
adjuster.visit(n)
if len(new_nodes) == 1:
new_nodes, = new_nodes
return new_nodes
def _convert_to_ast(n):
"""Converts from a known data type to AST."""
# Note: When generating AST nodes from strings/QNs in isolation, ctx is
# unknown. ctx must be filled in according to the template being used.
# See ReplaceTransformer.visit_Name.
if isinstance(n, str):
return gast.Name(id=n, ctx=None, annotation=None, type_comment=None)
if isinstance(n, qual_names.QN):
return n.ast()
if isinstance(n, list):
return [_convert_to_ast(e) for e in n]
if isinstance(n, tuple):
return tuple(_convert_to_ast(e) for e in n)
return n
def replace(template, **replacements):
"""Replaces placeholders in a Python template.
AST Name and Tuple nodes always receive the context inferred from
the template. However, when replacing more complex nodes (which can potentially
contain Name children), the caller is responsible for setting the
appropriate context.
Args:
template: A string representing Python code. Any symbol name that appears in
the template code can be used as a placeholder.
**replacements: A mapping from placeholder names to (lists of) AST nodes
that these placeholders will be replaced by. String values are also
supported as a shorthand for AST Name nodes with the respective ID.
Returns:
An AST node or list of AST nodes with the replacements made. If the
template was a function, a list will be returned. If the template was a
node, the same node will be returned. If the template was a string, an
AST node will be returned (a `Module` node in the case of a multi-line
string, an `Expr` node otherwise).
Raises:
ValueError: if the arguments are incorrect.
"""
if not isinstance(template, str):
raise ValueError('Expected string template, got %s' % type(template))
for k in replacements:
replacements[k] = _convert_to_ast(replacements[k])
template_str = parser.STANDARD_PREAMBLE + textwrap.dedent(template)
nodes = parser.parse(
template_str,
preamble_len=parser.STANDARD_PREAMBLE_LEN,
single_node=False)
results = []
for node in nodes:
node = ReplaceTransformer(replacements).visit(node)
if isinstance(node, (list, tuple)):
results.extend(node)
else:
results.append(node)
results = [qual_names.resolve(r) for r in results]
return results
def replace_as_expression(template, **replacements):
"""Variant of replace that generates expressions, instead of code blocks."""
replacement = replace(template, **replacements)
if len(replacement) != 1:
raise ValueError(
'single expression expected; for more general templates use replace')
node, = replacement
if isinstance(node, gast.Expr):
return node.value
elif isinstance(node, gast.Name):
return node
raise ValueError(
'the template is expected to generate an expression or a name node;'
' instead found %s' % node)
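# A minimal usage sketch (hypothetical, not part of the library): it exercises only
# the `replace` and `replace_as_expression` helpers defined above, substituting
# placeholder names in small templates.
if __name__ == '__main__':
    nodes = replace('target = value + 1', target='x', value='y')
    print(parser.unparse(nodes))  # expected to print something like: x = y + 1
    call = replace_as_expression('fn(arg)', fn='foo', arg='bar')
    print(parser.unparse(call))   # expected to print something like: foo(bar)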
|
DALI-main
|
dali/python/nvidia/dali/_autograph/pyct/templates.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Print an AST tree in a form more readable than ast.dump."""
import gast
import six
class PrettyPrinter(gast.NodeVisitor):
"""Print AST nodes."""
def __init__(self, color, noanno):
self.indent_lvl = 0
self.result = ''
self.color = color
self.noanno = noanno
def _color(self, string, color, attrs=None):
# TODO(klecki): Add support for color if needed. Currently it's disabled to avoid inflating
# the list of DALI dependencies.
# if self.color:
# return termcolor.colored(string, color, attrs=attrs)
return string
def _type(self, node):
return self._color(node.__class__.__name__, None, ['bold'])
def _field(self, name):
return self._color(name, 'blue')
def _value(self, name):
return self._color(name, 'magenta')
def _warning(self, name):
return self._color(name, 'red')
def _indent(self):
return self._color('| ' * self.indent_lvl, None, ['dark'])
def _print(self, s):
self.result += s
self.result += '\n'
def generic_visit(self, node, name=None):
# In very rare instances, a list can contain something other than a Node.
# e.g. Global contains a list of strings.
if isinstance(node, str):
if name:
self._print('%s%s="%s"' % (self._indent(), name, node))
else:
self._print('%s"%s"' % (self._indent(), node))
return
if node._fields:
cont = ':'
else:
cont = '()'
if name:
self._print('%s%s=%s%s' % (self._indent(), self._field(name),
self._type(node), cont))
else:
self._print('%s%s%s' % (self._indent(), self._type(node), cont))
self.indent_lvl += 1
for f in node._fields:
if self.noanno and f.startswith('__'):
continue
if not hasattr(node, f):
self._print('%s%s' % (self._indent(), self._warning('%s=<unset>' % f)))
continue
v = getattr(node, f)
if isinstance(v, list):
if v:
self._print('%s%s=[' % (self._indent(), self._field(f)))
self.indent_lvl += 1
for n in v:
if n is not None:
self.generic_visit(n)
else:
self._print('%sNone' % (self._indent()))
self.indent_lvl -= 1
self._print('%s]' % (self._indent()))
else:
self._print('%s%s=[]' % (self._indent(), self._field(f)))
elif isinstance(v, tuple):
if v:
self._print('%s%s=(' % (self._indent(), self._field(f)))
self.indent_lvl += 1
for n in v:
if n is not None:
self.generic_visit(n)
else:
self._print('%sNone' % (self._indent()))
self.indent_lvl -= 1
self._print('%s)' % (self._indent()))
else:
self._print('%s%s=()' % (self._indent(), self._field(f)))
elif isinstance(v, gast.AST):
self.generic_visit(v, f)
elif isinstance(v, six.binary_type):
self._print('%s%s=%s' % (self._indent(), self._field(f),
self._value('b"%s"' % v)))
elif isinstance(v, six.text_type):
self._print('%s%s=%s' % (self._indent(), self._field(f),
self._value('u"%s"' % v)))
else:
self._print('%s%s=%s' % (self._indent(), self._field(f),
self._value(v)))
self.indent_lvl -= 1
def fmt(node, color=True, noanno=False):
printer = PrettyPrinter(color, noanno)
if isinstance(node, (list, tuple)):
for n in node:
printer.visit(n)
else:
printer.visit(node)
return printer.result
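# A minimal usage sketch (hypothetical, not part of the library): parse a small
# function with gast and dump its AST through `fmt` defined above.
if __name__ == '__main__':
    tree = gast.parse('def f(x):\n    return x + 1\n')
    print(fmt(tree, color=False))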
|
DALI-main
|
dali/python/nvidia/dali/_autograph/pyct/pretty_printer.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converting AST to code and Python entities.
Adapted from Tangent.
"""
import atexit
import errno
import importlib
import os
import sys
import tempfile
from nvidia.dali._autograph.pyct import origin_info
from nvidia.dali._autograph.pyct import parser
def _remove_file(file_name):
"""Remove a file, if it exists."""
try:
os.remove(file_name)
except OSError as e:
if e.errno == errno.ENOENT:
# The file disappeared. Ignore this. Temporary files might get
# cleaned up, especially if they reside in /tmp.
pass
else:
raise
def load_source(source, delete_on_exit):
"""Loads the given source code as a Python module."""
with tempfile.NamedTemporaryFile(
mode='w',
suffix='.py',
prefix='__autograph_generated_file',
delete=False,
encoding='utf-8') as f:
module_name = os.path.basename(f.name[:-3])
file_name = f.name
f.write(source)
if delete_on_exit:
atexit.register(lambda: _remove_file(file_name))
spec = importlib.util.spec_from_file_location(module_name, file_name)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
# TODO(mdan): Use our own garbage-collected cache instead of sys.modules.
sys.modules[module_name] = module
return module, file_name
def load_ast(nodes,
indentation=' ',
include_source_map=False,
delete_on_exit=True):
"""Loads the given AST as a Python module.
Compiling the AST code this way ensures that the source code is readable by
e.g. `pdb` or `inspect`.
Args:
nodes: Union[ast.AST, Iterable[ast.AST]], the code to compile, as an AST
object.
indentation: Text, the string to use for indentation.
include_source_map: bool, whether to return a source map.
delete_on_exit: bool, whether to delete the temporary file used for
compilation on exit.
Returns:
Tuple[module, Text, Dict[LineLocation, OriginInfo]], containing:
the module containing the unparsed nodes, the source code corresponding to
nodes, and the source map. If include_source_map is False, the source map
will be None.
"""
if not isinstance(nodes, (list, tuple)):
nodes = (nodes,)
source = parser.unparse(nodes, indentation=indentation)
module, _ = load_source(source, delete_on_exit)
if include_source_map:
source_map = origin_info.create_source_map(nodes, source, module.__file__)
else:
source_map = None
# TODO(mdan): Return a structured object.
return module, source, source_map
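# A minimal usage sketch (hypothetical, not part of the library): parse a tiny
# function with the pyct parser, then load it back as an importable module through
# `load_ast` defined above.
if __name__ == '__main__':
    node = parser.parse('def double(x):\n    return x * 2\n')
    module, source, _ = load_ast(node)
    print(source)
    print(module.double(21))  # expected: 42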
|
DALI-main
|
dali/python/nvidia/dali/_autograph/pyct/loader.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A node transformer that includes utilities for SCT."""
import collections
import enum
import gast
from nvidia.dali._autograph.pyct import anno
from nvidia.dali._autograph.pyct import parser
from nvidia.dali._autograph.pyct import pretty_printer
from nvidia.dali._autograph.pyct import templates
class AnalysisLevel(enum.IntEnum):
NONE = 0
ACTIVITY = 1
DEFINEDNESS = 2
LIVENESS = 3
# TODO(znado): Use namedtuple.
class Context(object):
"""Contains information about a source code transformation.
This object is mutable, and is updated during conversion. Not thread safe.
Attributes:
info: EntityInfo, immutable.
namer: naming.Namer.
current_origin: origin_info.OriginInfo, holds the OriginInfo of the last
AST node to be processed successfully. Useful for error handling.
user: A user-supplied context object. The object is opaque to the
infrastructure, but will be passed through to all custom transformations.
"""
def __init__(self, info, namer, user_context):
self.info = info
self.namer = namer
self.current_origin = None
self.user = user_context
# TODO(mdan): Move to a standalone file.
class EntityInfo(
collections.namedtuple(
'EntityInfo',
('name', 'source_code', 'source_file', 'future_features', 'namespace'))
):
"""Contains information about a Python entity.
Immutable.
Examples of entities include functions and classes.
Attributes:
name: The name that identifies this entity.
source_code: The entity's source code.
source_file: The entity's source file.
future_features: Tuple[Text], the future features that this entity was
compiled with. See
https://docs.python.org/2/reference/simple_stmts.html#future.
namespace: Dict[str, ], containing symbols visible to the entity (excluding
parameters).
"""
pass
class _StateStack(object):
"""Templated context manager.
This class provides syntactic sugar for a stack of objects of known
type. It allows accessing attributes of the object at the top of the stack
directly against this object, which makes for very terse syntax.
For example, this code:
stack = _StateStack(Foo)
stack.enter()
stack.bar
Is equivalent to:
stack = []
stack.append(Foo())
foo = stack[-1]
foo.bar
See _State for more on how this is used.
Attributes:
type: Any, the type of objects that this stack holds
level: int, the current stack depth
stack: List[Any], the actual stack
value: Any, the instance of the object at the top of the stack
"""
def __init__(self, type_):
# Because we override __setattr__, we need to attach these attributes using
# the superclass' setattr.
object.__setattr__(self, 'type', type_)
object.__setattr__(self, '_stack', [])
if not hasattr(type_, 'no_root'):
self.enter()
def __enter__(self):
self.enter()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.exit()
def enter(self):
self._stack.append(self.type())
def exit(self):
self._stack.pop()
@property
def stack(self):
return self._stack
@property
def level(self):
return len(self._stack)
@property
def value(self):
return self._stack[-1]
def __iter__(self):
return iter(self._stack)
def __getattr__(self, key):
return getattr(self._stack[-1], key)
def __setattr__(self, key, value):
setattr(self._stack[-1], key, value)
class _State(object):
"""Syntactic sugar for accessing an instance of a StateStack context manager.
This structure offers syntactic sugar over a dict of stacks of objects
of known type. These structures are useful to keep state during AST walks.
Multiple different scopes can be tracked in parallel. For example:
s = _State()
s[foo].enter()
s[bar].enter() # this will not affect s[foo]
Element access has special semantics:
* keys are a data type
* element values are _StateStack(type=key) objects
* missing elements are automatically added, similarly to defaultdict
For example, the following block:
s = _State()
s[Foo]
Is equivalent to:
s = {}
if Foo not in s:
s[Foo] = Foo()
s[Foo]
See Base for how it's used.
"""
def __init__(self):
self._value = {}
def __getitem__(self, key):
if key not in self._value:
self._value[key] = _StateStack(key)
return self._value[key]
class NodeStateTracker(object):
"""Base class for general-purpose Python code transformation.
This abstract class provides helpful functions, like state tracking within
the scope of an arbitrary node, helpers for processing code blocks, debugging,
mapping of transformed code to original code, and others.
Scope-local state tracking: to keep state across nodes, at the level of
(possibly nested) scopes, use enter/exit_local_scope and set/get_local.
You must call enter/exit_local_scope manually, but the transformer detects
when they are not properly paired.
The transformer allows keeping state across calls that is local
to arbitrary nodes and their descendants, using the self.state attribute.
Multiple independent scopes are allowed and automatically constructed.
For example, to keep track of the `If` node that encloses any `Name` node,
one can write:
```
class FooType(object):
def __init__(self):
self.foo_property = None
class DummyTransformer(NodeStateTracker, ast.NodeTransformer):
def visit_If(self, node):
self.state[FooType].enter()
self.state[FooType].foo_property = node
node = self.generic_visit(node)
self.state[FooType].exit()
return node
def visit_Name(self, node):
self.state[FooType].foo_property # will hold the innermost enclosing if
```
Alternatively, the `enter()`/`exit()` calls can be managed by a `with`
statement:
```
def visit_If(self, node):
with self.state[FooType] as foo:
foo.foo_property = node
return self.generic_visit(node)
```
"""
# TODO(mdan): Document all extra features.
def __init__(self, ctx):
"""Initialize the transformer.
Subclasses should call this.
Args:
ctx: A Context object.
"""
self._lineno = 0
self._col_offset = 0
self.ctx = ctx
# Allows scoping of local variables to keep state across calls to visit_*
# methods. Multiple scope hierarchies may exist and are keyed by tag. A
# scope is valid at one or more nodes and all its children. Scopes created
# in child nodes supersede their parent. Scopes are isolated from one
# another.
self.state = _State()
def debug_print(self, node):
"""Helper method useful for debugging. Prints the AST."""
if __debug__:
print(pretty_printer.fmt(node))
return node
def debug_print_src(self, node):
"""Helper method useful for debugging. Prints the AST as code."""
if __debug__:
print(parser.unparse(node))
return node
def visit_block(self, nodes, before_visit=None, after_visit=None):
"""A more powerful version of generic_visit for statement blocks.
An example of a block is the body of an if statement.
This function allows specifying a postprocessing callback (the after_visit
argument) which can be used to move nodes to a new destination. This is done
by having after_visit return a non-null second return value,
e.g. return new_node, new_destination.
For example, a transformer could perform the following move:
foo()
bar()
baz()
foo()
if cond:
bar()
baz()
The above could be done with a postprocessor of this kind:
def after_visit(node):
if node_is_function_call(bar):
new_container_node = build_cond()
new_container_node.body.append(node)
return new_container_node, new_container_node.body
else:
# Once we set a new destination, all subsequent items will be
# moved to it, so we don't need to explicitly handle baz.
return node, None
Args:
nodes: enumerable of AST node objects. If None, the function returns None.
before_visit: optional callable that is called before visiting each item
in nodes
after_visit: optional callable that takes in an AST node and returns a
tuple (new_node, new_destination). It is called after visiting each item
in nodes. It is used in the same way as the
visit_* methods: new_node will replace the node; if not None,
new_destination must be a list, and subsequent nodes will be placed
in this list instead of the list returned by visit_block.
Returns:
A list of AST node objects containing the transformed items from nodes,
except those nodes that have been relocated using after_visit.
"""
if nodes is None:
return None
results = []
node_destination = results
for node in nodes:
if before_visit:
# TODO(mdan): We can modify node here too, if ever needed.
before_visit()
replacement = self.visit(node)
if after_visit and replacement:
replacement, new_destination = after_visit(replacement)
else:
new_destination = None
if replacement:
if isinstance(replacement, (list, tuple)):
node_destination.extend(replacement)
else:
node_destination.append(replacement)
# Allow the postprocessor to reroute the remaining nodes to a new list.
if new_destination is not None:
node_destination = new_destination
return results
# TODO(mdan): Rename to PythonCodeTransformer.
class Base(NodeStateTracker, gast.NodeTransformer):
"""Base class for general-purpose Python-to-Python code transformation.
This is an extension of ast.NodeTransformer that provides the additional
functions offered by NodeStateTracker.
"""
def create_assignment(self, target, expression):
template = """
target = expression
"""
return templates.replace(template, target=target, expression=expression)
# TODO(mdan): Remove.
def apply_to_single_assignments(self, targets, values, apply_fn):
"""Applies a function to each individual assignment.
This function can process a possibly-unpacked (e.g. a, b = c, d) assignment.
It tries to break down the unpacking if possible. In effect, it is equivalent
to passing the assigned values in SSA form to apply_fn.
Examples:
The following will result in apply_fn(a, c), apply_fn(b, d):
a, b = c, d
The following will result in apply_fn(a, c[0]), apply_fn(b, c[1]):
a, b = c
The following will result in apply_fn(a, (b, c)):
a = b, c
It uses the visitor pattern to allow subclasses to process single
assignments individually.
Args:
targets: list, tuple of or individual AST node. Should be used with the
targets field of an ast.Assign node.
values: an AST node.
apply_fn: a function of a single argument, which will be called with the
respective nodes of each single assignment. The signature is
apply_fn(target, value), no return value.
"""
if not isinstance(targets, (list, tuple)):
targets = (targets,)
for target in targets:
if isinstance(target, (gast.Tuple, gast.List)):
for i in range(len(target.elts)):
target_el = target.elts[i]
if isinstance(values, (gast.Tuple, gast.List)):
value_el = values.elts[i]
else:
value_el = gast.Subscript(values, i, ctx=gast.Store())
self.apply_to_single_assignments(target_el, value_el, apply_fn)
else:
# TODO(mdan): Look into allowing to rewrite the AST here.
apply_fn(target, values)
def visit(self, node):
if not isinstance(node, gast.AST):
# This is not that uncommon a mistake: various node bodies are lists, for
# example, posing a land mine for transformers that need to recursively
# call `visit`. The error needs to be raised before the exception handler
# below is installed, because said handler will mess up if `node` is not,
# in fact, a node.
msg = ('invalid value for "node": expected "ast.AST", got "{}"; to'
' visit lists of nodes, use "visit_block" instead').format(
type(node))
raise ValueError(msg)
if anno.hasanno(node, anno.Basic.SKIP_PROCESSING):
return node
parent_origin = self.ctx.current_origin
if anno.hasanno(node, anno.Basic.ORIGIN):
self.ctx.current_origin = anno.getanno(node, anno.Basic.ORIGIN)
try:
processing_expr_node = isinstance(node, gast.Expr)
if processing_expr_node:
entry_expr_value = node.value
result = super(Base, self).visit(node)
# Adjust for consistency: replacing the value of an Expr with
# an Assign node removes the need for the Expr node.
if (processing_expr_node and isinstance(result, gast.Expr) and
(result.value is not entry_expr_value)):
# When the replacement is a list, it is assumed that the list came
# from a template that contained a number of statements, which
# themselves are standalone and don't require an enclosing Expr.
if isinstance(result.value,
(list, tuple, gast.Assign, gast.AugAssign)):
result = result.value
# By default, all replacements receive the origin info of the replaced
# node.
if result is not node and result is not None:
inherited_origin = anno.getanno(
node, anno.Basic.ORIGIN, default=parent_origin)
if inherited_origin is not None:
nodes_to_adjust = result
if isinstance(result, (list, tuple)):
nodes_to_adjust = result
else:
nodes_to_adjust = (result,)
for n in nodes_to_adjust:
if not anno.hasanno(n, anno.Basic.ORIGIN):
anno.setanno(n, anno.Basic.ORIGIN, inherited_origin)
finally:
self.ctx.current_origin = parent_origin
return result
class CodeGenerator(NodeStateTracker, gast.NodeVisitor):
"""Base class for general-purpose Python-to-string code transformation.
Similar to Base, but outputs arbitrary strings instead of a Python AST.
This uses the same visitor mechanism that the standard NodeVisitor uses,
meaning that subclasses write handlers for the different kinds of nodes.
New code is generated using the emit method, which appends to a code buffer
that can be afterwards obtained from code_buffer.
Example:
class SimpleCodeGen(CodeGenerator):
def visitIf(self, node):
self.emit('if ')
self.visit(node.test)
self.emit(' { ')
self.visit(node.body)
self.emit(' } else { ')
self.visit(node.orelse)
self.emit(' } ')
node = ast.parse(...)
gen = SimpleCodeGen()
gen.visit(node)
# gen.code_buffer contains the resulting code
"""
def __init__(self, ctx):
super(CodeGenerator, self).__init__(ctx)
self._output_code = ''
self.source_map = {}
def emit(self, code):
self._output_code += code
@property
def code_buffer(self):
return self._output_code
def visit(self, node):
if anno.hasanno(node, anno.Basic.SKIP_PROCESSING):
return
parent_origin = self.ctx.current_origin
eof_before = len(self._output_code)
if anno.hasanno(node, anno.Basic.ORIGIN):
self.ctx.current_origin = anno.getanno(node, anno.Basic.ORIGIN)
try:
ret = super(CodeGenerator, self).visit(node)
# By default, all replacements receive the origin info of the replaced
# node.
eof_after = len(self._output_code)
if eof_before - eof_after:
inherited_origin = anno.getanno(
node, anno.Basic.ORIGIN, default=parent_origin)
if inherited_origin is not None:
self.source_map[(eof_before, eof_after)] = inherited_origin
return ret
finally:
self.ctx.current_origin = parent_origin
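# A minimal usage sketch (hypothetical subclass, not part of the library): a
# CodeGenerator that emits one line per function definition it visits. The
# EntityInfo values are dummies, needed only to construct a Context.
if __name__ == '__main__':
    class _FunctionLister(CodeGenerator):

        def visit_FunctionDef(self, node):
            self.emit('found function: %s\n' % node.name)
            self.generic_visit(node)

    _info = EntityInfo(
        name='example', source_code=None, source_file=None,
        future_features=(), namespace={})
    _lister = _FunctionLister(Context(_info, None, None))
    _lister.visit(gast.parse('def f(x):\n    return x\n\ndef g():\n    pass\n'))
    print(_lister.code_buffer)  # expected to list f and g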
|
DALI-main
|
dali/python/nvidia/dali/_autograph/pyct/transformer.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code transformation exceptions."""
class PyCTError(Exception):
"""Base class for all exceptions."""
class UnsupportedLanguageElementError(PyCTError, NotImplementedError):
"""Raised for code patterns that AutoGraph does not support."""
class InaccessibleSourceCodeError(PyCTError, ValueError):
"""Raised when inspect can not access source code."""
|
DALI-main
|
dali/python/nvidia/dali/_autograph/pyct/errors.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gast compatibility library. Supports 0.2.2 and 0.3.2."""
# TODO(mdan): Remove this file once it's safe to break compatibility.
import functools
import gast
from distutils.version import LooseVersion
def get_gast_version():
"""Gast exports `__version__` from 0.5.3 onwards, we need to look it up in a different way."""
if hasattr(gast, '__version__'):
return gast.__version__
try:
import pkg_resources
return pkg_resources.get_distribution("gast").version
except pkg_resources.DistributionNotFound:
# Older gast had 'Str', check for the oldest supported version
if hasattr(gast, 'Str'):
return '0.2'
else:
try:
# Try to call it with 3 arguments, to differentiate between 0.5+ and earlier.
gast.Assign(None, None, None)
except AssertionError as e:
if "Bad argument number for Assign: 3, expecting 2" in str(e):
return '0.4'
return '0.5'
def is_constant(node):
"""Tests whether node represents a Python constant."""
return isinstance(node, gast.Constant)
def is_literal(node):
"""Tests whether node represents a Python literal."""
# Normal literals, True/False/None/Etc. in Python3
if is_constant(node):
return True
# True/False/None/Etc. in Python2
if isinstance(node, gast.Name) and node.id in ['True', 'False', 'None']:
return True
return False
def is_ellipsis(node):
"""Tests whether node represents a Python ellipsis."""
return isinstance(node, gast.Constant) and node.value == Ellipsis
def _compat_assign_gast_4(targets, value, type_comment):
"""Wraps around gast.Assign to use same function signature across versions."""
return gast.Assign(targets=targets, value=value)
def _compat_assign_gast_5(targets, value, type_comment):
"""Wraps around gast.Assign to use same function signature across versions."""
return gast.Assign(targets=targets, value=value, type_comment=type_comment)
if get_gast_version() < LooseVersion("0.5"):
compat_assign = _compat_assign_gast_4
else:
compat_assign = _compat_assign_gast_5
Module = functools.partial(gast.Module, type_ignores=None) # pylint:disable=invalid-name
Name = functools.partial(gast.Name, type_comment=None) # pylint:disable=invalid-name
Str = functools.partial(gast.Constant, kind=None) # pylint:disable=invalid-name
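# A minimal usage sketch (hypothetical, not part of the library): build an assignment
# node through the version-agnostic wrappers above and test the literal helper.
if __name__ == '__main__':
    lhs = Name(id='x', ctx=gast.Store(), annotation=None)
    rhs = Str('hello')
    assign = compat_assign(targets=[lhs], value=rhs, type_comment=None)
    print(type(assign).__name__, is_literal(assign.value))  # expected: Assign True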
|
DALI-main
|
dali/python/nvidia/dali/_autograph/pyct/gast_util.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AST node annotation support.
Adapted from Tangent.
"""
import enum
# pylint:disable=g-bad-import-order
import gast
# pylint:enable=g-bad-import-order
# TODO(mdan): Shorten the names.
# These names are heavily used; a shorter form like anno.blaa would be preferable.
# TODO(mdan): Replace the attr-dict mechanism with a more typed solution.
class NoValue(enum.Enum):
"""Base class for different types of AST annotations."""
def of(self, node, default=None):
return getanno(node, self, default=default)
def add_to(self, node, value):
setanno(node, self, value)
def exists(self, node):
return hasanno(node, self)
def __repr__(self):
return str(self.name)
class Basic(NoValue):
"""Container for basic annotation keys.
The enum values are used strictly for documentation purposes.
"""
QN = 'Qualified name, as it appeared in the code. See qual_names.py.'
SKIP_PROCESSING = (
'This node should be preserved as is and not processed any further.')
INDENT_BLOCK_REMAINDER = (
'When a node is annotated with this, the remainder of the block should'
' be indented below it. The annotation contains a tuple'
' (new_body, name_map), where `new_body` is the new indented block and'
' `name_map` allows renaming symbols.')
ORIGIN = ('Information about the source code that converted code originated'
' from. See origin_information.py.')
DIRECTIVES = ('User directives associated with a statement or a variable.'
' Typically, they affect the immediately-enclosing statement.')
EXTRA_LOOP_TEST = (
'A special annotation containing additional test code to be executed in'
' for loops.')
class Static(NoValue):
"""Container for static analysis annotation keys.
The enum values are used strictly for documentation purposes.
"""
# Symbols
# These flags are boolean.
IS_PARAM = 'Symbol is a parameter to the function being analyzed.'
# Scopes
# Scopes are represented by objects of type activity.Scope.
SCOPE = 'The scope for the annotated node. See activity.py.'
# TODO(mdan): Drop these in favor of accessing the child's SCOPE.
ARGS_SCOPE = 'The scope for the argument list of a function call.'
COND_SCOPE = 'The scope for the test node of a conditional statement.'
BODY_SCOPE = (
'The scope for the main body of a statement (True branch for if '
'statements, main body for loops).')
ORELSE_SCOPE = (
'The scope for the orelse body of a statement (False branch for if '
'statements, orelse body for loops).')
# Static analysis annotations.
DEFINITIONS = (
'Reaching definition information. See reaching_definitions.py.')
ORIG_DEFINITIONS = (
'The value of DEFINITIONS that applied to the original code before any'
' conversion.')
DEFINED_FNS_IN = (
'Local function definitions that may exist when exiting the node. See'
' reaching_fndefs.py')
DEFINED_VARS_IN = (
'Symbols defined when entering the node. See reaching_definitions.py.')
LIVE_VARS_OUT = ('Symbols live when exiting the node. See liveness.py.')
LIVE_VARS_IN = ('Symbols live when entering the node. See liveness.py.')
TYPES = 'Static type information. See type_inference.py.'
CLOSURE_TYPES = 'Types of closure symbols at each detected call site.'
VALUE = 'Static value information. See type_inference.py.'
FAIL = object()
def keys(node, field_name='___pyct_anno'):
if not hasattr(node, field_name):
return frozenset()
return frozenset(getattr(node, field_name).keys())
def getanno(node, key, default=FAIL, field_name='___pyct_anno'):
if (default is FAIL or (hasattr(node, field_name) and
(key in getattr(node, field_name)))):
return getattr(node, field_name)[key]
return default
def hasanno(node, key, field_name='___pyct_anno'):
return hasattr(node, field_name) and key in getattr(node, field_name)
def setanno(node, key, value, field_name='___pyct_anno'):
annotations = getattr(node, field_name, {})
setattr(node, field_name, annotations)
annotations[key] = value
# So that the annotations survive gast_to_ast() and ast_to_gast()
if field_name not in node._fields:
node._fields += (field_name,)
def delanno(node, key, field_name='___pyct_anno'):
annotations = getattr(node, field_name)
del annotations[key]
if not annotations:
delattr(node, field_name)
node._fields = tuple(f for f in node._fields if f != field_name)
def copyanno(from_node, to_node, key, field_name='___pyct_anno'):
if hasanno(from_node, key, field_name=field_name):
setanno(
to_node,
key,
getanno(from_node, key, field_name=field_name),
field_name=field_name)
def dup(node, copy_map, field_name='___pyct_anno'):
"""Recursively copies annotations in an AST tree.
Args:
node: ast.AST
copy_map: Dict[Hashable, Hashable], maps a source anno key to a destination
key. All annotations with the source key will be copied to identical
annotations with the destination key.
field_name: str
"""
for n in gast.walk(node):
for k in copy_map:
if hasanno(n, k, field_name):
setanno(n, copy_map[k], getanno(n, k, field_name), field_name)
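# A minimal usage sketch (hypothetical, not part of the library): attach, read and
# remove an annotation on a freshly parsed node using the helpers above.
if __name__ == '__main__':
    node = gast.parse('x = 1').body[0]
    setanno(node, Basic.QN, 'example-value')
    print(hasanno(node, Basic.QN), getanno(node, Basic.QN))  # expected: True example-value
    delanno(node, Basic.QN)
    print(keys(node))  # expected: frozenset()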
|
DALI-main
|
dali/python/nvidia/dali/_autograph/pyct/anno.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Container for origin source code information before AutoGraph compilation."""
import collections
import difflib
import inspect
import os
import tokenize
import gast
import six
from nvidia.dali._autograph.pyct import anno
from nvidia.dali._autograph.pyct import ast_util
from nvidia.dali._autograph.pyct import parser
from nvidia.dali._autograph.pyct import pretty_printer
class LineLocation(
collections.namedtuple('LineLocation', ('filename', 'lineno'))):
"""Similar to Location, but without column information.
Attributes:
filename: Text
lineno: int, 1-based
"""
pass
class Location(
collections.namedtuple('Location', ('filename', 'lineno', 'col_offset'))):
"""Encodes code location information.
Attributes:
filename: Text
lineno: int, 1-based
col_offset: int
line_loc: LineLocation
"""
@property
def line_loc(self):
return LineLocation(self.filename, self.lineno)
class OriginInfo(
collections.namedtuple(
'OriginInfo',
('loc', 'function_name', 'source_code_line', 'comment'))):
"""Container for information about the source code before conversion.
Attributes:
loc: Location
function_name: Optional[Text]
source_code_line: Text
comment: Optional[Text]
"""
def as_frame(self):
"""Returns a 4-tuple consistent with the return of traceback.extract_tb."""
return (self.loc.filename, self.loc.lineno, self.function_name,
self.source_code_line)
def __repr__(self):
if self.loc.filename:
return '{}:{}:{}'.format(
os.path.split(self.loc.filename)[1], self.loc.lineno,
self.loc.col_offset)
return '<no file>:{}:{}'.format(self.loc.lineno, self.loc.col_offset)
# TODO(mdan): This source map should be a class - easier to refer to.
def create_source_map(nodes, code, filepath):
"""Creates a source map between an annotated AST and the code it compiles to.
Note: this function assumes that nodes, code and filepath correspond to the
same code.
Args:
nodes: Iterable[ast.AST, ...], one or more AST nodes.
code: Text, the source code in which nodes are found.
filepath: Text
Returns:
Dict[LineLocation, OriginInfo], mapping locations in code to locations
indicated by origin annotations in node.
"""
reparsed_nodes = parser.parse(code, preamble_len=0, single_node=False)
for node in reparsed_nodes:
resolve(node, code, filepath, node.lineno, node.col_offset)
source_map = {}
try:
for before, after in ast_util.parallel_walk(nodes, reparsed_nodes):
# Note: generated code might not be mapped back to its origin.
# TODO(mdan): Generated code should always be mapped to something.
origin_info = anno.getanno(before, anno.Basic.ORIGIN, default=None)
final_info = anno.getanno(after, anno.Basic.ORIGIN, default=None)
if origin_info is None or final_info is None:
continue
# Note: the keys are by line only, excluding the column offset.
line_loc = LineLocation(final_info.loc.filename, final_info.loc.lineno)
existing_origin = source_map.get(line_loc)
if existing_origin is not None:
# Overlaps may exist because of child nodes, but almost never to
# different line locations. An exception is decorated functions, where
# both lines are mapped to the same line in the AST.
# Line overlaps: keep bottom node.
if existing_origin.loc.line_loc == origin_info.loc.line_loc:
if existing_origin.loc.lineno >= origin_info.loc.lineno:
continue
# In case of column overlaps, keep the leftmost node.
if existing_origin.loc.col_offset <= origin_info.loc.col_offset:
continue
source_map[line_loc] = origin_info
except ValueError as err:
new_msg = 'Inconsistent ASTs detected. This is a bug. Cause: \n'
new_msg += str(err)
new_msg += 'Diff:\n'
for n, rn in zip(nodes, reparsed_nodes):
nodes_str = pretty_printer.fmt(n, color=False, noanno=True)
reparsed_nodes_str = pretty_printer.fmt(rn, color=False, noanno=True)
diff = difflib.context_diff(
nodes_str.split('\n'),
reparsed_nodes_str.split('\n'),
fromfile='Original nodes',
tofile='Reparsed nodes',
n=7)
diff = '\n'.join(diff)
new_msg += diff + '\n'
raise ValueError(new_msg)
return source_map
class _Function(object):
def __init__(self, name):
self.name = name
class OriginResolver(gast.NodeVisitor):
"""Annotates an AST with additional source information like file name."""
def __init__(self, root_node, source_lines, comments_map,
context_lineno, context_col_offset,
filepath):
self._source_lines = source_lines
self._comments_map = comments_map
if (hasattr(root_node, 'decorator_list') and root_node.decorator_list and
hasattr(root_node.decorator_list[0], 'lineno')):
# Typical case: functions. The line number of the first decorator
# is more accurate than the line number of the function itself in
# 3.8+. In earlier versions they coincide.
self._lineno_offset = context_lineno - root_node.decorator_list[0].lineno
else:
# Fall back to the line number of the root node.
self._lineno_offset = context_lineno - root_node.lineno
self._col_offset = context_col_offset - root_node.col_offset
self._filepath = filepath
self._function_stack = []
def _absolute_lineno(self, lineno):
return lineno + self._lineno_offset
def _absolute_col_offset(self, col_offset):
if col_offset is None:
return 0
return col_offset + self._col_offset
def _attach_origin_info(self, node):
lineno = getattr(node, 'lineno', None)
col_offset = getattr(node, 'col_offset', None)
if lineno is None:
return
if self._function_stack:
function_name = self._function_stack[-1].name
else:
function_name = None
source_code_line = self._source_lines[lineno - 1]
comment = self._comments_map.get(lineno)
loc = Location(self._filepath, self._absolute_lineno(lineno),
self._absolute_col_offset(col_offset))
origin = OriginInfo(loc, function_name, source_code_line, comment)
anno.setanno(node, 'lineno', lineno)
anno.setanno(node, anno.Basic.ORIGIN, origin)
def visit(self, node):
entered_function = False
if isinstance(node, gast.FunctionDef):
entered_function = True
self._function_stack.append(_Function(node.name))
self._attach_origin_info(node)
self.generic_visit(node)
if entered_function:
self._function_stack.pop()
def resolve(node, source, context_filepath, context_lineno, context_col_offset):
"""Adds origin information to an AST, based on the source it was loaded from.
This allows us to map the original source code line numbers to generated
source code.
Note: the AST may be a part of a larger context (e.g. a function is part of
a module that may contain other things). However, this function does not
assume the source argument contains the entire context, nor that it contains
only code corresponding to node itself. It does, however, assume that node was
parsed from the given source code.
For this reason, two extra arguments are required, and they indicate the
location of the node in the original context.
Args:
node: gast.AST, the AST to annotate.
source: Text, the source code representing node.
context_filepath: Text
context_lineno: int
context_col_offset: int
"""
# TODO(mdan): Pull this to a separate utility.
code_reader = six.StringIO(source)
comments_map = {}
try:
for token in tokenize.generate_tokens(code_reader.readline):
tok_type, tok_string, loc, _, _ = token
srow, _ = loc
if tok_type == tokenize.COMMENT:
comments_map[srow] = tok_string.strip()[1:].strip()
except tokenize.TokenError:
if isinstance(node, gast.Lambda):
# Source code resolution in older Python versions is brittle for
# lambda functions, and may contain garbage.
pass
else:
raise
source_lines = source.split('\n')
visitor = OriginResolver(node, source_lines, comments_map,
context_lineno, context_col_offset,
context_filepath)
visitor.visit(node)
def resolve_entity(node, source, entity):
"""Like resolve, but extracts the context information from an entity."""
lines, lineno = inspect.getsourcelines(entity)
filepath = inspect.getsourcefile(entity)
# Poor man's attempt at guessing the column offset: count the leading
# whitespace. This might not work well with tabs.
definition_line = lines[0]
col_offset = len(definition_line) - len(definition_line.lstrip())
resolve(node, source, filepath, lineno, col_offset)
def copy_origin(from_node, to_node):
"""Copies the origin info from a node to another, recursively."""
origin = anno.Basic.ORIGIN.of(from_node, default=None)
if origin is None:
return
if not isinstance(to_node, (list, tuple)):
to_node = (to_node,)
for node in to_node:
for n in gast.walk(node):
anno.setanno(n, anno.Basic.ORIGIN, origin)
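# A minimal usage sketch (hypothetical, not part of the library): annotate a
# hand-parsed function with origin information through `resolve` above, then read
# the origin attached to its return statement. The file name below is a dummy.
if __name__ == '__main__':
    _src = 'def f(x):\n    return x  # passthrough\n'
    _fn_node = gast.parse(_src).body[0]
    resolve(_fn_node, _src, '<example>', context_lineno=1, context_col_offset=0)
    _origin = anno.getanno(_fn_node.body[0], anno.Basic.ORIGIN)
    print(_origin.loc.lineno, repr(_origin.source_code_line), _origin.comment)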
|
DALI-main
|
dali/python/nvidia/dali/_autograph/pyct/origin_info.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for manipulating qualified names.
A qualified name is a uniform way to refer to simple (e.g. 'foo') and composite
(e.g. 'foo.bar') syntactic symbols.
This is *not* related to the __qualname__ attribute used by inspect, which
refers to scopes.
"""
import collections
import gast
from nvidia.dali._autograph.pyct import anno
from nvidia.dali._autograph.pyct import parser
class CallerMustSetThis(object):
pass
class Symbol(collections.namedtuple('Symbol', ['name'])):
"""Represents a Python symbol."""
class Literal(collections.namedtuple('Literal', ['value'])):
"""Represents a Python numeric literal."""
def __str__(self):
if isinstance(self.value, str):
return "'{}'".format(self.value)
return str(self.value)
def __repr__(self):
return str(self)
# TODO(mdan): Use subclasses to remove the has_attr has_subscript booleans.
class QN(object):
"""Represents a qualified name."""
def __init__(self, base, attr=None, subscript=None):
if attr is not None and subscript is not None:
raise ValueError('A QN can only be either an attr or a subscript, not '
'both: attr={}, subscript={}.'.format(attr, subscript))
self._has_attr = False
self._has_subscript = False
if attr is not None:
if not isinstance(base, QN):
raise ValueError(
'for attribute QNs, base must be a QN; got instead "%s"' % base)
if not isinstance(attr, str):
raise ValueError('attr may only be a string; got instead "%s"' % attr)
self._parent = base
# TODO(mdan): Get rid of the tuple - it can only have 1 or 2 elements now.
self.qn = (base, attr)
self._has_attr = True
elif subscript is not None:
if not isinstance(base, QN):
raise ValueError('For subscript QNs, base must be a QN.')
self._parent = base
self.qn = (base, subscript)
self._has_subscript = True
else:
if not isinstance(base, (str, Literal)):
# TODO(mdan): Require Symbol instead of string.
raise ValueError(
'for simple QNs, base must be a string or a Literal object;'
' got instead "%s"' % type(base))
assert '.' not in base and '[' not in base and ']' not in base
self._parent = None
self.qn = (base,)
def is_symbol(self):
return isinstance(self.qn[0], str)
def is_simple(self):
return len(self.qn) <= 1
def is_composite(self):
return len(self.qn) > 1
def has_subscript(self):
return self._has_subscript
def has_attr(self):
return self._has_attr
@property
def attr(self):
if not self._has_attr:
raise ValueError('Cannot get attr of non-attribute "%s".' % self)
return self.qn[1]
@property
def parent(self):
if self._parent is None:
raise ValueError('Cannot get parent of simple name "%s".' % self.qn[0])
return self._parent
@property
def owner_set(self):
"""Returns all the symbols (simple or composite) that own this QN.
In other words, if this symbol was modified, the symbols in the owner set
may also be affected.
Examples:
'a.b[c.d]' has two owners, 'a' and 'a.b'
"""
owners = set()
if self.has_attr() or self.has_subscript():
owners.add(self.parent)
owners.update(self.parent.owner_set)
return owners
@property
def support_set(self):
"""Returns the set of simple symbols that this QN relies on.
This would be the smallest set of symbols necessary for the QN to
statically resolve (assuming properties and index ranges are verified
at runtime).
Examples:
'a.b' has only one support symbol, 'a'
'a[i]' has two support symbols, 'a' and 'i'
"""
# TODO(mdan): This might be the set of Name nodes in the AST. Track those?
roots = set()
if self.has_attr():
roots.update(self.parent.support_set)
elif self.has_subscript():
roots.update(self.parent.support_set)
roots.update(self.qn[1].support_set)
else:
roots.add(self)
return roots
def __hash__(self):
return hash(self.qn + (self._has_attr, self._has_subscript))
def __eq__(self, other):
return (isinstance(other, QN) and self.qn == other.qn and
self.has_subscript() == other.has_subscript() and
self.has_attr() == other.has_attr())
def __lt__(self, other):
return str(self) < str(other)
def __gt__(self, other):
return str(self) > str(other)
def __str__(self):
root = self.qn[0]
if self.has_subscript():
return '{}[{}]'.format(root, self.qn[1])
if self.has_attr():
return '.'.join(map(str, self.qn))
else:
return str(root)
def __repr__(self):
return str(self)
def ssf(self):
"""Simple symbol form."""
ssfs = [n.ssf() if isinstance(n, QN) else n for n in self.qn]
ssf_string = ''
for i in range(0, len(self.qn) - 1):
if self.has_subscript():
delimiter = '_sub_'
else:
delimiter = '_'
ssf_string += ssfs[i] + delimiter
return ssf_string + ssfs[-1]
def ast(self):
"""AST representation."""
# The caller must adjust the context appropriately.
if self.has_subscript():
return gast.Subscript(
value=self.parent.ast(),
slice=self.qn[-1].ast(),
ctx=CallerMustSetThis)
if self.has_attr():
return gast.Attribute(
value=self.parent.ast(), attr=self.qn[-1], ctx=CallerMustSetThis)
base = self.qn[0]
if isinstance(base, str):
return gast.Name(
base, ctx=CallerMustSetThis, annotation=None, type_comment=None)
elif isinstance(base, Literal):
return gast.Constant(base.value, kind=None)
else:
assert False, ('the constructor should prevent types other than '
'str and Literal')
class QnResolver(gast.NodeTransformer):
"""Annotates nodes with QN information.
Note: Not using NodeAnnos to avoid circular dependencies.
"""
def visit_Name(self, node):
node = self.generic_visit(node)
anno.setanno(node, anno.Basic.QN, QN(node.id))
return node
def visit_Attribute(self, node):
node = self.generic_visit(node)
if anno.hasanno(node.value, anno.Basic.QN):
anno.setanno(node, anno.Basic.QN,
QN(anno.getanno(node.value, anno.Basic.QN), attr=node.attr))
return node
def visit_Subscript(self, node):
# TODO(mdan): This may no longer apply if we overload getitem.
node = self.generic_visit(node)
s = node.slice
if isinstance(s, (gast.Tuple, gast.Slice)):
# TODO(mdan): Support range and multi-dimensional indices.
# Continuing silently because some demos use these.
return node
if isinstance(s, gast.Constant) and s.value != Ellipsis:
subscript = QN(Literal(s.value))
else:
# The index may be an expression, in which case a name doesn't make sense.
if anno.hasanno(s, anno.Basic.QN):
subscript = anno.getanno(s, anno.Basic.QN)
else:
return node
if anno.hasanno(node.value, anno.Basic.QN):
anno.setanno(node, anno.Basic.QN,
QN(anno.getanno(node.value, anno.Basic.QN),
subscript=subscript))
return node
def resolve(node):
return QnResolver().visit(node)
def from_str(qn_str):
node = parser.parse_expression(qn_str)
node = resolve(node)
return anno.getanno(node, anno.Basic.QN)
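# A minimal usage sketch (hypothetical, not part of the library): build qualified
# names from a string and by composition, and inspect the derived symbol sets.
if __name__ == '__main__':
    qn = from_str('a.b.c')
    print(qn)                                       # expected: a.b.c
    print(sorted(str(s) for s in qn.support_set))   # expected: ['a']
    print(sorted(str(s) for s in qn.owner_set))     # expected: ['a', 'a.b']
    indexed = QN(QN('arr'), subscript=QN('i'))
    print(indexed, indexed.ssf())                   # expected: arr[i] arr_sub_i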
|
DALI-main
|
dali/python/nvidia/dali/_autograph/pyct/qual_names.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Live variable analysis.
See https://en.wikipedia.org/wiki/Live_variable_analysis for a definition of
the following idioms: live variable, live in, live out, which are used
throughout this file.
This analysis attaches the following:
* symbols that are live at the exit of control flow statements
* symbols that are live at the entry of control flow statements
Requires activity analysis.
"""
import gast
from nvidia.dali._autograph.pyct import anno
from nvidia.dali._autograph.pyct import cfg
from nvidia.dali._autograph.pyct import transformer
from nvidia.dali._autograph.pyct.static_analysis import annos
class Analyzer(cfg.GraphVisitor):
"""CFG visitor that performs liveness analysis at statement level."""
def __init__(self, graph, include_annotations):
super(Analyzer, self).__init__(graph)
self.include_annotations = include_annotations
def init_state(self, _):
return set()
def visit_node(self, node):
prev_live_in = self.in_[node]
if anno.hasanno(node.ast_node, anno.Static.SCOPE):
node_scope = anno.getanno(node.ast_node, anno.Static.SCOPE)
gen = node_scope.read
if not self.include_annotations:
gen -= node_scope.annotations
# TODO(mdan): verify whether composites' parents need to be added.
# E.g. whether x needs to be added if x.y is live. Theoretically the
# activity analysis should have both so that wouldn't be needed.
kill = node_scope.modified | node_scope.deleted
live_out = set()
for n in node.next:
live_out |= self.in_[n]
live_in = gen | (live_out - kill)
reaching_functions = anno.getanno(
node.ast_node, anno.Static.DEFINED_FNS_IN)
for fn_ast_node in reaching_functions:
if isinstance(fn_ast_node, gast.Lambda):
# Exception: lambda functions are assumed to be used only in the
# place where they are defined, and not later.
continue
fn_scope = anno.getanno(fn_ast_node, annos.NodeAnno.ARGS_AND_BODY_SCOPE)
# Any closure of a reaching function definition is conservatively
# considered live.
live_in |= (fn_scope.read - fn_scope.bound)
else:
assert self.can_ignore(node), (node.ast_node, node)
live_out = set()
for n in node.next:
live_out |= self.in_[n]
live_in = live_out
self.in_[node] = live_in
self.out[node] = live_out
# TODO(mdan): Move this to the superclass?
return prev_live_in != live_in
class TreeAnnotator(transformer.Base):
"""Runs liveness analysis on each of the functions defined in the AST.
  If a function defines other local functions, those will have separate CFGs.
However, dataflow analysis needs to tie up these CFGs to properly emulate the
effect of closures. In the case of liveness, the parent function's live
variables must account for the variables that are live at the entry of each
subfunction. For example:
def foo():
# baz is live from here on
def bar():
print(baz)
This analyzer runs liveness analysis on each individual function, accounting
for the effect above.
"""
def __init__(self, source_info, graphs, include_annotations):
super(TreeAnnotator, self).__init__(source_info)
self.include_annotations = include_annotations
self.allow_skips = False
self.graphs = graphs
self.current_analyzer = None
def visit(self, node):
node = super(TreeAnnotator, self).visit(node)
if (self.current_analyzer is not None and
isinstance(node, gast.stmt) and
node in self.current_analyzer.graph.index):
cfg_node = self.current_analyzer.graph.index[node]
anno.setanno(node, anno.Static.LIVE_VARS_IN,
frozenset(self.current_analyzer.in_[cfg_node]))
return node
def _analyze_function(self, node, is_lambda):
parent_analyzer = self.current_analyzer
analyzer = Analyzer(self.graphs[node], self.include_annotations)
analyzer.visit_reverse()
self.current_analyzer = analyzer
node = self.generic_visit(node)
self.current_analyzer = parent_analyzer
return node
def visit_Lambda(self, node):
return self._analyze_function(node, is_lambda=True)
def visit_FunctionDef(self, node):
return self._analyze_function(node, is_lambda=False)
def _block_statement_live_out(self, node):
successors = self.current_analyzer.graph.stmt_next[node]
stmt_live_out = set()
for s in successors:
stmt_live_out.update(self.current_analyzer.in_[s])
anno.setanno(node, anno.Static.LIVE_VARS_OUT, frozenset(stmt_live_out))
return node
def _block_statement_live_in(self, node, entry_node):
if entry_node in self.current_analyzer.graph.index:
cfg_node = self.current_analyzer.graph.index[entry_node]
stmt_live_in = frozenset(self.current_analyzer.in_[cfg_node])
else:
assert anno.hasanno(entry_node, anno.Static.LIVE_VARS_IN), (
'If not matching a CFG node, must be a block statement:'
' {}'.format(entry_node))
stmt_live_in = anno.getanno(entry_node, anno.Static.LIVE_VARS_IN)
anno.setanno(node, anno.Static.LIVE_VARS_IN, stmt_live_in)
return node
def visit_If(self, node):
node = self.generic_visit(node)
node = self._block_statement_live_out(node)
return self._block_statement_live_in(node, node.test)
def visit_For(self, node):
node = self.generic_visit(node)
node = self._block_statement_live_out(node)
return self._block_statement_live_in(node, node.iter)
def visit_While(self, node):
node = self.generic_visit(node)
node = self._block_statement_live_out(node)
return self._block_statement_live_in(node, node.test)
def visit_Try(self, node):
node = self.generic_visit(node)
node = self._block_statement_live_out(node)
return self._block_statement_live_in(node, node.body[0])
def visit_ExceptHandler(self, node):
node = self.generic_visit(node)
node = self._block_statement_live_out(node)
return self._block_statement_live_in(node, node.body[0])
def visit_With(self, node):
node = self.generic_visit(node)
return self._block_statement_live_in(node, node.items[0])
def visit_Expr(self, node):
node = self.generic_visit(node)
cfg_node = self.current_analyzer.graph.index[node]
anno.setanno(node, anno.Static.LIVE_VARS_OUT,
frozenset(self.current_analyzer.out[cfg_node]))
return node
# TODO(mdan): Investigate the possibility of removing include_annotations.
def resolve(node, source_info, graphs, include_annotations=True):
"""Resolves the live symbols at the exit of control flow statements.
Args:
node: ast.AST
source_info: transformer.SourceInfo
graphs: Dict[ast.FunctionDef, cfg.Graph]
include_annotations: Bool, whether type annotations should be included in
the analysis.
Returns:
ast.AST
"""
node = TreeAnnotator(source_info, graphs, include_annotations).visit(node)
return node
|
DALI-main
|
dali/python/nvidia/dali/_autograph/pyct/static_analysis/liveness.py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Type inference.
This analysis annotates all symbol nodes of an AST with type information
extracted from static sources:
* type annotations
* global and local symbols visible to the function at analysis time
* literals
Important: This analysis is static, and does not detect dynamic type changes.
The analysis attempts to use the values of external symbols, if available. These
values are also considered static for the purpose of analysis.
Requires reaching function definitions analysis.
"""
import itertools
from typing import Any, Callable, Dict, Set
import gast
from nvidia.dali._autograph.pyct import anno
from nvidia.dali._autograph.pyct import cfg
from nvidia.dali._autograph.pyct import qual_names
from nvidia.dali._autograph.pyct import transformer
from nvidia.dali._autograph.pyct.static_analysis import activity
from nvidia.dali._autograph.pyct.static_analysis import annos
class Resolver(object):
"""Resolver objects handle the process of looking up actual names and types.
Unless noted otherwise, all resolve_* methods:
* have a first namespace argument, mapping string to actual values
* have a second types_namespace argument, mapping string to actual inferred
types
* specify names as QN objects
* specify types as a Set of inferred types
Unless noted otherwise, all resolve_* methods must return either:
* a set of `type` objects
* None
"""
def res_name(self, ns, types_ns, name):
"""Resolves the type/value an external (e.g. closure, global) variable.
Args:
ns: namespace
types_ns: types namespace
name: symbol name
Returns:
Tuple (type, static_value). The first element is the type to use for
      inference. The second is the static value to use. Return None to treat it
as unknown.
"""
raise NotImplementedError('subclasses must implement')
def res_value(self, ns, value):
"""Resolves the type a literal or static value."""
raise NotImplementedError('subclasses must implement')
def res_arg(self, ns, types_ns, f_name, name, type_anno, f_is_local):
"""Resolves the type of a (possibly annotated) function argument.
Args:
ns: namespace
types_ns: types namespace
f_name: str, the function name
name: str, the argument name
type_anno: the type annotating the argument, if any
f_is_local: bool, whether the function is a local function
Returns:
Set of the argument types.
"""
raise NotImplementedError('subclasses must implement')
def res_call(self, ns, types_ns, node, f_type, args, keywords):
"""Resolves the return type an external function or method call.
Args:
ns: namespace
types_ns: types namespace
      node: the gast.Call node being resolved
f_type: types of the actual function being called, if known
args: types of each respective argument in node.args
keywords: types of each respective argument in node.keywords
Returns:
Tuple (return_type, side_effect_types). The first element is just the
return types of the function. The second element is a map from
      argument names to sets of types, and allows modelling side effects of
functions (for example via global or nonlocal).
"""
raise NotImplementedError('subclasses must implement')
# TODO(mdan): Clean this up.
def res_slice(self, ns, types_ns, node_or_slice, value, slice_):
"""Resolves the return type of slice operation."""
raise NotImplementedError('subclasses must implement')
def res_compare(self, ns, types_ns, node, left, right):
"""Resolves the return type of a unary operation."""
raise NotImplementedError('subclasses must implement')
def res_unop(self, ns, types_ns, node, opnd):
"""Resolves the return type of a unary operation."""
raise NotImplementedError('subclasses must implement')
def res_binop(self, ns, types_ns, node, left, right):
"""Resolves the return type of a binary operation."""
raise NotImplementedError('subclasses must implement')
def res_list_literal(self, ns, elt_types):
"""Resolves the type of a list literal from its elements."""
raise NotImplementedError('subclasses must implement')
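# A minimal, hypothetical Resolver sketch (illustration only, not part of the
# API): literals resolve to their Python type and external names are looked up
# directly in the namespace; everything else is reported as unknown.
#
#   class BasicResolver(Resolver):
#     def res_value(self, ns, value):
#       return {type(value)}
#
#     def res_name(self, ns, types_ns, name):
#       value = ns.get(str(name), None)
#       if value is None:
#         return None, None
#       return {type(value)}, value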
class _TypeMap(object):
"""Abstraction for the state of the CFG walk for type inference.
This is a value type. Only implements the strictly necessary operators.
Attributes:
types: Dict[qual_names.QN, Set[Type]], mapping symbols to the set of
possible types.
"""
def __init__(self, init_from=None):
if init_from:
assert isinstance(init_from, _TypeMap)
self.types = {
s: set(other_types) for s, other_types in init_from.types.items()
}
else:
self.types = {}
def __eq__(self, other):
if frozenset(self.types.keys()) != frozenset(other.types.keys()):
return False
ret = all(self.types[s] == other.types[s] for s in self.types)
return ret
def __ne__(self, other):
return not self.__eq__(other)
def __or__(self, other):
assert isinstance(other, _TypeMap)
result = _TypeMap(self)
for s, other_types in other.types.items():
if s not in result.types:
self_types = set()
result.types[s] = self_types
else:
self_types = result.types[s]
self_types.update(other_types)
return result
def __repr__(self):
return 'SymbolTable {}'.format(self.types)
NO_VALUE = object()
class StmtInferrer(gast.NodeVisitor):
"""Runs type inference on a single AST statement.
This visitor annotates most nodes with type information. It also sets types
for the symbols modified by this statement in its types_out property.
Note: this inferrer is able to capture side effects of functions, however,
these side effects will not be applied to the current expression. Doing so
would create too much of a dependence on the runtime's internal rules about
execution order.
Example:
def f():
nonlocal a
a = 1
return a
a = 0.0
b = f() + a # a = float; side effect of f() ignored
print(a) # a = int; side effect of f() accounted for
"""
def __init__(self,
resolver: Resolver,
scope: activity.Scope,
namespace: Dict[qual_names.QN, Any],
closure_types: Dict[qual_names.QN, Set[Any]],
types_in: _TypeMap):
self.resolver = resolver
self.scope = scope
self.namespace = namespace
self.closure_types = closure_types
self.types_in = types_in
self.new_symbols = {}
# rvalue type. This property is set when encountering an assign operation,
# so that visiting nodes with Store ctx (typically found on left side of
# assignments) can infer the type they should receive.
self.rtype = None
def visit(self, node):
types = super().visit(node)
if __debug__:
self._check_set(types)
if types is not None:
# TODO(mdan): Normalize by removing subtypes.
anno.setanno(node, anno.Static.TYPES, tuple(types))
return types
def _check_set(self, value):
if value is not None and not isinstance(value, set):
raise ValueError('{} method expected to return set, got {}'.format(
self.resolver, value))
def visit_Constant(self, node):
types = self.resolver.res_value(self.namespace, node.value)
if __debug__:
self._check_set(types)
return types
def _apply_unpacking(self, node):
assert isinstance(node.ctx, gast.Store)
if self.rtype is not None:
original_stype = self.rtype
# TODO(mdan): Find a better way to express unpacking.
i_type = self.resolver.res_value(self.namespace, 0)
for i, elt in enumerate(node.elts):
self.rtype = self.resolver.res_slice(
self.namespace, self.types_in.types, i, original_stype, i_type)
self.visit(elt)
self.rtype = original_stype
return original_stype
return None
def visit_Tuple(self, node):
if isinstance(node.ctx, gast.Load):
elt_types = ()
for elt in node.elts:
types_ = self.visit(elt)
if types_ is None:
return None
elt_types += (types_,)
return set(itertools.product(*elt_types))
return self._apply_unpacking(node)
def visit_List(self, node):
if isinstance(node.ctx, gast.Load):
elt_types = tuple(self.visit(elt) for elt in node.elts)
return self.resolver.res_list_literal(self.namespace, elt_types)
return self._apply_unpacking(node)
def visit_Set(self, node):
raise NotImplementedError()
def visit_Name(self, node):
name = anno.getanno(node, anno.Basic.QN)
if isinstance(node.ctx, gast.Load):
types = self.types_in.types.get(name, None)
if types is None:
if (name not in self.scope.bound) or (name in self.scope.nonlocals):
# TODO(mdan): Test with global variables.
if name in self.closure_types:
types = self.closure_types[name]
else:
types, value = self.resolver.res_name(
self.namespace, self.types_in.types, name)
if value is not None:
anno.setanno(node, anno.Static.VALUE, value)
elif isinstance(node.ctx, gast.Param):
      # The direct parent is the whole function scope. See activity.py.
f_is_local = self.scope.parent.parent is not None
type_name = anno.getanno(node.annotation, anno.Basic.QN, None)
types = self.resolver.res_arg(self.namespace, self.types_in.types,
self.scope.function_name, name, type_name,
f_is_local)
if types is not None:
self.new_symbols[name] = types
elif isinstance(node.ctx, gast.Store):
if self.rtype is not None:
self.new_symbols[name] = self.rtype
types = self.rtype
else:
assert False, 'unknown ctx'
if __debug__:
self._check_set(types)
return types
def visit_Attribute(self, node):
parent_types = self.visit(node.value)
# Attempt to use the static value if known.
parent_value = anno.Static.VALUE.of(node.value, None)
if parent_value is not None:
static_value = getattr(parent_value, node.attr, NO_VALUE)
if static_value is NO_VALUE:
# Unexpected failure to resolve attribute. Ask the resolver about the
# full name instead.
types, static_value = self.resolver.res_name(
self.namespace, self.types_in, anno.Basic.QN.of(node))
anno.setanno(node, anno.Static.VALUE, static_value)
if __debug__:
self._check_set(types)
return types
else:
# Fall back to the type if that is known.
if parent_types is None:
return None
inferred_values = [getattr(t, node.attr, None) for t in parent_types]
if not inferred_values:
return None
static_value = inferred_values[0]
if static_value is None:
return None
if any(v is not static_value for v in inferred_values[1:]):
# Static value not stable, assume it's dynamic.
return None
types = self.resolver.res_value(self.namespace, static_value)
anno.setanno(node, anno.Static.VALUE, static_value)
if __debug__:
self._check_set(types)
return types
def visit_FunctionDef(self, node):
f_name = qual_names.QN(node.name)
if node.decorator_list:
raise NotImplementedError('decorators: {}'.format(node.decorator_list))
ret_types = None
if node.returns:
ret_types, _ = self.resolver.res_name(
self.namespace, self.types_in.types, anno.Basic.QN.of(node.returns))
if __debug__:
self._check_set(ret_types)
if ret_types is None:
ret_types = {Any}
f_types = set()
for rt in ret_types:
f_types.add(Callable[[Any], rt])
self.new_symbols[f_name] = f_types
# The definition of a function is an expression, hence has no return value.
return None
def _resolve_typed_callable(self, f_types, arg_types, keyword_types):
ret_types = set()
for t in f_types:
if isinstance(t, Callable):
        # Note: these are undocumented - may be version-specific!
# Callable[[x], y]: __args__ are (x, y)
args = t.__args__
if args:
ret_types.add(args[-1])
else:
ret_types.add(Any)
else:
raise NotImplementedError('callable type {}'.format(type(t)))
# Side effects can not be inferred based on type alone.
side_effects = None
return ret_types, side_effects
def visit_Call(self, node):
self.visit(node.func)
f_name = anno.Basic.QN.of(node.func)
arg_types = [self.visit(a) for a in node.args]
keyword_types = [self.visit(kw.value) for kw in node.keywords]
if f_name in self.scope.bound:
# Local function, use local type definitions, if available.
f_type = self.types_in.types.get(f_name, None)
if f_type is None:
# No static type info available, nothing more to do.
ret_type, side_effects = None, None
else:
ret_type, side_effects = self._resolve_typed_callable(
f_type, arg_types, keyword_types)
else:
# Nonlocal function, resolve externally.
f_type = anno.Static.TYPES.of(node.func, None)
ret_type, side_effects = self.resolver.res_call(self.namespace,
self.types_in.types, node,
f_type, arg_types,
keyword_types)
if __debug__:
self._check_set(ret_type)
if side_effects:
if not isinstance(side_effects, dict):
raise ValueError(
'side effects must be dict, got {}'.format(side_effects))
for k, v in side_effects.items():
if not isinstance(k, qual_names.QN):
raise ValueError('side effect keys must be QNs, got {}'.format(k))
self._check_set(v)
if side_effects:
self.new_symbols.update(side_effects)
return ret_type
def visit_Expr(self, node):
return self.visit(node.value)
def visit_Assign(self, node):
self.rtype = self.visit(node.value)
for t in node.targets:
self.visit(t)
self.rtype = None
def visit_Subscript(self, node):
val_types = self.visit(node.value)
slice_types = self.visit(node.slice)
if val_types is None or slice_types is None:
return None
types = self.resolver.res_slice(
self.namespace, self.types_in.types, node, val_types, slice_types)
if __debug__:
self._check_set(types)
return types
def visit_Compare(self, node):
left_types = self.visit(node.left)
right_types = [self.visit(c) for c in node.comparators]
if left_types is None or any(t is None for t in right_types):
return None
types = self.resolver.res_compare(
self.namespace, self.types_in.types, node, left_types, right_types)
if __debug__:
self._check_set(types)
return types
def visit_BinOp(self, node):
left_types = self.visit(node.left)
right_types = self.visit(node.right)
if left_types is None or right_types is None:
return None
types = self.resolver.res_binop(
self.namespace, self.types_in.types, node, left_types, right_types)
if __debug__:
self._check_set(types)
return types
def visit_UnaryOp(self, node):
opnd_types = self.visit(node.operand)
if opnd_types is None:
return None
types = self.resolver.res_unop(
self.namespace, self.types_in.types, node, opnd_types)
if __debug__:
self._check_set(types)
return types
class Analyzer(cfg.GraphVisitor):
"""CFG visitor that propagates type information across statements."""
def __init__(self, graph, resolver, namespace, scope, closure_types):
"""Creates a new analyzer.
Args:
graph: cfg.Graph
resolver: Resolver
namespace: Dict[str, Any]
scope: activity.Scope
closure_types: Dict[QN, Set]
"""
super(Analyzer, self).__init__(graph)
self.resolver = resolver
self.namespace = namespace
self.scope = scope
self.closure_types = closure_types
context_types = {
n: t for n, t in closure_types.items() if n not in scope.bound
}
if context_types:
self.context_types = _TypeMap()
self.context_types.types = context_types
else:
self.context_types = None
def init_state(self, _):
return _TypeMap()
def _update_closure_types(self, ast_node, types):
existing_types = anno.Static.CLOSURE_TYPES.of(ast_node, None)
if existing_types is None:
existing_types = {}
anno.Static.CLOSURE_TYPES.add_to(ast_node, existing_types)
for k, v in types.types.items():
if k in existing_types:
existing_types[k].update(v)
else:
existing_types[k] = set(v)
def visit_node(self, node):
prev_types_out = self.out[node]
types_in = _TypeMap()
for n in node.prev:
types_in |= self.out[n]
if (self.context_types is not None) and (node is self.graph.entry):
types_in |= self.context_types
types_out = _TypeMap(types_in)
ast_node = node.ast_node
inferrer = StmtInferrer(self.resolver, self.scope, self.namespace,
self.closure_types, types_in)
inferrer.visit(ast_node)
types_out.types.update(inferrer.new_symbols)
reaching_fndefs = anno.Static.DEFINED_FNS_IN.of(ast_node)
node_scope = anno.Static.SCOPE.of(ast_node, None)
if node_scope is not None:
# TODO(mdan): Check that it's actually safe to skip nodes without scope.
reads = {str(qn) for qn in node_scope.read}
for def_node in reaching_fndefs:
if def_node.name in reads:
self._update_closure_types(def_node, types_out)
self.in_[node] = types_in
self.out[node] = types_out
return prev_types_out != types_out
class FunctionVisitor(transformer.Base):
"""AST visitor that applies type inference to each function separately."""
def __init__(self, source_info, graphs, resolver):
super(FunctionVisitor, self).__init__(source_info)
self.graphs = graphs
self.resolver = resolver
def visit_FunctionDef(self, node):
subgraph = self.graphs[node]
scope = anno.getanno(node, annos.NodeAnno.ARGS_AND_BODY_SCOPE)
closure_types = anno.getanno(node, anno.Static.CLOSURE_TYPES, {})
analyzer = Analyzer(subgraph, self.resolver, self.ctx.info.namespace, scope,
closure_types)
analyzer.visit_forward()
# Recursively process any remaining subfunctions.
node.body = self.visit_block(node.body)
return node
def resolve(node, source_info, graphs, resolver):
"""Performs type inference.
Args:
node: ast.AST
source_info: transformer.SourceInfo
graphs: Dict[ast.FunctionDef, cfg.Graph]
resolver: Resolver
Returns:
ast.AST
"""
visitor = FunctionVisitor(source_info, graphs, resolver)
node = visitor.visit(node)
return node
|
DALI-main
|
dali/python/nvidia/dali/_autograph/pyct/static_analysis/type_inference.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Activity analysis.
Requires qualified name annotations (see qual_names.py).
"""
import copy
import weakref
import gast
from nvidia.dali._autograph.pyct import anno
from nvidia.dali._autograph.pyct import qual_names
from nvidia.dali._autograph.pyct import transformer
from nvidia.dali._autograph.pyct.static_analysis.annos import NodeAnno
class Scope(object):
"""Encloses local symbol definition and usage information.
This can track for instance whether a symbol is modified in the current scope.
Note that scopes do not necessarily align with Python's scopes. For example,
the body of an if statement may be considered a separate scope.
Caution - the AST references held by this object are weak.
Scope objects are mutable during construction only, and must be frozen using
`Scope.finalize()` before use. Furthermore, a scope is consistent only after
all its children have been frozen. While analysing code blocks, scopes are
being gradually built, from the innermost scope outward. Freezing indicates
that the analysis of a code block is complete. Once frozen, mutation is no
longer allowed. `is_final` tracks whether the scope is frozen or not. Certain
properties, like `referenced`, are only accurate when called on frozen scopes.
Attributes:
parent: Optional[Scope], the parent scope, if any.
isolated: bool, whether the scope is a true Python scope (e.g. the scope of
a function), or just a surrogate tracking an ordinary code block. Using
the terminology of the Python 3 reference documentation, True roughly
represents an actual scope, whereas False represents an ordinary code
block.
function_name: Optional[str], name of the function owning this scope.
isolated_names: Set[qual_names.QN], identifiers that are isolated to this
scope (even if the scope is not isolated).
annotations: Set[qual_names.QN], identifiers used as type annotations
in this scope.
read: Set[qual_names.QN], identifiers read in this scope.
modified: Set[qual_names.QN], identifiers modified in this scope.
deleted: Set[qual_names.QN], identifiers deleted in this scope.
bound: Set[qual_names.QN], names that are bound to this scope. See
https://docs.python.org/3/reference/executionmodel.html#binding-of-names
for a precise definition.
globals: Set[qual_names.QN], names that are explicitly marked as global in
this scope. Note that this doesn't include free read-only vars bound to
global symbols.
nonlocals: Set[qual_names.QN], names that are explicitly marked as nonlocal
in this scope. Note that this doesn't include free read-only vars bound to
global symbols.
free_vars: Set[qual_names.QN], the free variables in this scope. See
https://docs.python.org/3/reference/executionmodel.html for a precise
definition.
params: WeakValueDictionary[qual_names.QN, ast.Node], function arguments
visible in this scope, mapped to the function node that defines them.
enclosing_scope: Scope, the innermost isolated scope that is a transitive
parent of this scope. May be the scope itself.
referenced: Set[qual_names.QN], the totality of the symbols used by this
scope and its parents.
is_final: bool, whether the scope is frozen or not.
Note - simple statements may never delete and modify a symbol at the same
time. However, compound ones like if statements can. In that latter case, it's
undefined whether the symbol is actually modified or deleted upon statement
exit. Certain analyses like reaching definitions need to be careful about
this.
"""
# Note: this mutable-immutable pattern is used because using a builder would
# have taken a lot more boilerplate.
def __init__(self, parent, isolated=True, function_name=None):
"""Create a new scope.
Args:
parent: A Scope or None.
isolated: Whether the scope is isolated, that is, whether variables
modified in this scope should be considered modified in the parent
scope.
function_name: Name of the function owning this scope.
"""
self.parent = parent
self.isolated = isolated
self.function_name = function_name
self.isolated_names = set()
self.read = set()
self.modified = set()
self.deleted = set()
self.bound = set()
self.globals = set()
self.nonlocals = set()
self.annotations = set()
self.params = weakref.WeakValueDictionary()
# Certain fields can only be accessed after the scope and all its parent
# scopes have been fully built. This field guards that.
self.is_final = False
@property
def enclosing_scope(self):
assert self.is_final
if self.parent is not None and not self.isolated:
return self.parent
return self
@property
def referenced(self):
if self.parent is not None:
return self.read | self.parent.referenced
return self.read
@property
def free_vars(self):
enclosing_scope = self.enclosing_scope
return enclosing_scope.read - enclosing_scope.bound
def copy_from(self, other):
"""Recursively copies the contents of this scope from another scope."""
assert not self.is_final
if self.parent is not None:
assert other.parent is not None
self.parent.copy_from(other.parent)
self.isolated_names = copy.copy(other.isolated_names)
self.modified = copy.copy(other.modified)
self.read = copy.copy(other.read)
self.deleted = copy.copy(other.deleted)
self.bound = copy.copy(other.bound)
self.annotations = copy.copy(other.annotations)
self.params = copy.copy(other.params)
@classmethod
def copy_of(cls, other):
if other.parent is not None:
assert other.parent is not None
parent = cls.copy_of(other.parent)
else:
parent = None
new_copy = cls(parent)
new_copy.copy_from(other)
return new_copy
def merge_from(self, other):
"""Adds all activity from another scope to this scope."""
assert not self.is_final
if self.parent is not None:
assert other.parent is not None
self.parent.merge_from(other.parent)
self.isolated_names.update(other.isolated_names)
self.read.update(other.read)
self.modified.update(other.modified)
self.bound.update(other.bound)
self.deleted.update(other.deleted)
self.annotations.update(other.annotations)
self.params.update(other.params)
def finalize(self):
"""Freezes this scope."""
assert not self.is_final
# TODO(mdan): freeze read, modified, bound.
if self.parent is not None:
assert not self.parent.is_final
if not self.isolated:
self.parent.read.update(self.read - self.isolated_names)
self.parent.modified.update(self.modified - self.isolated_names)
self.parent.bound.update(self.bound - self.isolated_names)
self.parent.globals.update(self.globals)
self.parent.nonlocals.update(self.nonlocals)
self.parent.annotations.update(self.annotations)
else:
# TODO(mdan): This is not accurate.
self.parent.read.update(self.read - self.bound)
self.parent.annotations.update(self.annotations - self.bound)
self.is_final = True
def __repr__(self):
return 'Scope{r=%s, w=%s}' % (tuple(self.read), tuple(self.modified))
def mark_param(self, name, owner):
# Assumption: all AST nodes have the same life span. This lets us use
# a weak reference to mark the connection between a symbol node and the
# function node whose argument that symbol is.
self.params[name] = owner
class _Comprehension(object):
no_root = True
def __init__(self):
# TODO(mdan): Consider using an enum.
self.is_list_comp = False
self.targets = set()
class _FunctionOrClass(object):
def __init__(self):
self.node = None
class ActivityAnalyzer(transformer.Base):
"""Annotates nodes with local scope information.
See Scope.
The use of this class requires that qual_names.resolve() has been called on
  the node. This class will ignore nodes that have not been annotated with
  their qualified names.
"""
def __init__(self, context, parent_scope=None):
super(ActivityAnalyzer, self).__init__(context)
self.allow_skips = False
self.scope = Scope(parent_scope, isolated=True)
    # Note: all these flags crucially rely on the respective nodes being
    # leaves in the AST, that is, they cannot contain other statements.
self._in_aug_assign = False
self._in_annotation = False
self._track_annotations_only = False
@property
def _in_constructor(self):
context = self.state[_FunctionOrClass]
if context.level > 2:
innermost = context.stack[-1].node
parent = context.stack[-2].node
return (isinstance(parent, gast.ClassDef) and
(isinstance(innermost, gast.FunctionDef) and
innermost.name == '__init__'))
return False
def _node_sets_self_attribute(self, node):
if anno.hasanno(node, anno.Basic.QN):
qn = anno.getanno(node, anno.Basic.QN)
# TODO(mdan): The 'self' argument is not guaranteed to be called 'self'.
if qn.has_attr and qn.parent.qn == ('self',):
return True
return False
def _track_symbol(self, node, composite_writes_alter_parent=False):
if self._track_annotations_only and not self._in_annotation:
return
# A QN may be missing when we have an attribute (or subscript) on a function
# call. Example: a().b
if not anno.hasanno(node, anno.Basic.QN):
return
qn = anno.getanno(node, anno.Basic.QN)
    # When inside a comprehension, ignore reads to any of the comprehension's
# targets. This includes attributes or slices of those arguments.
for l in self.state[_Comprehension]:
if qn in l.targets:
return
if qn.owner_set & set(l.targets):
return
if isinstance(node.ctx, gast.Store):
# In comprehensions, modified symbols are the comprehension targets.
if self.state[_Comprehension].level > 0:
self.state[_Comprehension].targets.add(qn)
return
self.scope.modified.add(qn)
self.scope.bound.add(qn)
if qn.is_composite and composite_writes_alter_parent:
self.scope.modified.add(qn.parent)
if self._in_aug_assign:
self.scope.read.add(qn)
elif isinstance(node.ctx, gast.Load):
self.scope.read.add(qn)
if self._in_annotation:
self.scope.annotations.add(qn)
elif isinstance(node.ctx, gast.Param):
self.scope.bound.add(qn)
self.scope.mark_param(qn, self.state[_FunctionOrClass].node)
elif isinstance(node.ctx, gast.Del):
# The read matches the Python semantics - attempting to delete an
# undefined symbol is illegal.
self.scope.read.add(qn)
# Targets of del are considered bound:
# https://docs.python.org/3/reference/executionmodel.html#binding-of-names
self.scope.bound.add(qn)
self.scope.deleted.add(qn)
else:
raise ValueError('Unknown context {} for node "{}".'.format(
type(node.ctx), qn))
def _enter_scope(self, isolated, f_name=None):
self.scope = Scope(self.scope, isolated=isolated, function_name=f_name)
def _exit_scope(self):
exited_scope = self.scope
exited_scope.finalize()
self.scope = exited_scope.parent
return exited_scope
def _exit_and_record_scope(self, node, tag=anno.Static.SCOPE):
node_scope = self._exit_scope()
anno.setanno(node, tag, node_scope)
return node_scope
def _process_statement(self, node):
self._enter_scope(False)
node = self.generic_visit(node)
self._exit_and_record_scope(node)
return node
def _process_annotation(self, node):
self._in_annotation = True
node = self.visit(node)
self._in_annotation = False
return node
def visit_Import(self, node):
return self._process_statement(node)
def visit_ImportFrom(self, node):
return self._process_statement(node)
def visit_Global(self, node):
self._enter_scope(False)
for name in node.names:
qn = qual_names.QN(name)
self.scope.read.add(qn)
self.scope.globals.add(qn)
self._exit_and_record_scope(node)
return node
def visit_Nonlocal(self, node):
self._enter_scope(False)
for name in node.names:
qn = qual_names.QN(name)
self.scope.read.add(qn)
self.scope.bound.add(qn)
self.scope.nonlocals.add(qn)
self._exit_and_record_scope(node)
return node
def visit_Expr(self, node):
return self._process_statement(node)
def visit_Raise(self, node):
return self._process_statement(node)
def visit_Return(self, node):
return self._process_statement(node)
def visit_Assign(self, node):
return self._process_statement(node)
def visit_AnnAssign(self, node):
self._enter_scope(False)
node.target = self.visit(node.target)
if node.value is not None:
# Can be None for pure declarations, e.g. `n: int`. This is a new thing
# enabled by type annotations, but does not influence static analysis
# (declarations are not definitions).
node.value = self.visit(node.value)
if node.annotation:
node.annotation = self._process_annotation(node.annotation)
self._exit_and_record_scope(node)
return node
def visit_AugAssign(self, node):
# Special rules for AugAssign. Here, the AST only shows the target as
# written, when it is in fact also read.
self._enter_scope(False)
self._in_aug_assign = True
node.target = self.visit(node.target)
self._in_aug_assign = False
node.op = self.visit(node.op)
node.value = self.visit(node.value)
self._exit_and_record_scope(node)
return node
def visit_Delete(self, node):
return self._process_statement(node)
def visit_Name(self, node):
if node.annotation:
node.annotation = self._process_annotation(node.annotation)
self._track_symbol(node)
return node
def visit_alias(self, node):
node = self.generic_visit(node)
if node.asname is None:
# Only the root name is a real symbol operation.
qn = qual_names.QN(node.name.split('.')[0])
else:
qn = qual_names.QN(node.asname)
self.scope.modified.add(qn)
self.scope.bound.add(qn)
return node
def visit_Attribute(self, node):
node = self.generic_visit(node)
if self._in_constructor and self._node_sets_self_attribute(node):
self._track_symbol(node, composite_writes_alter_parent=True)
else:
self._track_symbol(node)
return node
def visit_Subscript(self, node):
node = self.generic_visit(node)
# Subscript writes (e.g. a[b] = "value") are considered to modify
# both the element itself (a[b]) and its parent (a).
self._track_symbol(node)
return node
def visit_Print(self, node):
self._enter_scope(False)
node.values = self.visit_block(node.values)
node_scope = self._exit_and_record_scope(node)
anno.setanno(node, NodeAnno.ARGS_SCOPE, node_scope)
return node
def visit_Assert(self, node):
return self._process_statement(node)
def visit_Call(self, node):
self._enter_scope(False)
node.args = self.visit_block(node.args)
node.keywords = self.visit_block(node.keywords)
    # TODO(mdan): Account for starargs and kwargs
self._exit_and_record_scope(node, tag=NodeAnno.ARGS_SCOPE)
node.func = self.visit(node.func)
return node
def _process_block_node(self, node, block, scope_name):
self._enter_scope(False)
block = self.visit_block(block)
self._exit_and_record_scope(node, tag=scope_name)
return node
def _process_parallel_blocks(self, parent, children):
# Because the scopes are not isolated, processing any child block
# modifies the parent state causing the other child blocks to be
# processed incorrectly. So we need to checkpoint the parent scope so that
# each child sees the same context.
before_parent = Scope.copy_of(self.scope)
after_children = []
for child, scope_name in children:
self.scope.copy_from(before_parent)
parent = self._process_block_node(parent, child, scope_name)
after_child = Scope.copy_of(self.scope)
after_children.append(after_child)
for after_child in after_children:
self.scope.merge_from(after_child)
return parent
def _process_comprehension(self,
node,
is_list_comp=False,
is_dict_comp=False):
with self.state[_Comprehension] as comprehension_:
comprehension_.is_list_comp = is_list_comp
# Note: it's important to visit the generators first to properly account
# for the variables local to these generators. Example: `x` is local to
# the expression `z for x in y for z in x`.
node.generators = self.visit_block(node.generators)
if is_dict_comp:
node.key = self.visit(node.key)
node.value = self.visit(node.value)
else:
node.elt = self.visit(node.elt)
return node
def visit_comprehension(self, node):
# It is important to visit children in this order so that the reads to
# the target name are appropriately ignored.
node.iter = self.visit(node.iter)
node.target = self.visit(node.target)
return self.generic_visit(node)
def visit_DictComp(self, node):
return self._process_comprehension(node, is_dict_comp=True)
def visit_ListComp(self, node):
return self._process_comprehension(node, is_list_comp=True)
def visit_SetComp(self, node):
return self._process_comprehension(node)
def visit_GeneratorExp(self, node):
return self._process_comprehension(node)
def visit_ClassDef(self, node):
with self.state[_FunctionOrClass] as fn:
fn.node = node
# The ClassDef node itself has a Scope object that tracks the creation
# of its name, along with the usage of any decorator accompanying it.
self._enter_scope(False)
node.decorator_list = self.visit_block(node.decorator_list)
self.scope.modified.add(qual_names.QN(node.name))
self.scope.bound.add(qual_names.QN(node.name))
node.bases = self.visit_block(node.bases)
node.keywords = self.visit_block(node.keywords)
self._exit_and_record_scope(node)
# A separate Scope tracks the actual class definition.
self._enter_scope(True)
node = self.generic_visit(node)
self._exit_scope()
return node
def _visit_node_list(self, nodes):
return [(None if n is None else self.visit(n)) for n in nodes]
def _visit_arg_annotations(self, node):
node.args.kw_defaults = self._visit_node_list(node.args.kw_defaults)
node.args.defaults = self._visit_node_list(node.args.defaults)
self._track_annotations_only = True
node = self._visit_arg_declarations(node)
self._track_annotations_only = False
return node
def _visit_arg_declarations(self, node):
node.args.posonlyargs = self._visit_node_list(node.args.posonlyargs)
node.args.args = self._visit_node_list(node.args.args)
if node.args.vararg is not None:
node.args.vararg = self.visit(node.args.vararg)
node.args.kwonlyargs = self._visit_node_list(node.args.kwonlyargs)
if node.args.kwarg is not None:
node.args.kwarg = self.visit(node.args.kwarg)
return node
def visit_FunctionDef(self, node):
with self.state[_FunctionOrClass] as fn:
fn.node = node
# The FunctionDef node itself has a Scope object that tracks the creation
# of its name, along with the usage of any decorator accompanying it.
self._enter_scope(False)
node.decorator_list = self.visit_block(node.decorator_list)
if node.returns:
node.returns = self._process_annotation(node.returns)
      # Argument annotations (including defaults) affect the defining context.
node = self._visit_arg_annotations(node)
function_name = qual_names.QN(node.name)
self.scope.modified.add(function_name)
self.scope.bound.add(function_name)
self._exit_and_record_scope(node)
# A separate Scope tracks the actual function definition.
self._enter_scope(True, node.name)
# Keep a separate scope for the arguments node, which is used in the CFG.
self._enter_scope(False, node.name)
# Arg declarations only affect the function itself, and have no effect
# in the defining context whatsoever.
node = self._visit_arg_declarations(node)
self._exit_and_record_scope(node.args)
# Track the body separately. This is for compatibility reasons, it may not
# be strictly needed.
self._enter_scope(False, node.name)
node.body = self.visit_block(node.body)
self._exit_and_record_scope(node, NodeAnno.BODY_SCOPE)
self._exit_and_record_scope(node, NodeAnno.ARGS_AND_BODY_SCOPE)
return node
def visit_Lambda(self, node):
# Lambda nodes are treated in roughly the same way as FunctionDef nodes.
with self.state[_FunctionOrClass] as fn:
fn.node = node
# The Lambda node itself has a Scope object that tracks the creation
# of its name, along with the usage of any decorator accompanying it.
self._enter_scope(False)
node = self._visit_arg_annotations(node)
self._exit_and_record_scope(node)
# A separate Scope tracks the actual function definition.
self._enter_scope(True)
# Keep a separate scope for the arguments node, which is used in the CFG.
self._enter_scope(False)
node = self._visit_arg_declarations(node)
self._exit_and_record_scope(node.args)
# Track the body separately. This is for compatibility reasons, it may not
# be strictly needed.
# TODO(mdan): Do remove it, it's confusing.
self._enter_scope(False)
node.body = self.visit(node.body)
# The lambda body can contain nodes of types normally not found as
# statements, and may not have the SCOPE annotation needed by the CFG.
# So we attach one if necessary.
if not anno.hasanno(node.body, anno.Static.SCOPE):
anno.setanno(node.body, anno.Static.SCOPE, self.scope)
self._exit_and_record_scope(node, NodeAnno.BODY_SCOPE)
lambda_scope = self.scope
self._exit_and_record_scope(node, NodeAnno.ARGS_AND_BODY_SCOPE)
# Exception: lambdas are assumed to be used in the place where
# they are defined. Therefore, their activity is passed on to the
# calling statement.
self.scope.read.update(lambda_scope.read - lambda_scope.bound)
return node
def visit_With(self, node):
self._enter_scope(False)
node = self.generic_visit(node)
self._exit_and_record_scope(node, NodeAnno.BODY_SCOPE)
return node
def visit_withitem(self, node):
return self._process_statement(node)
def visit_If(self, node):
self._enter_scope(False)
node.test = self.visit(node.test)
node_scope = self._exit_and_record_scope(node.test)
anno.setanno(node, NodeAnno.COND_SCOPE, node_scope)
node = self._process_parallel_blocks(node,
((node.body, NodeAnno.BODY_SCOPE),
(node.orelse, NodeAnno.ORELSE_SCOPE)))
return node
def visit_For(self, node):
self._enter_scope(False)
node.target = self.visit(node.target)
node.iter = self.visit(node.iter)
self._exit_and_record_scope(node.iter)
self._enter_scope(False)
self.visit(node.target)
if anno.hasanno(node, anno.Basic.EXTRA_LOOP_TEST):
self._process_statement(anno.getanno(node, anno.Basic.EXTRA_LOOP_TEST))
self._exit_and_record_scope(node, tag=NodeAnno.ITERATE_SCOPE)
node = self._process_parallel_blocks(node,
((node.body, NodeAnno.BODY_SCOPE),
(node.orelse, NodeAnno.ORELSE_SCOPE)))
return node
def visit_While(self, node):
self._enter_scope(False)
node.test = self.visit(node.test)
node_scope = self._exit_and_record_scope(node.test)
anno.setanno(node, NodeAnno.COND_SCOPE, node_scope)
node = self._process_parallel_blocks(node,
((node.body, NodeAnno.BODY_SCOPE),
(node.orelse, NodeAnno.ORELSE_SCOPE)))
return node
def visit_ExceptHandler(self, node):
self._enter_scope(False)
# try/except oddity: as expected, it leaks any names you defined inside the
# except block, but not the name of the exception variable.
if node.name is not None:
self.scope.isolated_names.add(anno.getanno(node.name, anno.Basic.QN))
node = self.generic_visit(node)
self._exit_scope()
return node
def resolve(node, context, parent_scope=None):
return ActivityAnalyzer(context, parent_scope).visit(node)
|
DALI-main
|
dali/python/nvidia/dali/_autograph/pyct/static_analysis/activity.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Static information resolution.
This module contains utilities to help annotate AST nodes with as much runtime
information as can be possibly extracted without actually executing the code,
under the assumption that the context in which the code will run is known.
Overall, the different analyses have the functions listed below:
* activity: inventories symbols read, written to, params, etc. at different
levels
* liveness, reaching_definitions: dataflow analyses based on the program's CFG
and using the symbol information gathered by activity analysis
"""
|
DALI-main
|
dali/python/nvidia/dali/_autograph/pyct/static_analysis/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An analysis that determines the reach of a function definition.
A function definition is said to reach a statement if that function may exist
(and therefore may be called) when that statement executes.
"""
import gast
from nvidia.dali._autograph.pyct import anno
from nvidia.dali._autograph.pyct import cfg
from nvidia.dali._autograph.pyct import transformer
class Definition(object):
"""Definition objects describe a unique definition of a function."""
def __init__(self, def_node):
self.def_node = def_node
class _NodeState(object):
"""Abstraction for the state of the CFG walk for reaching definition analysis.
This is a value type. Only implements the strictly necessary operators.
Attributes:
value: Dict[qual_names.QN, Set[Definition, ...]], the defined symbols and
their possible definitions
"""
def __init__(self, init_from=None):
if init_from:
self.value = set(init_from)
else:
self.value = set()
def __eq__(self, other):
return self.value == other.value
def __ne__(self, other):
return self.value != other.value
def __or__(self, other):
assert isinstance(other, _NodeState)
result = _NodeState(self.value)
result.value.update(other.value)
return result
def __add__(self, value):
result = _NodeState(self.value)
result.value.add(value)
return result
def __repr__(self):
return 'NodeState[%s]=%s' % (id(self), repr(self.value))
class Analyzer(cfg.GraphVisitor):
"""CFG visitor that determines reaching definitions at statement level."""
def __init__(self, graph, external_defs):
super(Analyzer, self).__init__(graph)
# This allows communicating that nodes have extra reaching definitions,
# e.g. those that a function closes over.
self.external_defs = external_defs
def init_state(self, _):
return _NodeState()
def visit_node(self, node):
prev_defs_out = self.out[node]
if node is self.graph.entry:
defs_in = _NodeState(self.external_defs)
else:
defs_in = prev_defs_out
for n in node.prev:
defs_in |= self.out[n]
defs_out = defs_in
if isinstance(node.ast_node, (gast.Lambda, gast.FunctionDef)):
defs_out += node.ast_node
self.in_[node] = defs_in
self.out[node] = defs_out
return prev_defs_out != defs_out
class TreeAnnotator(transformer.Base):
"""AST visitor that annotates each symbol name with its reaching definitions.
Simultaneously, the visitor runs the dataflow analysis on each function node,
accounting for the effect of closures. For example:
def foo():
def f():
pass
def g():
# `def f` reaches here
"""
def __init__(self, source_info, graphs):
super(TreeAnnotator, self).__init__(source_info)
self.graphs = graphs
self.allow_skips = False
self.current_analyzer = None
  def _process_function(self, node):
parent_analyzer = self.current_analyzer
subgraph = self.graphs[node]
if (self.current_analyzer is not None
and node in self.current_analyzer.graph.index):
cfg_node = self.current_analyzer.graph.index[node]
defined_in = self.current_analyzer.in_[cfg_node].value
else:
defined_in = ()
analyzer = Analyzer(subgraph, defined_in)
analyzer.visit_forward()
self.current_analyzer = analyzer
node = self.generic_visit(node)
self.current_analyzer = parent_analyzer
return node
def visit_FunctionDef(self, node):
    return self._process_function(node)
def visit_Lambda(self, node):
    return self._process_function(node)
def visit(self, node):
# This can happen before entering the top level function
if (self.current_analyzer is not None
and node in self.current_analyzer.graph.index):
cfg_node = self.current_analyzer.graph.index[node]
anno.setanno(node, anno.Static.DEFINED_FNS_IN,
self.current_analyzer.in_[cfg_node].value)
extra_node = anno.getanno(node, anno.Basic.EXTRA_LOOP_TEST, default=None)
if extra_node is not None:
cfg_node = self.current_analyzer.graph.index[extra_node]
anno.setanno(extra_node, anno.Static.DEFINED_FNS_IN,
self.current_analyzer.in_[cfg_node].value)
return super(TreeAnnotator, self).visit(node)
def resolve(node, source_info, graphs):
"""Resolves reaching definitions for each symbol.
Args:
node: ast.AST
source_info: transformer.SourceInfo
graphs: Dict[ast.FunctionDef, cfg.Graph]
Returns:
ast.AST
"""
visitor = TreeAnnotator(source_info, graphs)
node = visitor.visit(node)
return node
|
DALI-main
|
dali/python/nvidia/dali/_autograph/pyct/static_analysis/reaching_fndefs.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Annotations used by the static analyzer."""
from enum import Enum
# TODO(mdan): Remove.
class NoValue(Enum):
def __repr__(self): # pylint: disable=invalid-repr-returned
return self.name
class NodeAnno(NoValue):
"""Additional annotations used by the static analyzer.
These are in addition to the basic annotations declared in anno.py.
"""
# Symbols
# These flags are boolean.
IS_LOCAL = 'Symbol is local to the function scope being analyzed.'
IS_PARAM = 'Symbol is a parameter to the function being analyzed.'
IS_MODIFIED_SINCE_ENTRY = (
'Symbol has been explicitly replaced in the current function scope.')
# Scopes
# Scopes are represented by objects of type activity.Scope.
ARGS_SCOPE = 'The scope for the argument list of a function call.'
COND_SCOPE = 'The scope for the test node of a conditional statement.'
ITERATE_SCOPE = 'The scope for the iterate assignment of a for loop.'
ARGS_AND_BODY_SCOPE = (
'The scope for the main body of a function or lambda, including its'
' arguments.')
BODY_SCOPE = (
'The scope for the main body of a statement (True branch for if '
'statements, main body for loops).')
ORELSE_SCOPE = (
'The scope for the orelse body of a statement (False branch for if '
'statements, orelse body for loops).')
|
DALI-main
|
dali/python/nvidia/dali/_autograph/pyct/static_analysis/annos.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reaching definition analysis.
This analysis attaches a set of Definition objects to each symbol, one
for each distinct definition that may reach it. The Definition objects are
mutable and may be used by subsequent analyses to further annotate data like
static type and value information.
The analysis also attaches the set of the symbols defined at the entry of
control flow statements.
Requires activity analysis.
"""
import weakref
import gast
from nvidia.dali._autograph.pyct import anno
from nvidia.dali._autograph.pyct import cfg
from nvidia.dali._autograph.pyct import transformer
class Definition(object):
"""Definition objects describe a unique definition of a variable.
Subclasses of this may be used by passing an appropriate factory function to
resolve.
Attributes:
param_of: Optional[ast.AST]
directives: Dict, optional definition annotations
"""
def __init__(self):
self.param_of = None
self.directives = {}
def __repr__(self):
return '%s[%d]' % (self.__class__.__name__, id(self))
class _NodeState(object):
"""Abstraction for the state of the CFG walk for reaching definition analysis.
This is a value type. Only implements the strictly necessary operators.
Attributes:
value: Dict[qual_names.QN, Set[Definition, ...]], the defined symbols and
their possible definitions
"""
def __init__(self, init_from=None):
if init_from:
if isinstance(init_from, _NodeState):
self.value = {
s: set(other_infos) for s, other_infos in init_from.value.items()
}
elif isinstance(init_from, dict):
self.value = {s: set((init_from[s],)) for s in init_from}
else:
assert False, init_from
else:
self.value = {}
def __eq__(self, other):
if frozenset(self.value.keys()) != frozenset(other.value.keys()):
return False
ret = all(self.value[s] == other.value[s] for s in self.value)
return ret
def __ne__(self, other):
return not self.__eq__(other)
def __or__(self, other):
assert isinstance(other, _NodeState)
result = _NodeState(self)
for s, other_infos in other.value.items():
if s in result.value:
result.value[s].update(other_infos)
else:
result.value[s] = set(other_infos)
return result
def __sub__(self, other):
assert isinstance(other, set)
result = _NodeState(self)
for s in other:
result.value.pop(s, None)
return result
def __repr__(self):
return 'NodeState[%s]=%s' % (id(self), repr(self.value))
class Analyzer(cfg.GraphVisitor):
"""CFG visitor that determines reaching definitions at statement level."""
def __init__(self, graph, definition_factory):
self._definition_factory = definition_factory
super(Analyzer, self).__init__(graph)
self.gen_map = {}
def init_state(self, _):
return _NodeState()
def visit_node(self, node):
prev_defs_out = self.out[node]
defs_in = _NodeState()
for n in node.prev:
defs_in |= self.out[n]
if anno.hasanno(node.ast_node, anno.Static.SCOPE):
node_scope = anno.getanno(node.ast_node, anno.Static.SCOPE)
# The definition objects created by each node must be singletons because
# their ids are used in equality checks.
if node not in self.gen_map:
node_symbols = {}
# Every binding operation (assign, nonlocal, global, etc.) counts as a
# definition, with the exception of del, which only deletes without
# creating a new variable.
newly_defined = ((node_scope.bound | node_scope.globals) -
node_scope.deleted)
for s in newly_defined:
def_ = self._definition_factory()
node_symbols[s] = def_
# Every param receives a definition. Params are not necessarily
# considered as "modified".
for s, p in node_scope.params.items():
def_ = self._definition_factory()
def_.param_of = weakref.ref(p)
node_symbols[s] = def_
self.gen_map[node] = _NodeState(node_symbols)
gen = self.gen_map[node]
kill = node_scope.modified | node_scope.deleted
defs_out = gen | (defs_in - kill)
else:
assert self.can_ignore(node), (node.ast_node, node)
defs_out = defs_in
self.in_[node] = defs_in
self.out[node] = defs_out
return prev_defs_out != defs_out
class TreeAnnotator(transformer.Base):
"""AST visitor that annotates each symbol name with its reaching definitions.
Simultaneously, the visitor runs the dataflow analysis on each function node,
accounting for the effect of closures. For example:
def foo():
bar = 1
def baz():
# bar = 1 reaches here
"""
def __init__(self, source_info, graphs, definition_factory):
super(TreeAnnotator, self).__init__(source_info)
self.allow_skips = False
self.definition_factory = definition_factory
self.graphs = graphs
self.current_analyzer = None
self.current_cfg_node = None
def visit_FunctionDef(self, node):
parent_analyzer = self.current_analyzer
subgraph = self.graphs[node]
analyzer = Analyzer(subgraph, self.definition_factory)
analyzer.visit_forward()
# Recursively process any remaining subfunctions.
self.current_analyzer = analyzer
node.args = self.visit(node.args)
node.body = self.visit_block(node.body)
self.current_analyzer = parent_analyzer
return node
def visit_Name(self, node):
if self.current_analyzer is None:
# Names may appear outside function defs - for example in class
# definitions.
return node
analyzer = self.current_analyzer
cfg_node = self.current_cfg_node
assert cfg_node is not None, ('name node, %s, outside of any statement?'
% node.id)
qn = anno.getanno(node, anno.Basic.QN)
if isinstance(node.ctx, gast.Load):
anno.setanno(node, anno.Static.DEFINITIONS,
tuple(analyzer.in_[cfg_node].value.get(qn, ())))
else:
anno.setanno(node, anno.Static.DEFINITIONS,
tuple(analyzer.out[cfg_node].value.get(qn, ())))
return node
def _aggregate_predecessors_defined_in(self, node):
preds = self.current_analyzer.graph.stmt_prev[node]
node_defined_in = set()
for p in preds:
node_defined_in |= set(self.current_analyzer.out[p].value.keys())
anno.setanno(node, anno.Static.DEFINED_VARS_IN, frozenset(node_defined_in))
def visit_If(self, node):
self._aggregate_predecessors_defined_in(node)
return self.generic_visit(node)
def visit_For(self, node):
self._aggregate_predecessors_defined_in(node)
# Manually accounting for the shortcoming described in
# cfg.AstToCfg.visit_For.
parent = self.current_cfg_node
self.current_cfg_node = self.current_analyzer.graph.index[node.iter]
node.target = self.visit(node.target)
self.current_cfg_node = parent
node.iter = self.visit(node.iter)
node.body = self.visit_block(node.body)
node.orelse = self.visit_block(node.orelse)
return node
def visit_While(self, node):
self._aggregate_predecessors_defined_in(node)
return self.generic_visit(node)
def visit_Try(self, node):
self._aggregate_predecessors_defined_in(node)
return self.generic_visit(node)
def visit_ExceptHandler(self, node):
self._aggregate_predecessors_defined_in(node)
# TODO(mdan): Also track the exception type / name symbols.
node.body = self.visit_block(node.body)
return node
def visit(self, node):
parent = self.current_cfg_node
if (self.current_analyzer is not None and
node in self.current_analyzer.graph.index):
self.current_cfg_node = self.current_analyzer.graph.index[node]
node = super(TreeAnnotator, self).visit(node)
self.current_cfg_node = parent
return node
def resolve(node, source_info, graphs, definition_factory=Definition):
"""Resolves reaching definitions for each symbol.
Args:
node: ast.AST
source_info: transformer.SourceInfo
graphs: Dict[ast.FunctionDef, cfg.Graph]
definition_factory: Callable[[], Definition]
Returns:
ast.AST
"""
visitor = TreeAnnotator(source_info, graphs, definition_factory)
node = visitor.visit(node)
return node
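# Hedged usage sketch (not part of the original file). The typical pyct
# pipeline chains the analyses roughly as below; construction of the
# transformer.Context argument is omitted because its fields vary by version.
#
#   from nvidia.dali._autograph.pyct import cfg, qual_names
#   from nvidia.dali._autograph.pyct.static_analysis import activity
#
#   node = qual_names.resolve(node)     # annotate qualified names (required)
#   graphs = cfg.build(node)            # per-function CFGs
#   node = activity.resolve(node, ctx)  # activity analysis (required, see above)
#   node = resolve(node, ctx, graphs)   # attaches anno.Static.DEFINITIONS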
|
DALI-main
|
dali/python/nvidia/dali/_autograph/pyct/static_analysis/reaching_definitions.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Conversion to A-normal form.
The general idea of A-normal form is that every intermediate value is
explicitly named with a variable. For more, see
https://en.wikipedia.org/wiki/A-normal_form.
The specific converters used here are based on Python AST semantics as
documented at https://greentreesnakes.readthedocs.io/en/latest/.
"""
import collections
import gast
import six
from nvidia.dali._autograph.pyct import gast_util
from nvidia.dali._autograph.pyct import templates
from nvidia.dali._autograph.pyct import transformer
# TODO(mdan): Replace with naming.Namer.
class DummyGensym(object):
"""A dumb gensym that suffixes a stem by sequential numbers from 1000."""
def __init__(self):
# A proper implementation needs to account for:
# * ctx.info.namespace
# * all the symbols defined in the AST
# * the symbols generated so far
self._idx = 0
def new_name(self, stem='tmp'):
self._idx += 1
return stem + '_' + str(1000 + self._idx)
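# Illustration (not part of the original file): names are drawn sequentially
# starting above 1000, e.g.
#   g = DummyGensym()
#   g.new_name()        # -> 'tmp_1001'
#   g.new_name('cond')  # -> 'cond_1002'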
REPLACE = lambda _1, _2, _3: True
LEAVE = lambda _1, _2, _3: False
ANY = object()
class ASTEdgePattern(collections.namedtuple(
'ASTEdgePattern', ['parent', 'field', 'child'])):
"""A pattern defining a type of AST edge.
This consists of three components:
- The type of the parent node, checked with isinstance,
- The name of the field, checked with string equality, and
- The type of the child node, also checked with isinstance.
If all three match, the whole pattern is considered to match.
In all three slots, the special value `anf.ANY` is treated as "match
anything". The internal nodes are produced from the `gast` library rather
than the standard `ast` module, which may affect `isinstance` checks.
"""
__slots__ = ()
def matches(self, parent, field, child):
"""Computes whether this pattern matches the given edge."""
if self.parent is ANY or isinstance(parent, self.parent):
pass # OK
else:
return False
if self.field is ANY or field == self.field:
pass # OK
else:
return False
return self.child is ANY or isinstance(child, self.child)
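# Illustrative sketch (not part of the original file): a pattern that matches
# only the `test` edge of `if` statements, as used in the configurations
# accepted by `transform` below.
def _demo_ast_edge_pattern():
    pattern = ASTEdgePattern(gast.If, 'test', ANY)
    dummy_if = gast.If(test=gast.Constant(value=True, kind=None), body=[], orelse=[])
    assert pattern.matches(dummy_if, 'test', dummy_if.test)
    assert not pattern.matches(dummy_if, 'body', dummy_if.body)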
class AnfTransformer(transformer.Base):
"""Performs the conversion to A-normal form (ANF)."""
# The algorithm is a postorder recursive tree walk. Any given node A may, in
# general, require creation of a series B of Assign statements, which compute
# and explicitly name the intermediate values needed to compute the value of
# A. If A was already a statement, it can be replaced with the sequence B +
# [A]. If A was an expression, B needs to be propagated up the tree until a
# statement is encountered. Since the `ast.NodeTransformer` framework makes
# no provision for subtraversals returning side information, this class
# accumulates the sequence B in an instance variable.
# The only other subtlety is that some Python statements (like `if`) have both
# expression fields (`test`) and statement list fields (`body` and `orelse`).
# Any additional assignments needed to name all the intermediate values in the
# `test` can be prepended to the `if` node, but assignments produced by
# processing the `body` and the `orelse` need to be kept together with them,
# and not accidentally lifted out of the `if`.
def __init__(self, ctx, config):
"""Creates an ANF transformer.
Args:
ctx: transformer.Context
config: Configuration
"""
super(AnfTransformer, self).__init__(ctx)
if config is None:
# These could be pulled out, but are generally considered to already be in
# A-normal form. Thus they are left in by default, but could be pulled
# out if the configuration calls for it.
# Name is here to cover True, False, and None in Python 2
literal_node_types = (gast.Constant, gast.Name,)
self._overrides = [
(ASTEdgePattern(ANY, ANY, literal_node_types), LEAVE),
(ASTEdgePattern(ANY, ANY, gast.expr), REPLACE)]
else:
self._overrides = config
self._gensym = DummyGensym()
self._pending_statements = []
def _consume_pending_statements(self):
ans = self._pending_statements
self._pending_statements = []
return ans
def _add_pending_statement(self, stmt):
self._pending_statements.append(stmt)
def _match(self, pattern, parent, field, child):
if pattern is ANY:
return True
else:
return pattern.matches(parent, field, child)
def _should_transform(self, parent, field, child):
for pat, result in self._overrides:
if self._match(pat, parent, field, child):
return result(parent, field, child)
# Fell off the end of the pattern list: do not transform
return False
def _do_transform_node(self, node):
temp_name = self._gensym.new_name()
temp_assign = templates.replace(
'temp_name = expr', temp_name=temp_name, expr=node)[0]
self._add_pending_statement(temp_assign)
answer = templates.replace('temp_name', temp_name=temp_name)[0]
return answer
def _ensure_node_in_anf(self, parent, field, node):
"""Puts `node` in A-normal form, by replacing it with a variable if needed.
The exact definition of A-normal form is given by the configuration. The
parent and the incoming field name are only needed because the configuration
may be context-dependent.
Args:
parent: An AST node, the parent of `node`.
field: The field name under which `node` is the child of `parent`.
node: An AST node, potentially to be replaced with a variable reference.
Returns:
node: An AST node; the argument if transformation was not necessary,
or the new variable reference if it was.
"""
if node is None:
return node
if _is_trivial(node):
return node
if isinstance(node, list):
# If something's field was actually a list, e.g., variadic arguments.
return [self._ensure_node_in_anf(parent, field, n) for n in node]
if isinstance(node, gast.keyword):
node.value = self._ensure_node_in_anf(parent, field, node.value)
return node
if isinstance(node, (gast.Starred, gast.withitem, gast.slice)):
# These nodes aren't really extractable in their own right, but their
# subnodes might be. Propagate the parent and field name to the child
# nodes, instead of querying the configuration for children of, e.g.,
# gast.Starred.
return self._ensure_fields_in_anf(node, parent, field)
if self._should_transform(parent, field, node):
return self._do_transform_node(node)
else:
return node
def _ensure_fields_in_anf(self, node, parent=None, super_field=None):
for field in node._fields:
if field.startswith('__'):
continue
parent_supplied = node if parent is None else parent
field_supplied = field if super_field is None else super_field
setattr(node, field, self._ensure_node_in_anf(
parent_supplied, field_supplied, getattr(node, field)))
return node
def _visit_strict_statement(self, node, children_ok_to_transform=True):
assert not self._pending_statements
node = self.generic_visit(node)
if children_ok_to_transform:
self._ensure_fields_in_anf(node)
results = self._consume_pending_statements()
results.append(node)
return results
def _visit_trivial_only_statement(self, node, msg):
assert not self._pending_statements
node = self.generic_visit(node)
self._ensure_fields_in_anf(node)
if self._pending_statements:
raise ValueError(msg)
else:
return node
def _visit_strict_expression(self, node):
node = self.generic_visit(node)
self._ensure_fields_in_anf(node)
return node
def _visit_trivial_only_expression(self, node, msg):
k = len(self._pending_statements)
node = self.generic_visit(node)
self._ensure_fields_in_anf(node)
# This check relies on there being no opportunities to consume pending
# statements while traversing children of an expression.
if len(self._pending_statements) != k:
raise ValueError(msg)
else:
return node
# Note on code order: These are listed in the same order as the grammar
# elements on https://github.com/serge-sans-paille/gast
# FunctionDef, AsyncFunctionDef, and ClassDef should be correct by default.
def visit_Return(self, node):
return self._visit_strict_statement(node)
def visit_Delete(self, node):
return self._visit_strict_statement(node, children_ok_to_transform=False)
def visit_Assign(self, node):
return self._visit_strict_statement(node, children_ok_to_transform=False)
def visit_AugAssign(self, node):
return self._visit_strict_statement(node, children_ok_to_transform=False)
def visit_Print(self, node):
return self._visit_strict_statement(node)
def visit_For(self, node):
assert not self._pending_statements
# It's important to visit node.iter first, because any statements created
# thereby need to live outside the body.
self.visit(node.iter)
node.iter = self._ensure_node_in_anf(node, 'iter', node.iter)
iter_stmts = self._consume_pending_statements()
# This generic_visit will revisit node.iter, but that is correct because by
# this point the node.iter link has been checked. It may be somewhat
# expensive if the configuration didn't call for transforming node.iter, as
# then it may be large and will be uselessly transformed again. This
# behavior is what causes the documented effect that configuration callables
    # may be invoked more than once on the same links; if the code is rewritten
# not to do that (anywhere), the docstring of `transform` should be updated.
node = self.generic_visit(node)
assert not self._pending_statements
iter_stmts.append(node)
return iter_stmts
def visit_AsyncFor(self, node):
msg = ('Nontrivial AsyncFor nodes not supported yet '
'(need to think through the semantics).')
return self._visit_trivial_only_statement(node, msg)
def visit_While(self, node):
assert not self._pending_statements
self.visit(node.test)
node.test = self._ensure_node_in_anf(node, 'test', node.test)
if self._pending_statements:
msg = ('While with nontrivial test not supported yet '
'(need to avoid precomputing the test).')
raise ValueError(msg)
# If traversing node.test yielded no statements extracted, the generic visit
# will do the right thing.
return self.generic_visit(node)
def visit_If(self, node):
assert not self._pending_statements
# It's important to visit node.test first, because any statements created
# thereby need to live outside the body.
self.visit(node.test)
node.test = self._ensure_node_in_anf(node, 'test', node.test)
condition_stmts = self._consume_pending_statements()
# This generic_visit will revisit node.test, but that is correct because by
# this point the node.test link has been checked. It may be somewhat
# expensive if the configuration didn't call for transforming node.test, as
# then it may be large and will be uselessly transformed again. This
# happens in several places.
node = self.generic_visit(node)
assert not self._pending_statements
condition_stmts.append(node)
return condition_stmts
def visit_With(self, node):
assert not self._pending_statements
# It's important to visit node.items first, because any statements created
# thereby need to live outside the body.
for item in node.items:
self.visit(item)
node.items = [self._ensure_node_in_anf(node, 'items', n)
for n in node.items]
contexts_stmts = self._consume_pending_statements()
# This generic_visit will revisit node.items, but that is correct because by
# this point the node.items link has been checked. It may be somewhat
# expensive if the configuration didn't call for transforming node.items, as
# then it may be large and will be uselessly transformed again. This
# happens in several places.
node = self.generic_visit(node)
assert not self._pending_statements
contexts_stmts.append(node)
return contexts_stmts
def visit_AsyncWith(self, node):
msg = ('Nontrivial AsyncWith nodes not supported yet '
'(need to think through the semantics).')
return self._visit_trivial_only_statement(node, msg)
def visit_Raise(self, node):
return self._visit_strict_statement(node)
# Try should be correct by default.
def visit_Assert(self, node):
msg = ('Nontrivial Assert nodes not supported yet '
'(need to avoid computing the test when assertions are off, and '
'avoid computing the irritant when the assertion does not fire).')
return self._visit_trivial_only_statement(node, msg)
# Import and ImportFrom should be correct by default.
def visit_Exec(self, node):
return self._visit_strict_statement(node)
# Global and Nonlocal should be correct by default.
def visit_Expr(self, node):
return self._visit_strict_statement(node, children_ok_to_transform=False)
# Pass, Break, and Continue should be correct by default.
def visit_BoolOp(self, node):
msg = ('Nontrivial BoolOp nodes not supported yet '
'(need to preserve short-circuiting semantics).')
return self._visit_trivial_only_expression(node, msg)
def visit_BinOp(self, node):
return self._visit_strict_expression(node)
def visit_UnaryOp(self, node):
return self._visit_strict_expression(node)
def visit_Lambda(self, node):
msg = ('Nontrivial Lambda nodes not supported '
'(cannot insert statements into lambda bodies).')
return self._visit_trivial_only_expression(node, msg)
def visit_IfExp(self, node):
msg = ('Nontrivial IfExp nodes not supported yet '
'(need to convert to If statement, to evaluate branches lazily '
'and insert statements into them).')
return self._visit_trivial_only_expression(node, msg)
def visit_Dict(self, node):
return self._visit_strict_expression(node)
def visit_Set(self, node):
return self._visit_strict_expression(node)
def visit_ListComp(self, node):
msg = ('ListComp nodes not supported '
'(need to convert to a form that tolerates '
'assignment statements in clause bodies).')
raise ValueError(msg)
def visit_SetComp(self, node):
msg = ('SetComp nodes not supported '
'(need to convert to a form that tolerates '
'assignment statements in clause bodies).')
raise ValueError(msg)
def visit_DictComp(self, node):
msg = ('DictComp nodes not supported '
'(need to convert to a form that tolerates '
'assignment statements in clause bodies).')
raise ValueError(msg)
def visit_GeneratorExp(self, node):
msg = ('GeneratorExp nodes not supported '
'(need to convert to a form that tolerates '
'assignment statements in clause bodies).')
raise ValueError(msg)
def visit_Await(self, node):
msg = ('Nontrivial Await nodes not supported yet '
'(need to think through the semantics).')
return self._visit_trivial_only_expression(node, msg)
def visit_Yield(self, node):
return self._visit_strict_expression(node)
def visit_YieldFrom(self, node):
msg = ('Nontrivial YieldFrom nodes not supported yet '
'(need to unit-test them in Python 2).')
return self._visit_trivial_only_expression(node, msg)
def visit_Compare(self, node):
if len(node.ops) > 1:
msg = ('Multi-ary compare nodes not supported yet '
'(need to preserve short-circuiting semantics).')
raise ValueError(msg)
return self._visit_strict_expression(node)
def visit_Call(self, node):
return self._visit_strict_expression(node)
def visit_Repr(self, node):
msg = ('Nontrivial Repr nodes not supported yet '
'(need to research their syntax and semantics).')
return self._visit_trivial_only_expression(node, msg)
def visit_FormattedValue(self, node):
msg = ('Nontrivial FormattedValue nodes not supported yet '
'(need to unit-test them in Python 2).')
return self._visit_trivial_only_expression(node, msg)
def visit_JoinedStr(self, node):
msg = ('Nontrivial JoinedStr nodes not supported yet '
'(need to unit-test them in Python 2).')
return self._visit_trivial_only_expression(node, msg)
def visit_Attribute(self, node):
return self._visit_strict_expression(node)
def visit_Subscript(self, node):
return self._visit_strict_expression(node)
# Starred and Name are correct by default, because the right thing to do is to
# just recur.
def visit_List(self, node):
node = self.generic_visit(node)
if not isinstance(node.ctx, gast.Store):
self._ensure_fields_in_anf(node)
return node
def visit_Tuple(self, node):
node = self.generic_visit(node)
if not isinstance(node.ctx, gast.Store):
self._ensure_fields_in_anf(node)
return node
def _is_py2_name_constant(node):
return isinstance(node, gast.Name) and node.id in ['True', 'False', 'None']
def _is_trivial(node):
"""Returns whether to consider the given node 'trivial'.
The definition of 'trivial' is a node that can't meaningfully be pulled out
into its own assignment statement.
This is surprisingly difficult to do robustly across versions of Python and
gast, as the parsing of constants has changed, if I may, constantly.
Args:
node: An AST node to check for triviality
Returns:
trivial: A Python `bool` indicating whether the node is trivial.
"""
trivial_node_types = (
# Variable names
gast.Name,
# Non-nodes that show up as AST fields
bool, six.string_types,
# Binary operators
gast.Add, gast.Sub, gast.Mult, gast.Div, gast.Mod, gast.Pow,
gast.LShift, gast.RShift, gast.BitOr, gast.BitXor, gast.BitAnd,
gast.FloorDiv,
# Unary operators
gast.Invert, gast.Not, gast.UAdd, gast.USub,
# Comparison operators
gast.Eq, gast.NotEq, gast.Lt, gast.LtE, gast.Gt, gast.GtE,
gast.Is, gast.IsNot, gast.In, gast.NotIn,
# Other leaf nodes that don't make sense standalone.
gast.expr_context,
)
if isinstance(node, trivial_node_types) and not _is_py2_name_constant(node):
return True
if gast_util.is_ellipsis(node):
return True
return False
def transform(node, ctx, config=None):
"""Converts the given node to A-normal form (ANF).
The general idea of A-normal form: https://en.wikipedia.org/wiki/A-normal_form
The specific converters used here are based on Python AST semantics as
documented at https://greentreesnakes.readthedocs.io/en/latest/.
What exactly should be considered A-normal form for any given programming
language is not completely obvious. The transformation defined here is
therefore configurable as to which syntax to replace with a fresh variable and
which to leave be. The configuration is intentionally flexible enough to
define very precise variable insertion transformations, should that be
desired.
The configuration is a list of syntax rules, each of which is a 2-tuple:
- An `ASTEdgePattern` (which see) defining a type of AST edge, and
- Whether to transform children of such edges.
The special object `anf.ANY` may be used as a pattern that matches all edges.
Each replacement directive is one of three possible things:
- The object `anf.REPLACE`, meaning "Replace this child node with a variable",
- The object `anf.LEAVE`, meaning "Do not replace this child node with a
variable", or
- A Python callable. If a callable, it is called with the parent node, the
field name, and the child node, and must compute a boolean indicating
whether to transform the child node or not. The callable is free to use
whatever context information it chooses. The callable may be invoked more
than once on the same link, and must produce the same answer each time.
The syntax rules are tested in order, and the first match governs. If no rule
matches, the node is not transformed.
The above rules notwithstanding,
- Variable references are never replaced with (fresh) variables, as that would
accomplish nothing.
- The left-hand children of Assign and AugAssign nodes, and the children of
Del nodes, are never replaced with variables, as that would break their
semantics.
- The right-hand children of Assign nodes are never replaced with variables,
as the original assignment would still have to be present in the result
to define the new variable. (That is, there's no point in transforming
`x = sin(y)` into `tmp = sin(y); x = tmp`.)
- The right-hand children of AugAssign nodes are never replaced with variables
either, but only because the difference from Assign was considered a
potential source of confusion (and it would have been slightly awkward in
the code to treat the RHS differently than the LHS).
- Various special-purpose AST nodes are not exposed to the configuration, lest
the transform produce invalid syntax like, e.g., `tmp = +; x = 1 tmp 2`.
For example, the configuration
```python
[(anf.ASTEdgePattern(anf.ANY, anf.ANY, gast.expr), anf.REPLACE)]
```
gives explicit fresh names to all expressions regardless of context (except as
outlined above), whereas
```python
[(anf.ASTEdgePattern(gast.If, "test", anf.ANY), anf.REPLACE)]
```
only transforms the conditionals of `if` statements (but not, e.g., `while`).
If no configuration is supplied, the default behavior is to transform all
expressions except literal constants, which is defined as a configuration as
```python
# For Python 3, and gast library versions before 0.3
literals = (gast.Num, gast.Str, gast.Bytes, gast.NameConstant)
[(anf.ASTEdgePattern(anf.ANY, anf.ANY, literals), anf.LEAVE),
(anf.ASTEdgePattern(anf.ANY, anf.ANY, gast.expr), anf.REPLACE)]
```
Args:
node: The node to transform.
ctx: transformer.EntityInfo. TODO(mdan): What information does this
argument provide?
    config: Optional ANF configuration. If omitted, ANF replaces all expressions
      except literal constants.
"""
return AnfTransformer(ctx, config).visit(node)
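# Hedged illustration (not part of the original file): with the default
# configuration, nested non-literal expressions are hoisted into fresh
# temporaries, roughly:
#
#   Before:   x = f(g(a) + 1)
#   After:    tmp_1001 = g(a)
#             tmp_1002 = tmp_1001 + 1
#             x = f(tmp_1002)
#
# Literals such as `1` and bare names such as `a` are left in place.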
|
DALI-main
|
dali/python/nvidia/dali/_autograph/pyct/common_transformers/anf.py
|
DALI-main
|
dali/python/nvidia/dali/_autograph/pyct/common_transformers/__init__.py
|
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
DALI-main
|
dali/python/nvidia/dali/_autograph/pyct/testing/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module with basic entity definitions for testing."""
def simple_function(x):
"""Docstring."""
return x # comment
def nested_functions(x):
"""Docstring."""
def inner_fn(y):
return y
return inner_fn(x)
def function_with_print():
print('foo')
simple_lambda = lambda: None
class SimpleClass(object):
def simple_method(self):
return self
def method_with_print(self):
print('foo')
def function_with_multiline_call(x):
"""Docstring."""
return range(
x,
x + 1,
)
def basic_decorator(f):
return f
@basic_decorator
@basic_decorator
def decorated_function(x):
if x > 0:
return 1
return 2
|
DALI-main
|
dali/python/nvidia/dali/_autograph/pyct/testing/basic_definitions.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Random code generation for testing/fuzzing."""
# pylint: disable=invalid-name
import random
import string
import gast
import numpy as np
from nvidia.dali._autograph.pyct import gast_util
from nvidia.dali._autograph.pyct import templates
class NodeSampler(object):
sample_map = None
def sample(self):
nodes, magnitudes = zip(*self.sample_map.items())
return np.random.choice(
nodes, p=np.array(magnitudes, dtype='float32') / np.sum(magnitudes))
class StatementSampler(NodeSampler):
sample_map = dict((
(gast.Assign, 10),
(gast.Print, 1),
(gast.If, 2),
(gast.While, 2),
(gast.For, 0),
))
class ExpressionSampler(NodeSampler):
sample_map = dict((
(gast.UnaryOp, 1),
(gast.BinOp, 8),
(gast.Name, 1),
(gast.Call, 0),
))
class CompareSampler(NodeSampler):
sample_map = dict((
(gast.Eq, 1),
(gast.NotEq, 1),
(gast.Lt, 1),
(gast.LtE, 1),
(gast.Gt, 1),
(gast.GtE, 1),
(gast.Is, 1),
(gast.IsNot, 1),
))
class BinaryOpSampler(NodeSampler):
sample_map = dict((
(gast.Add, 1),
(gast.Sub, 1),
(gast.Mult, 1),
(gast.Div, 1),
(gast.FloorDiv, 1),
(gast.Mod, 1),
(gast.Pow, 1),
))
class UnaryOpSampler(NodeSampler):
sample_map = dict(((gast.USub, 1), (gast.UAdd, 0)))
class NameSampler(NodeSampler):
sample_map = dict((
('new', 1),
('existing', 1),
))
N_CONTROLFLOW_STATEMENTS = 10
N_FUNCTIONDEF_STATEMENTS = 10
class CodeGenerator(object):
"""Generate random syntactically-valid Python ASTs."""
def __init__(self, max_depth=3, depth=0):
self.max_depth = max_depth
self.depth = depth
def generate_statement(self):
"""Generate a statement node, dispatching to the correct class method."""
desired_node = StatementSampler().sample()
self.depth += 1
# Enforce some constraints on generating statements.
# E.g., if statements need at least 3 readable variables.
# If we fail to satisfy our constraints, draw another sample.
if desired_node in (gast.While, gast.For, gast.If):
if self.depth > self.max_depth:
return self.generate_statement()
# Go get the generator method and run it
method = 'generate_' + desired_node.__name__
visitor = getattr(self, method)
node = visitor()
self.depth -= 1
return node
def sample_node_list(self, low, high, generator):
"""Generate a list of statements of random length.
Args:
low: Fewest number of statements to generate.
high: Highest number of statements to generate.
generator: Function to call to generate nodes.
Returns:
A list of statements.
"""
statements = []
for _ in range(np.random.randint(low, high)):
statements.append(generator())
return statements
def generate_Name(self, ctx=gast.Load()):
variable_name = '_' + ''.join(
random.choice(string.ascii_lowercase) for _ in range(4))
return gast.Name(variable_name, ctx=ctx, annotation=None)
def generate_BinOp(self):
# TODO(alexbw): convert to generate_expression when we get to limit
# expression depth.
op = BinaryOpSampler().sample()()
return gast.BinOp(self.generate_Name(), op, self.generate_Name())
def generate_Compare(self):
op = CompareSampler().sample()()
return gast.Compare(self.generate_Name(), [op], [self.generate_Name()])
def generate_UnaryOp(self):
operand = self.generate_Name()
op = UnaryOpSampler().sample()()
return gast.UnaryOp(op, operand)
def generate_expression(self):
desired_node = ExpressionSampler().sample()
# Go get the generator method and run it
method = 'generate_' + desired_node.__name__
generator = getattr(self, method)
return generator()
def generate_Assign(self):
"""Generate an Assign node."""
# Generate left-hand side
target_node = self.generate_Name(gast.Store())
# Generate right-hand side
value_node = self.generate_expression()
# Put it all together
node = gast_util.compat_assign(targets=[target_node], value=value_node, type_comment=None)
return node
def generate_If(self):
"""Generate an If node."""
test = self.generate_Compare()
# Generate true branch statements
body = self.sample_node_list(
low=1,
high=N_CONTROLFLOW_STATEMENTS // 2,
generator=self.generate_statement)
# Generate false branch statements
orelse = self.sample_node_list(
low=1,
high=N_CONTROLFLOW_STATEMENTS // 2,
generator=self.generate_statement)
node = gast.If(test, body, orelse)
return node
def generate_While(self):
"""Generate a While node."""
test = self.generate_Compare()
body = self.sample_node_list(
low=1, high=N_CONTROLFLOW_STATEMENTS, generator=self.generate_statement)
orelse = [] # not generating else statements
node = gast.While(test, body, orelse)
return node
def generate_Call(self):
raise NotImplementedError
def generate_Return(self):
return gast.Return(self.generate_expression())
def generate_Print(self):
return templates.replace('print(x)', x=self.generate_expression())[0]
def generate_FunctionDef(self):
"""Generate a FunctionDef node."""
# Generate the arguments, register them as available
arg_vars = self.sample_node_list(
low=2, high=10, generator=lambda: self.generate_Name(gast.Param()))
args = gast.arguments(arg_vars, None, [], [], None, [])
# Generate the function body
body = self.sample_node_list(
low=1, high=N_FUNCTIONDEF_STATEMENTS, generator=self.generate_statement)
body.append(self.generate_Return())
fn_name = self.generate_Name().id
node = gast.FunctionDef(fn_name, args, body, (), None)
return node
def generate_random_functiondef():
return CodeGenerator().generate_FunctionDef()
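# Hedged usage sketch (not part of the original file): the generated
# FunctionDef is a regular (g)AST node and can be inspected with the standard
# ast helpers, e.g.
#   import ast
#   fn_node = generate_random_functiondef()
#   print(ast.dump(fn_node))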
|
DALI-main
|
dali/python/nvidia/dali/_autograph/pyct/testing/codegen.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module with test decorators."""
import functools
def wrapping_decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
return f(*args, **kwargs)
return wrapper
def standalone_decorator(f):
def standalone_wrapper(*args, **kwargs):
return f(*args, **kwargs)
return standalone_wrapper
def functional_decorator():
def decorator(f):
def functional_wrapper(*args, **kwargs):
return f(*args, **kwargs)
return functional_wrapper
return decorator
|
DALI-main
|
dali/python/nvidia/dali/_autograph/pyct/testing/decorators.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import json
import struct
def float_to_hex(f):
hex_val = hex(struct.unpack('<I', struct.pack('<f', f))[0])
hex_val = hex_val[2:]
return hex_val
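# Illustration (not part of the original file): scales are serialized as the
# raw IEEE-754 single-precision bit pattern in hexadecimal, e.g.
#   float_to_hex(1.0)       -> '3f800000'
#   float_to_hex(0.0078125) -> '3c000000'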
def export_to_trt_calib(filename, trt_version):
# Load precision config file
with open(filename, "r") as f:
json_dict = json.load(f)
# Create new files
with open(filename.replace(".json", "_calib.cache"),
"w") as f_calib, open(filename.replace(".json", "_layer_arg.txt"),
"w") as f_layer_precision_arg:
f_calib.write(f"TRT-{trt_version}-EntropyCalibration2\n")
int8_tensor_scales = json_dict["int8_tensor_scales"]
for layer_name, scale in int8_tensor_scales.items():
# Convert INT8 ranges to scales to HEX
scale_hex = float_to_hex(scale)
f_calib.write(f"{layer_name}: {scale_hex}\n")
fp16_nodes = json_dict["fp16_nodes"]
# Save list of all layers that need to run in FP16 for later use with TensorRT
f_layer_precision_list = [f"{x}:fp16" for x in fp16_nodes]
f_layer_precision_arg.write(",".join(f_layer_precision_list))
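# Hedged illustration (not part of the original file): given a precision
# config file "model.json" containing
#   {"int8_tensor_scales": {"conv1_out": 0.0078125}, "fp16_nodes": ["softmax"]}
# export_to_trt_calib("model.json", "8600") would write
#   model_calib.cache:
#     TRT-8600-EntropyCalibration2
#     conv1_out: 3c000000
#   model_layer_arg.txt:
#     softmax:fp16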
|
Deep-Learning-Accelerator-SW-main
|
tools/qdq-translator/utils.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
"""Parsing the ONNX model and retrieving the scaling factors."""
import logging
from argparse import ArgumentParser
import json
import os
from typing import Dict, Union, Set, List
import onnx
import onnx_graphsurgeon as gs
from onnx_graphsurgeon import Variable, Node
import onnxoptimizer
import numpy as np
from utils import export_to_trt_calib
DEFAULT_TRT_VERSION = 8600
WEIGHT_SCALE_TOLERANCE = 1e-3
DEFAULT_OPS_TO_INFER_ADJACENT_SCALES = {
'MaxPool', 'Flatten', 'Relu', 'Pad', 'Transpose', 'Reshape', 'Squeeze', 'Sigmoid'
}
ARGPARSER = ArgumentParser('Translate an ONNX model with Q/DQ nodes into an ONNX model'
' without Q/DQ nodes plus a JSON file with int8 tensor scales'
' and tensors that need to run at higher precision.'
' This script also generates a calibration cache file and the argument for'
' --layerPrecisions to use with trtexec.')
ARGPARSER.add_argument('--input_onnx_models',
'-i',
type=str,
required=True,
nargs='+',
help='A space-separated list of input ONNX models with Q/DQ nodes.')
ARGPARSER.add_argument('--output_dir',
'-o',
type=str,
default='./translated',
help='The output directory where the ONNX file(s) without'
' Q/DQ nodes and their respective precision configs are placed.'
' Additionally exports this info in a trtexec-friendly format.'
' Default: "./translated"')
ARGPARSER.add_argument('--trt_calib_version',
type=str,
default=DEFAULT_TRT_VERSION,
help=f'The TensorRT version used for exporting a trtexec-compatible calib'
f' cache. Default: "{DEFAULT_TRT_VERSION}".')
ARGPARSER.add_argument('--infer_average_pool_scales',
action='store_true',
help='If set, derive missing input or output scales of (Global)AveragePool and ReduceMean from'
' upstream or downstream tensors (assuming that they do not change). Can help with reducing'
' latency but may also result in lower accuracy compared to running more ops'
' at higher precision.')
ARGPARSER.add_argument('--infer_concat_scales',
action='store_true',
help='If set, derive missing input/output scales for Concat layer. Since concat is a multi-input'
' layer, the output scale is set to the maximum existing input scale. This may help'
' some models, such as Inception-V3, maintain their accuracy.')
ARGPARSER.add_argument('--infer_mul_scales',
action='store_true',
help='If set, derive missing scales for Mul operation.'
' Can help with reducing latency but may also result in lower accuracy compared to running more'
' ops at higher precision.')
ARGPARSER.add_argument('--calibration_type',
type=str,
default='max',
                       help='If the maximum (max) calibration method is employed, a range check is'
                       ' performed on the weight scale values. In contrast, if the histogram'
                       ' calibration method is adopted, this check is skipped.')
ARGPARSER.add_argument('--addtl_ops_to_infer_adjacent_scales',
type=str,
nargs='+',
default=list(),
help='A space-separated list of ONNX operators whose scales can be'
' propagated upstream and/or downstream, in addition to the default'
f' ops used in this manner: {DEFAULT_OPS_TO_INFER_ADJACENT_SCALES}.')
ARGPARSER.add_argument('--rename_node_outputs',
action='store_true',
help='Rename each tensor to its producer node\'s name.')
ARGPARSER.add_argument(
'--add_unary_ew_scales_for_dla',
action='store_true',
help=
'For element-wise nodes consuming from Convs w/o Q/DQ nodes in between, insert unary scales.')
ARGPARSER.add_argument('--verbose', action='store_true', help='Increase verbosity.')
class QATModelParser:
"""
Parse the QAT ONNX model with the following steps:
1. Iterate the model to find/save the scaling factor for each activation.
2. Remove QuantizeLinear and DequantizeLinear nodes.
"""
_fake_quantize_per_tensor_affine_err_hint = (
"This is possibly caused by symbolic variables were not converted to tensors during PyTorch "
"to ONNX exporting. Please ensure that the exporting function follows the official "
"fake_quantize_per_tensor_affine exporting scheme: "
"https://github.com/pytorch/pytorch/blob/18dd5cd/torch/onnx/symbolic_opset10.py#L300-L308")
_fake_quantize_per_channel_affine_err_hint = (
"This is possibly caused by symbolic variables were converted to tensors, or `scale` variable "
"was cased to `Float` type during PyTorch to ONNX exporting. Please ensure that "
"the exporting function follows the official fake_quantize_per_channel_affine exporting "
"scheme: "
"https://github.com/pytorch/pytorch/blob/18dd5cd/torch/onnx/symbolic_opset13.py#L132-L146")
@staticmethod
def get_quantized_tensor(node: gs.Node, graph: gs.Graph) -> Union[gs.Variable, gs.Constant]:
"""
Return the input tensor from a quantize node.
If the quantize node is applied to model weights, this function returns the model weights tensor.
If the quantize node is applied to activation, this function returns the activation tensor.
Args:
node(gs.Node): QuantizeLinear node.
Returns:
(gs.Tensor): The tensor quantized by the quantize node.
"""
def convert_constant_to_variable_node(nodes_to_convert: List[gs.Node]):
"""
Ensure support for TF-generated ONNX models by converting selected gs.Constant nodes to gs.Variable
filled by a gs.Constant operator. The proposed fix updates node_input in-memory.
Error being fixed: "RuntimeError: Expected activation quantizer arguments to be Variables, but got
(<class 'onnx_graphsurgeon.ir.tensor.Variable'>, <class 'onnx_graphsurgeon.ir.tensor.Constant'>,
<class 'onnx_graphsurgeon.ir.tensor.Constant'>). This is possibly caused by symbolic variables
were not parsed to tensors during PyTorch to ONNX exporting. Please ensure that the exporting
function follows the official fake_quantize_per_tensor_affine exporting scheme.
Args:
nodes_to_convert(List[gs.Node]): list of nodes to convert.
"""
for node_input in nodes_to_convert:
# Copy Constant into temporary variable
node_input_copy = gs.Constant(name=node_input.name + "_constant",
values=node_input.values,
data_location=node_input.data_location)
# Make Constant Node and append to 'graph'
node_input_copy_node = gs.Node(op="Constant",
attrs={'value': node_input_copy},
inputs=[],
outputs=[node_input_copy])
graph.nodes.append(node_input_copy_node)
# Convert original Constant to Variable type with the copied Constant as input
node_input.to_variable(dtype=node_input.dtype, shape=node_input.shape)
node_input.inputs.append(node_input_copy_node)
if not node.op == "QuantizeLinear" or len(node.inputs) != 3:
raise RuntimeError(f"Expected QuantizeLinear with 3 arguments, but got {node.op} with "
f"{len(node.inputs)} arguments.")
# For weight quantizers: Exported as per-channel QuantLinear operators, `x` and
# `y_zero_point` are parsed as gs.Constants and `y_scale` is a gs.Variable filled by a
# gs.Constant operator.
if type(node.inputs[0]) == gs.Constant:
if type(node.inputs[1]) == gs.Constant:
convert_constant_to_variable_node([node.inputs[1]])
if (not type(node.inputs[1]) == gs.Variable and type(node.inputs[2]) == gs.Constant):
raise RuntimeError(
f"Expected weight quantizer scale and zero_point to be Variable and Constant, "
f"resp., but got {tuple(type(node_input) for node_input in node.inputs[1:])}. "
f"{QATModelParser._fake_quantize_per_channel_affine_err_hint}.")
if not (len(node.inputs[1].inputs) == 1 and node.inputs[1].inputs[0].op == "Constant"):
raise RuntimeError(
f"Expected QuantizeLinear operator's scale argument to be parsed as "
f"gs.Variable filled by gs.Constant operator, but got "
f"{node.inputs[1].inputs[0].op} operator. "
f"{QATModelParser._fake_quantize_per_channel_affine_err_hint}.")
quantize_tensor = node.inputs[0]
# For activation quantizers: Exported as per-tensor QuantizeLinear operators, `x`, `y_scale`
# and `y_zero_point` are all parsed to gs.Variables and scale and zero-point are filled by
# gs.Constant operators.
else:
nodes_to_convert = [
node_input for node_input in node.inputs if type(node_input) == gs.Constant
]
convert_constant_to_variable_node(nodes_to_convert)
if not all(type(node_input) == gs.Variable for node_input in node.inputs):
raise RuntimeError(
f"Expected activation quantizer arguments to be Variables, but got "
f"{tuple(type(node_input) for node_input in node.inputs)}. "
f"{QATModelParser._fake_quantize_per_tensor_affine_err_hint}.")
if not all(
len(var.inputs) == 1 and var.inputs[0].op == "Constant"
for var in node.inputs[1:]):
raise RuntimeError(
f"Expected QuantizeLinear operator's scale and zero_point arguments to be "
f"parsed as gs.Variables filled by gs.Constant operators, but got "
f"{tuple(var.inputs[0].op for var in node.inputs[1:])} operators. "
f"{QATModelParser._fake_quantize_per_tensor_affine_err_hint}.")
quantize_tensor = node.inputs[0]
return quantize_tensor
@staticmethod
def verify_weight_scales(tensor_data: np.ndarray, quant_scales: np.ndarray, node_name: str,
node_op: str):
"""Verify that the weight scales correspond to how TensorRT performs weight quantization."""
tensor_data_reshaped = tensor_data.copy()
if node_op == 'MatMul':
tensor_data_reshaped = tensor_data_reshaped.T
K = tensor_data_reshaped.shape[0]
tensor_data_reshaped = tensor_data_reshaped.reshape(K, -1)
t_min = tensor_data_reshaped.min(axis=1)
t_max = tensor_data_reshaped.max(axis=1)
dyn_range = np.max([np.abs(t_min), np.abs(t_max)], axis=0)
derived_scales = dyn_range / 127.0
assert derived_scales.size == quant_scales.size, f'node {node_name} scale sizes did not match: expected_scales.size={derived_scales.size}, quant_scales.size={quant_scales.size}.\n' \
'Note: Only per-channel/per-filter weight quantization is supported, unless you set `--calibration_type histogram` which skips this step (not recommended).'
assert np.isclose(derived_scales, quant_scales, atol=WEIGHT_SCALE_TOLERANCE).all(
), f'node {node_name} scales do not match: expected_scales={derived_scales},\n quant_scales={quant_scales}'
@staticmethod
def node_replace_input(node: gs.Node, name: str, tensor: Union[gs.Variable, gs.Constant],
quant_scales: np.ndarray, calibration_type: str):
""" For a given node, try to replace one of its inputs to the given tensor.
Args:
node(gs.Node): The node to replace the input.
name(str): For all the inputs of node, if it has one that matches with name,
replace it with given tensor.
tensor(Union[gs.Variable, gs.Constant]): Used to replace one of the inputs of node.
"""
for index, node_input in enumerate(node.inputs):
if node_input.name == name:
assert node_input.shape == tensor.shape
assert node_input.dtype == tensor.dtype
if isinstance(tensor, gs.Constant) and calibration_type == 'max':
QATModelParser.verify_weight_scales(tensor.values, quant_scales, node.name,
node.op)
node.inputs[index] = tensor
@staticmethod
def graph_replace_output(
graph: gs.Graph,
name: str,
tensor: Union[gs.Variable, gs.Constant],
):
""" For a graph, try to replace one of its outputs to the given tensor.
Args:
graph(gs.Graph): graph to replace its outputs.
name(str): For all the outputs of the graph, if it has one that matches with name,
replace it with given tensor.
tensor(Union[gs.Variable, gs.Constant]): Used to replace one of the outputs of graph.
"""
for index, graph_output in enumerate(graph.outputs):
if graph_output.name == name:
assert graph_output.shape == tensor.shape
assert graph_output.dtype == tensor.dtype
tensor.name = name
graph.outputs[index] = tensor
@staticmethod
def extract_qdq_scales(quantize_node: gs.Node, dequantize_node: gs.Node):
if dequantize_node.op != "DequantizeLinear":
raise ValueError("The dequantize node must be DequantizeLinear type.")
assert len(dequantize_node.outputs) == 1
if isinstance(quantize_node.inputs[1], gs.Constant):
quant_scales = quantize_node.inputs[1].values
else:
quant_scales = quantize_node.inputs[1].inputs[0].attrs["value"].values
if len(quantize_node.inputs) > 2 and isinstance(quantize_node.inputs[2], gs.Constant):
quant_zero_points = quantize_node.inputs[2].values
            assert (quant_zero_points == 0).all(), 'zero_points for QuantizeLinear must all be zero'
if isinstance(dequantize_node.inputs[1], gs.Constant):
dequant_scales = dequantize_node.inputs[1].values
else:
dequant_scales = dequantize_node.inputs[1].inputs[0].attrs["value"].values
if len(dequantize_node.inputs) > 2 and isinstance(quantize_node.inputs[2], gs.Constant):
dequant_zero_points = dequantize_node.inputs[2].values
assert (dequant_zero_points == 0).all()
assert (quant_scales == dequant_scales).all()
return quant_scales
@staticmethod
def extract_precision_config(graph: gs.Graph, calibration_type: str):
precision_config = {}
# Check for all zero weighted inputs of QuantizeLinear and
# Conv nodes and add to this set to skip for the later check
zero_check_skip = set()
for node in graph.nodes:
if node.op != "QuantizeLinear":
if node.op in ("Conv", "ConvTranspose", "Gemm"):
for i in node.inputs:
ti = i.copy()
if isinstance(i, gs.ir.tensor.Constant) and not ti.values.any():
zero_check_skip.add(i.name)
continue
for i in node.inputs:
# Make a shallow copy of input due to a bug in ONNX GS while calling i.values
ti = i.copy()
if i.name.endswith("weight") and not ti.values.any():
zero_check_skip.add(i.name)
quantize_node = node
# Ensure support for TF-generated ONNX models
quantize_tensor = QATModelParser.get_quantized_tensor(quantize_node, graph)
# Only quantized activation has input node.
is_activation_quantizer = len(quantize_tensor.inputs) > 0
is_input_quantizer = len(quantize_tensor.inputs) == 0 and quantize_tensor.name in [
i.name for i in graph.inputs
]
if is_activation_quantizer or is_input_quantizer:
# This assumes the quantization for activation is per-tensor quantization.
# Note, TensorRT_Optimization tools requires the key needs to be the name
# of the quantized tensor.
config_input = quantize_node.inputs[1].inputs[0].attrs["value"].values
# Scales need to be multiplied by the quant_max value for activation
# quantizer
precision_config[quantize_tensor.name] = float(config_input)
# The dequantize node is followed by the quantize node.
dequantize_node = node.o()
quant_scales = QATModelParser.extract_qdq_scales(quantize_node, dequantize_node)
dequantize_output = dequantize_node.outputs[0]
        # Find all nodes whose inputs include dequantize_output. Iterate over all
        # graph nodes because dequantize_node.outputs[0].outputs appears to only
        # return a single node (likely a bug).
for node in graph.nodes:
QATModelParser.node_replace_input(node, dequantize_output.name, quantize_tensor,
quant_scales, calibration_type)
        # If the DequantizeLinear is a termination node, we also need to replace one of the
        # graph's outputs with quantize_tensor.
QATModelParser.graph_replace_output(graph, dequantize_output.name, quantize_tensor)
# Remove quantize node and dequantize node
quantize_node.outputs.clear()
dequantize_node.outputs.clear()
# Check if no conv nodes other than the initial
# zero-weighted conv nodes have all zero weights
for node in graph.nodes:
if node.op in ("Conv", "ConvTranspose", "Gemm"):
for i in node.inputs:
ti = i.copy()
if (isinstance(i, gs.ir.tensor.Constant) and i.name not in zero_check_skip):
assert ti.values.any()
graph.cleanup()
graph.toposort()
return precision_config
@staticmethod
def find_with_input_node(graph, name):
for node in graph.nodes:
if len(node.inputs) > 0 and name in node.inputs:
return node
@staticmethod
def infer_mul_scales(graph, node, precision_config):
out_name = node.outputs[0].name
input_scales = [None, None]
output_scale = None
if out_name in precision_config.keys():
output_scale = precision_config[out_name]
if output_scale is None:
return
for ind, inp in enumerate(node.inputs):
if inp.name in precision_config.keys():
input_scales[ind] = precision_config[inp.name]
if input_scales[0] is not None and input_scales[1] is not None:
return
# if 2 of the 3 I/O scales (2 input scales, 1 output scale) are already known, the missing scale can be inferred through deduction.
if input_scales[0] is not None:
precision_config[node.inputs[1].name] = (output_scale / input_scales[0]) / 127.
elif input_scales[1] is not None:
precision_config[node.inputs[0].name] = (output_scale / input_scales[1]) / 127.
@staticmethod
def infer_unchanged_scales(
graph: gs.Graph,
precision_config: Dict[str, float],
downstream: bool,
ops_to_infer_adjacent_scales: Set[str],
zero_check_skip: Set[str]
):
graph.toposort()
node_list = graph.nodes
if not downstream:
node_list = node_list[::-1]
for node in node_list:
if node.op in ops_to_infer_adjacent_scales:
in_name = node.inputs[0].name
out_name = node.outputs[0].name
if downstream:
if node.op == "Concat":
max_input_scale = -np.inf
for concat_inp in node.inputs:
if concat_inp.name in precision_config.keys() and precision_config[concat_inp.name] >= max_input_scale:
max_input_scale = precision_config[concat_inp.name]
if max_input_scale != -np.inf:
precision_config[out_name] = max_input_scale
elif node.op == 'Sigmoid':
if out_name not in precision_config.keys() and 'Mul' in ops_to_infer_adjacent_scales and QATModelParser.find_with_input_node(graph, node.outputs[0]).op == 'Mul':
zero_check_skip.add(out_name)
precision_config[out_name] = 1/127.
else:
if in_name in precision_config.keys(
) and out_name not in precision_config.keys():
precision_config[out_name] = precision_config[in_name]
elif out_name in precision_config.keys() and in_name not in precision_config.keys(
):
if node.op == "Sigmoid": # update Sigmoid scales during backward pass.
if out_name in zero_check_skip:
continue
y = precision_config[out_name] * 127.
try:
precision_config[in_name] = np.log(y / (1 - y)) / 127.
except ZeroDivisionError:
print("Illegal Division by 0 detected!")
raise
elif node.op == "Concat":
for concat_inp in node.inputs:
if concat_inp.name not in precision_config.keys():
precision_config[concat_inp.name] = precision_config[out_name]
elif node.op == "Mul":
QATModelParser.infer_mul_scales(graph, node, precision_config)
else:
precision_config[in_name] = precision_config[out_name]
@staticmethod
def prepare_for_bn_fusion(graph: gs.Graph, rename_node_outputs: bool):
for node in graph.nodes:
# conv_bn_fusion in onnx_optimizer only works if there's a Conv bias
if node.op in {'Conv'} and len(node.inputs) == 2:
weight = node.inputs[1]
output_channels = weight.shape[0]
bias = gs.Constant(f'{node.name}_bias',
np.zeros(output_channels, dtype=weight.dtype))
node.inputs.append(bias)
# we want to preserve the bn's output tensor name (which most likely has the scaling factor)
if node.op in {'BatchNormalization'}:
orig_in_name = node.inputs[0].name
orig_out_name = node.outputs[0].name
node.inputs[0].name = orig_out_name
node.outputs[0].name = orig_in_name
if rename_node_outputs and len(node.inputs[0].inputs) == 1:
node.inputs[0].inputs[0].name = orig_out_name
@staticmethod
def fold_reshape_transpose_into_conv(graph: gs.Graph):
"""
Delete Reshape->Transpose between DQ and Conv layer:
Original: QL -> DQL (weight) -> Reshape -> Transpose -> Conv
Processed: (manually transposed QL) -> (manually transposed DQL) -> Conv
This happens in ConvTranspose and grouped Convolutions (where group > 1). This optimization is needed
in order to successfully build a TensorRT engine.
"""
def check_descendants(
node: Node,
pattern: List = ["DequantizeLinear", "Reshape", "Transpose", "Conv"]) -> bool:
""" Check if node's descendants follow a specific 'pattern'.
Args:
node (Node): initial node.
pattern (List): list containing node's descendants ([child, grandchild, great-grandchild, ...]).
Returns:
    Tuple of (has_pattern, first_child, conv_input_node):
        has_pattern (bool): whether the node's descendants follow the given pattern.
        first_child (Node): the node's first child (node.o()).
        conv_input_node (Node): the second-to-last descendant (input to the last op in 'pattern'), or None if the pattern does not match.
"""
node_out = [node.o()]
for i, p in enumerate(pattern):
if node_out[i].op == p:
node_out.append(node_out[i].o())
else:
return False, node_out[0], None
return True, node_out[0], node_out[-2]
def _transpose_gs_variable(variable_node, new_shape, new_dtype):
quant_var_new_shape = np.asarray(new_shape)
quant_var_new_shape = quant_var_new_shape.astype(np.int64).tolist()
quant_new_var_output = Variable(name=variable_node.name,
dtype=new_dtype,
shape=quant_var_new_shape)
quant_new_var_output.inputs = variable_node.inputs
quant_new_var_output.outputs = variable_node.outputs
variable_node.inputs.clear()
variable_node.outputs.clear()
# Bring the 4D Variable Matrix back to index 0.
# Otherwise, the following error is given:
# [E] [TRT] ModelImporter.cpp:726: ERROR: builtin_op_importers.cpp:1039 In function QuantDequantLinearHelper:
# [6] Assertion failed: scaleAllPositive && "Scale coefficients must all be positive"
quant_new_var_output.outputs[0].inputs.insert(
0, quant_new_var_output.outputs[0].inputs[-1])
quant_new_var_output.outputs[0].inputs.pop(-1)
return quant_new_var_output
# 1. Find all the QuantLinear nodes
quant_nodes = [node for node in graph.nodes if node.op == "QuantizeLinear"]
# 2. Remove Reshape->Transpose layers between DQ and Conv layers
pattern = ["DequantizeLinear", "Reshape", "Transpose", "Conv"]
for (i, node) in enumerate(quant_nodes):
has_pattern, node_out, node_conv_input = check_descendants(node, pattern)
if has_pattern:
# A. Transpose QuantizeLinear weights and output variable (3x3x960x1 -> 960x1x3x3)
quant_weights_tensor = node.inputs[0]
quant_weights_transposed = np.transpose(quant_weights_tensor.values, [2, 3, 0, 1])
node.inputs[0].values = quant_weights_transposed
quant_var_output = node.outputs[0]
node.outputs[0] = _transpose_gs_variable(
quant_var_output,
new_shape=quant_weights_tensor.shape,
new_dtype=np.int8 # The output of QuantLinear should be INT8
)
# B. Transpose DequantizeLinear, with output precision = QuantLinear input's type (np.fp32)
dequant_var_output = node_out.outputs[0]
node_out.outputs[0] = _transpose_gs_variable(dequant_var_output,
new_shape=quant_weights_tensor.shape,
new_dtype=quant_weights_tensor.dtype)
# C. Connect the output of DQLinear to Conv
# Note: input at index 0 is from the input quantization
node_conv_input.inputs[1] = node_out.outputs[0]
# 3. Remove unused nodes, and topologically sort the graph.
graph.cleanup().toposort()
new_model = gs.export_onnx(graph)
graph = gs.import_onnx(new_model)
return graph
@staticmethod
def fuse_with_conv_through_unary_scales(node: gs.Node, tensor: gs.Variable,
precision_config: Dict[str, float],
unary_scales_tensors: Set[str]):
can_fuse = False
is_conv_ew_fusion = node.op == 'Conv'
single_downstream_node = tensor.outputs[0] if len(tensor.outputs) == 1 else None
is_conv_ew_fusion = is_conv_ew_fusion and single_downstream_node is not None
is_conv_ew_fusion = is_conv_ew_fusion and single_downstream_node.op in {'Add'}
is_conv_ew_fusion = is_conv_ew_fusion and len(single_downstream_node.inputs) == 2
other_input = single_downstream_node.inputs[1] if is_conv_ew_fusion else None
is_conv_ew_fusion = is_conv_ew_fusion and other_input.name not in unary_scales_tensors
if is_conv_ew_fusion:
unary_scales_tensors.add(tensor.name)
precision_config[tensor.name] = 1.0
logging.info(
f'No tensor scales for {node.name}\'s output tensor {tensor.name} but assuming {node.op} + {single_downstream_node.op} fusion'
)
can_fuse = True
return can_fuse
@staticmethod
def parse(model_path: str, output_dir: str, post_opt_passes: List[str],
ops_to_infer_adjacent_scales: Set[str], trt_calib_version: str,
rename_node_outputs: bool, add_unary_ew_scales_for_dla: bool,
calibration_type: str):
""" Process the ONNX model.
Args:
model_path(str): Path to the ONNX model.
output_dir(str): Output folder for saving the results.
"""
model = onnx.load(model_path)
model = onnx.shape_inference.infer_shapes(model)
graph = gs.import_onnx(model)
if rename_node_outputs:
for node in graph.nodes:
for idx, out in enumerate(node.outputs):
name = node.name
if idx > 0:
name = f'{name}_{idx}'
out.name = name
# Pre-process model to remove Reshape+Transpose layers after weight DQ and before Conv layer
logging.debug(f'Calling fold_reshape_transpose_into_conv()...')
graph = QATModelParser.fold_reshape_transpose_into_conv(graph)
# Extract precision
logging.debug(f'Calling extract_precision_config()...')
precision_config = QATModelParser.extract_precision_config(graph, calibration_type)
# forward pass
zero_check_skip = set()
logging.debug(f'Calling infer_unchanged_scales() with downstream=True...')
QATModelParser.infer_unchanged_scales(graph, precision_config, True,
ops_to_infer_adjacent_scales, zero_check_skip)
# backward pass
logging.debug(f'Calling infer_unchanged_scales() with downstream=False...')
QATModelParser.infer_unchanged_scales(graph, precision_config, False,
ops_to_infer_adjacent_scales, zero_check_skip)
if 'fuse_bn_into_conv' in post_opt_passes:
logging.debug(f'Calling prepare_for_bn_fusion() ahead of the fuse_bn_into_conv pass...')
QATModelParser.prepare_for_bn_fusion(graph, rename_node_outputs)
# Export translated model and scales
new_model = gs.export_onnx(graph)
if len(post_opt_passes) > 0:
logging.debug(f'Calling onnxoptimizer.optimize()...')
new_model = onnxoptimizer.optimize(new_model, passes=post_opt_passes)
logging.debug(f'Calling onnx.checker.check_model()...')
onnx.checker.check_model(new_model)
model_name = os.path.splitext(os.path.basename(model_path))[0]
output_onnx_path = os.path.join(output_dir, f"{model_name}_noqdq.onnx")
logging.debug(f'Saving the new model as {output_onnx_path}...')
onnx.save(new_model, output_onnx_path)
new_graph = gs.import_onnx(new_model)
out_tensors = [(None, input) for input in new_graph.inputs]
out_tensors += [(node, node.outputs[0]) for node in new_graph.nodes]
fp16_nodes = list()
unary_scales_tensors = set()
for node, tensor in out_tensors:
if not isinstance(tensor, gs.Constant) and tensor.name not in precision_config.keys():
can_fuse_with_conv = add_unary_ew_scales_for_dla and QATModelParser.fuse_with_conv_through_unary_scales(
node, tensor, precision_config, unary_scales_tensors)
if not can_fuse_with_conv:
consumers = [node.name for node in tensor.outputs]
fp16_nodes.extend(consumers)
qualifier = 'input' if node is None else f'{node.name}\'s output'
addition = f', recommended to set its consumer nodes {consumers} to fp16' if len(
consumers) > 0 else ''
logging.info(
f'No tensor scales for {qualifier} tensor {tensor.name}{addition}')
precision_config = {'int8_tensor_scales': precision_config, 'fp16_nodes': fp16_nodes}
output_json_path = os.path.join(output_dir, f'{model_name}_precision_config.json')
logging.debug(f'Saving the extracted precision config as {output_json_path}...')
with open(output_json_path, 'w') as f:
json.dump(precision_config, f, indent=4)
export_to_trt_calib(output_json_path, trt_calib_version)
def main(args):
log_level = logging.INFO
if args.verbose:
log_level = logging.DEBUG
logging.basicConfig(level=log_level)
# For all passes: https://github.com/onnx/optimizer/tree/master/onnxoptimizer/passes
opt_passes = [
'extract_constant_to_initializer', 'fuse_bn_into_conv', 'fuse_pad_into_conv',
'fuse_pad_into_pool'
]
ops_to_infer_adjacent_scales = DEFAULT_OPS_TO_INFER_ADJACENT_SCALES
for op in args.addtl_ops_to_infer_adjacent_scales:
ops_to_infer_adjacent_scales.add(op)
if args.infer_average_pool_scales:
ops_to_infer_adjacent_scales.add('AveragePool')
ops_to_infer_adjacent_scales.add('GlobalAveragePool')
ops_to_infer_adjacent_scales.add('ReduceMean')
if args.infer_concat_scales:
ops_to_infer_adjacent_scales.add("Concat")
if args.infer_mul_scales:
ops_to_infer_adjacent_scales.add('Mul')
os.makedirs(args.output_dir, exist_ok=True)
parser = QATModelParser()
for onnx_model in args.input_onnx_models:
logging.info(f'Parsing {onnx_model}...')
parser.parse(onnx_model,
args.output_dir,
opt_passes,
ops_to_infer_adjacent_scales,
args.trt_calib_version,
rename_node_outputs=args.rename_node_outputs,
add_unary_ew_scales_for_dla=args.add_unary_ew_scales_for_dla, calibration_type=args.calibration_type)
if __name__ == '__main__':
main(ARGPARSER.parse_args())
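# Hedged CLI sketch (flag names assumed from the options referenced above; check
# `python qdq_translator.py --help` for the authoritative list):
#   python qdq_translator.py --input_onnx_models model_qat.onnx --output_dir out
# This is expected to write out/model_qat_noqdq.onnx, out/model_qat_precision_config.json
# and a TensorRT calibration cache via export_to_trt_calib().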
|
Deep-Learning-Accelerator-SW-main
|
tools/qdq-translator/qdq_translator.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2022 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
"""Registry for op reconstructions."""
from op_reconstruction import *
RECONSTRUCTOR_REGISTRY = [
Neg,
DepthToSpace,
SpaceToDepth,
Xor,
And,
Or,
Not,
Gather,
GRU,
ScatterElements,
Where
]
RECONSTRUCTOR_REGISTRY_DICT = dict()
for reconstructor in RECONSTRUCTOR_REGISTRY:
RECONSTRUCTOR_REGISTRY_DICT[reconstructor.op_to_reconstruct()] = reconstructor
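# Hedged usage sketch (not part of the original registry): apply every registered
# reconstruction to an already-loaded onnx_graphsurgeon graph.
def _example_reconstruct_all(graph):
    for node in graph.nodes:
        reconstructor = RECONSTRUCTOR_REGISTRY_DICT.get(node.op)
        if reconstructor is not None:
            reconstructor.reconstruct(node, graph)
    return graph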
|
Deep-Learning-Accelerator-SW-main
|
operators/registry.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2022 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
"""Common utils for op reconstructions."""
import logging
import numpy as np
np.random.seed(0xdeadbeef)
logging.basicConfig(format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p',
level=logging.INFO)
|
Deep-Learning-Accelerator-SW-main
|
operators/__init__.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2022 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
"""Simple testbench for all op reconstructions."""
import logging
from collections import defaultdict
from registry import RECONSTRUCTOR_REGISTRY
MAXABSDIFF_THRESHOLDS = defaultdict(lambda: 0.0)
MAXABSDIFF_THRESHOLDS['GRU'] = 1e-6
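# GRU is rebuilt from a long chain of float ops, so a tiny numerical tolerance is
# allowed; every other reconstruction is expected to match the original exactly.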
def log_results(passed_tests, failed_tests):
num_passed_tests = len(passed_tests)
num_failed_tests = len(failed_tests)
num_total_tests = num_passed_tests + num_failed_tests
passed_percentage = num_passed_tests * 100.0 / num_total_tests
logging.info(f'{num_passed_tests}/{num_total_tests} ({passed_percentage:.1f}%) tests passed.')
logging.info(f'{num_passed_tests} passed tests: {passed_tests}')
logging.info(f'{num_failed_tests} failed tests: {failed_tests}')
def test():
passed_tests = list()
failed_tests = list()
for reconstructor in RECONSTRUCTOR_REGISTRY:
op = reconstructor()
threshold = MAXABSDIFF_THRESHOLDS[op.op]
maxabsdiff = op.test()
if maxabsdiff <= threshold:
passed_tests.append(op.op)
else:
failed_tests.append(op.op)
logging.error(f'Test for {op.op} failed')
log_results(passed_tests, failed_tests)
def main():
test()
if __name__ == '__main__':
main()
|
Deep-Learning-Accelerator-SW-main
|
operators/test.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2022 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
"""Reconstruction of a Gather op."""
from logging import info
import onnx
import numpy as np
import onnx_graphsurgeon as gs
from common.IndexSelectionOperator import IndexSelectionOperator
class Gather(IndexSelectionOperator):
expected_num_inputs = 2
def generate(self, input_shapes, attrs, num_indices):
assert len(input_shapes) == 1
input_shape = input_shapes[0]
dtype = np.float32
var_input = gs.Variable(name=self.new_tensor_name(), dtype=dtype, shape=input_shape)
outputs = [gs.Variable(name=self.new_tensor_name(), dtype=dtype)]
axis = attrs.get('axis', 0)
index_vals = np.arange(input_shape[axis], dtype=np.int64)
np.random.shuffle(index_vals)
index_vals = index_vals[:num_indices]
index_constant = gs.Constant(name=self.new_tensor_name(), values=index_vals)
node = gs.Node(op=self.op,
inputs=[var_input, index_constant],
outputs=outputs,
name=self.new_node_name())
node.attrs = attrs
graph = gs.Graph(nodes=[node], inputs=[var_input], outputs=node.outputs, name=node.name)
graph.name = f'{self.op}_axis{axis}_orig'
return graph
@classmethod
def reconstruct(cls, node, graph):
if cls.qualifies_for_reconstruction(node):
info(f'Reconstructing {node.op} node "{node.name}"...')
orig_axis = node.attrs.get('axis', 0)
transposes_needed = IndexSelectionOperator.insert_transposes_if_needed(
node, graph, orig_axis)
if transposes_needed:
node.attrs['axis'] = 1
axis = node.attrs.get('axis', 0)
assert axis == 1
channels_in = node.inputs[0].shape[1]
indices = node.inputs[1]
assert len(indices.shape) == 1
channels_out = indices.shape[0]
node.op = 'Conv'
node.attrs.clear()
weight_shape = (channels_out, channels_in, 1, 1)
weight_vals = np.zeros(weight_shape, dtype=np.float32)
for c_out, c_in in enumerate(indices.values.flatten()):
weight_vals[c_out, c_in, :, :] = 1
weight_constant = gs.Constant(name=f'{node.name}_tmp0', values=weight_vals)
node.inputs = [node.inputs[0], weight_constant]
graph.cleanup()
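# Hedged worked example (illustrative values): with channels_in = 4 and
# indices = [2, 0], the 1x1 Conv weight has shape (2, 4, 1, 1) with
# weight[0, 2] = 1 and weight[1, 0] = 1, so the Conv output reproduces
# Gather along axis 1 by copying input channels 2 and 0.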
def test(self, input_shapes=None, num_indices=None, axes=None):
input_shapes = input_shapes or [(1, 5, 6, 7)]
num_indices = num_indices or 3
axes = axes or [1, 2, 3]
maxabsdiff = 0.0
for axis in axes:
attrs = dict(axis=axis)
orig_graph = self.generate(input_shapes, attrs, num_indices)
orig_path = IndexSelectionOperator.save_gs_graph(orig_graph, run_shape_inference=True)
reconstructed_graph = gs.import_onnx(onnx.load(orig_path))
for node in reconstructed_graph.nodes:
self.reconstruct(node, reconstructed_graph)
reconstructed_graph.name = orig_graph.name.replace('_orig', '_reconstructed')
reconstructed_path = IndexSelectionOperator.save_gs_graph(reconstructed_graph)
maxabsdiff_axis = self.run_comparison([orig_path, reconstructed_path],
input_shapes=input_shapes,
incremental_vals=True)
maxabsdiff = max(maxabsdiff_axis, maxabsdiff)
return maxabsdiff
def main():
op = Gather()
op.test()
if __name__ == '__main__':
main()
|
Deep-Learning-Accelerator-SW-main
|
operators/op_reconstruction/Gather.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
"""Reconstruction of a Neg op."""
from logging import info
import numpy as np
import onnx_graphsurgeon as gs
from common.Operator import Operator
class Neg(Operator):
def generate(self, input_shapes, **kwargs):
# Neg needs no extra inputs at generation time; reuse the generic single-op graph from the base class.
graph = super().generate(input_shapes, **kwargs)
return graph
@classmethod
def reconstruct(cls, node, graph):
if cls.qualifies_for_reconstruction(node):
info(f'Reconstructing {node.op} node "{node.name}"...')
node.op = 'Mul'
node.attrs.clear()
mul_shape = (1, )
mul_vals = np.zeros(mul_shape, dtype=np.float32)
mul_vals.fill(-1.0)
mul_constant = gs.Constant(name=f'{node.name}_tmp0', values=mul_vals)
node.inputs.append(mul_constant)
def test(self, input_shapes=[(1, 2, 3, 4)], **kwargs):
return super().test(input_shapes, **kwargs)
def main():
op = Neg()
op.test()
if __name__ == '__main__':
main()
|
Deep-Learning-Accelerator-SW-main
|
operators/op_reconstruction/Neg.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2022 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
"""Reconstruction of a SpaceToDepth op."""
from logging import info
import numpy as np
import onnx_graphsurgeon as gs
from common.Operator import Operator
class SpaceToDepth(Operator):
@classmethod
def reconstruct(cls, node, graph):
if cls.qualifies_for_reconstruction(node):
info(f'Reconstructing {node.op} node "{node.name}"...')
blocksize = node.attrs.get('blocksize', 1)
channels_in, height_in, width_in = node.inputs[0].shape[1:]
assert height_in % blocksize == 0
assert width_in % blocksize == 0
channels_out = channels_in * blocksize * blocksize
node.op = 'Conv'
node.attrs = dict(kernel_shape=(blocksize, blocksize), strides=(blocksize, blocksize))
weight_shape = (channels_out, channels_in, blocksize, blocksize)
weight_vals = np.zeros(weight_shape, dtype=np.float32)
stacked_kernels = weight_vals.reshape(channels_out,
channels_in * blocksize * blocksize)
for block_idx in range(channels_out):
kernel_one_idx = block_idx // channels_in + (block_idx %
channels_in) * blocksize * blocksize
stacked_kernels[block_idx][kernel_one_idx] = 1
weight_vals = stacked_kernels.reshape(weight_shape)
weight_constant = gs.Constant(name=f'{node.name}_tmp0', values=weight_vals)
node.inputs.append(weight_constant)
def test(self, input_shapes=None, blocksize=None):
input_shapes = input_shapes or [(1, 5, 768, 768)]
blocksize = blocksize or 4
attrs = dict(blocksize=blocksize)
return super().test(input_shapes, attrs=attrs)
def main():
op = SpaceToDepth()
op.test()
if __name__ == '__main__':
main()
|
Deep-Learning-Accelerator-SW-main
|
operators/op_reconstruction/SpaceToDepth.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
"""Reconstruction of a Where op."""
from logging import info
import onnx
import numpy as np
import onnx_graphsurgeon as gs
from common.IndexSelectionOperator import IndexSelectionOperator
class Where(IndexSelectionOperator):
expected_num_inputs = 3
def generate(self, input_shapes, axis=0):
assert len(input_shapes) == 2
assert tuple(input_shapes[0]) == tuple(input_shapes[1])
input_shape = input_shapes[0]
dtype = np.float32
var_inputs = [
gs.Variable(name=self.new_tensor_name(), dtype=dtype, shape=input_shape)
for _ in input_shapes
]
outputs = [gs.Variable(name=self.new_tensor_name(), dtype=dtype)]
condition_shape = [1] * 4
condition_shape[axis] = input_shape[axis]
condition_vals = np.random.choice(a=[False, True], size=condition_shape)
condition_constant = gs.Constant(name=self.new_tensor_name(), values=condition_vals)
node = gs.Node(op=self.op,
inputs=[condition_constant] + var_inputs,
outputs=outputs,
name=self.new_node_name())
graph = gs.Graph(nodes=[node], inputs=var_inputs, outputs=node.outputs, name=node.name)
graph.name = f'{self.op}_axis{axis}_orig'
return graph
@staticmethod
def get_orig_axis(condition_shape, var_input_shape):
C, H, W = var_input_shape[1:]
condition_shape = tuple(condition_shape)
orig_axis = 0
if condition_shape == (1, C, 1, 1):
orig_axis = 1
elif condition_shape == (1, 1, H, 1):
orig_axis = 2
elif condition_shape == (1, 1, 1, W):
orig_axis = 3
return orig_axis
@staticmethod
def insert_transposes_if_needed(node, graph):
condition = node.inputs[0]
var_input_shape = node.inputs[1].shape
orig_axis = Where.get_orig_axis(condition.shape, var_input_shape)
assert orig_axis != 0, 'Got orig_axis == 0, this op should not have qualified for reconstruction in the first place'
transposes_needed = IndexSelectionOperator.insert_transposes_if_needed(
node, graph, orig_axis)
if transposes_needed:
if orig_axis == 2:
first_permute = (0, 2, 3, 1)
elif orig_axis == 3:
first_permute = (0, 3, 2, 1)
elif orig_axis == 1:
assert False, 'Should not have required to transpose'
else:
assert False, 'Not implemented'
condition.values = np.transpose(condition.values, first_permute)
@classmethod
def qualifies_for_reconstruction(cls, node):
result = node.op == cls.op_to_reconstruct()
if result:
result = result and len(node.inputs) == cls.expected_num_inputs
result = result and isinstance(node.inputs[0], gs.Constant)
result = result and not isinstance(node.inputs[1], gs.Constant) and len(
node.inputs[1].shape) == 4
result = result and not isinstance(node.inputs[2], gs.Constant) and len(
node.inputs[0].shape) == 4
result = result and tuple(node.inputs[1].shape) == tuple(node.inputs[2].shape)
if result:
condition_shape = node.inputs[0].shape
var_input_shape = node.inputs[1].shape
orig_axis = Where.get_orig_axis(condition_shape, var_input_shape)
result &= orig_axis in {1, 2, 3}
return result
@classmethod
def reconstruct(cls, node, graph):
if cls.qualifies_for_reconstruction(node):
info(f'Reconstructing {node.op} node "{node.name}"...')
Where.insert_transposes_if_needed(node, graph)
condition = node.inputs[0]
var_inputs = node.inputs[1:]
channels_out = var_inputs[0].shape[1]
channels_in = 2 * channels_out
dtype = var_inputs[0].dtype
concat_name = f'{node.name}_concat'
concat_out = gs.Variable(name=concat_name, dtype=dtype)
concat_node = gs.Node(op='Concat',
inputs=var_inputs,
outputs=[concat_out],
name=concat_name,
attrs=dict(axis=1))
graph.nodes.append(concat_node)
node.op = 'Conv'
weight_shape = (channels_out, channels_in, 1, 1)
weight_vals = np.zeros(weight_shape, dtype=np.float32)
for c_out, selector in enumerate(condition.values.flatten()):
selector_idx = 0 if selector else 1
weight_vals[c_out, selector_idx * channels_out + c_out, :, :] = 1
weight_constant = gs.Constant(name=f'{node.name}_tmp0', values=weight_vals)
node.inputs = [concat_out, weight_constant]
graph.cleanup()
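# Hedged worked example (illustrative values): with channels_out = 3 and
# condition = [True, False, True], Concat yields 6 channels [A0..A2, B0..B2]
# and the 1x1 Conv weight sets weight[0, 0] = 1 (A0), weight[1, 4] = 1 (B1)
# and weight[2, 2] = 1 (A2), reproducing Where(condition, A, B) channel by channel.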
def test(self, input_shapes=None, axes=None):
input_shapes = input_shapes or [(1, 3, 5, 7)] * 2
axes = axes or [1, 2, 3]
maxabsdiff = 0.0
for axis in axes:
orig_graph = self.generate(input_shapes, axis)
orig_path = IndexSelectionOperator.save_gs_graph(orig_graph, run_shape_inference=True)
reconstructed_graph = gs.import_onnx(onnx.load(orig_path))
for node in reconstructed_graph.nodes:
self.reconstruct(node, reconstructed_graph)
reconstructed_graph.name = orig_graph.name.replace('_orig', '_reconstructed')
reconstructed_path = IndexSelectionOperator.save_gs_graph(reconstructed_graph)
maxabsdiff_axis = self.run_comparison([orig_path, reconstructed_path],
input_shapes=input_shapes)
maxabsdiff = max(maxabsdiff_axis, maxabsdiff)
return maxabsdiff
def main():
op = Where()
op.test()
if __name__ == '__main__':
main()
|
Deep-Learning-Accelerator-SW-main
|
operators/op_reconstruction/Where.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2022 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
"""Reconstruction of an Or op."""
from logging import info
import numpy as np
import onnx_graphsurgeon as gs
from common.BinaryOperator import BinaryOperator
class Or(BinaryOperator):
@classmethod
def reconstruct(cls, node, graph):
if cls.qualifies_for_reconstruction(node):
info(f'Reconstructing {node.op} node "{node.name}"...')
node.op = 'Add'
for tensor in node.inputs + node.outputs:
tensor.dtype = np.float32
tmp_tensor = gs.Variable(name=f'{node.name}_tmp0', dtype=np.float32)
clip_min = gs.Constant(name=f'{node.name}_tmp1',
values=np.zeros((1, ), dtype=np.float32))
clip_max = gs.Constant(name=f'{node.name}_tmp2',
values=np.ones((1, ), dtype=np.float32))
clip_node = gs.Node(op='Clip',
inputs=[tmp_tensor, clip_min, clip_max],
outputs=[node.outputs[0]],
name=f'{node.name}_clip')
node.outputs = [tmp_tensor]
graph.nodes.append(clip_node)
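# Hedged check on {0, 1} inputs: Add followed by Clip(0, 1) matches Or, e.g.
# clip(1 + 1) = 1, clip(1 + 0) = 1, clip(0 + 0) = 0.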
def test(self):
input_shapes = [(1, 4, 1, 1)] * 2
input_data = list()
input_data.append(np.array([True, True, False, False]).reshape(input_shapes[0]))
input_data.append(np.array([True, False, True, False]).reshape(input_shapes[1]))
return super().test(input_data=input_data)
def main():
op = Or()
op.test()
if __name__ == '__main__':
main()
|
Deep-Learning-Accelerator-SW-main
|
operators/op_reconstruction/Or.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
"""Taken from https://julienharbulot.com/python-dynamical-import.html"""
from inspect import isclass
from pkgutil import iter_modules
from pathlib import Path
from importlib import import_module
# iterate through the modules in the current package
package_dir = Path(__file__).resolve().parent
for (_, module_name, _) in iter_modules([package_dir]):
# import the module and iterate through its attributes
module = import_module(f"{__name__}.{module_name}")
for attribute_name in dir(module):
attribute = getattr(module, attribute_name)
if isclass(attribute):
# Add the class to this package's variables
globals()[attribute_name] = attribute
|
Deep-Learning-Accelerator-SW-main
|
operators/op_reconstruction/__init__.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
"""Reconstruction of a Not op."""
from logging import info
import numpy as np
import onnx_graphsurgeon as gs
from common.BinaryOperator import BinaryOperator
class Not(BinaryOperator):
@classmethod
def reconstruct(cls, node, graph):
if cls.qualifies_for_reconstruction(node):
info(f'Reconstructing {node.op} node "{node.name}"...')
node.op = 'Sub'
for tensor in node.inputs + node.outputs:
tensor.dtype = np.float32
not_var = node.inputs[0]
not_constant = gs.Constant(name=f'{node.name}_tmp0',
values=np.ones(not_var.shape, dtype=not_var.dtype))
node.inputs = [not_constant, not_var]
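# Hedged check on {0, 1} inputs: 1 - x matches Not, i.e. 1 - 1 = 0 and 1 - 0 = 1.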
def test(self):
input_shape = (1, 2, 1, 1)
input_data = np.array([True, False]).reshape(input_shape)
return super().test(input_data=input_data)
def main():
op = Not()
op.test()
if __name__ == '__main__':
main()
|
Deep-Learning-Accelerator-SW-main
|
operators/op_reconstruction/Not.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
"""Reconstruction of a ScatterElements op."""
from logging import info
import onnx
import numpy as np
import onnx_graphsurgeon as gs
from common.IndexSelectionOperator import IndexSelectionOperator
from pdb import set_trace
SCATTER_ELEMENTS_SUPPORTED_REDUCTIONS = {'none'}
class ScatterElements(IndexSelectionOperator):
expected_num_inputs = 3
def generate(self, input_shapes, attrs, num_indices):
assert len(input_shapes) == 2
data_shape, updates_shape = input_shapes
dtype = np.float32
var_input_data = gs.Variable(name=self.new_tensor_name(), dtype=dtype, shape=data_shape)
var_input_updates = gs.Variable(name=self.new_tensor_name(),
dtype=dtype,
shape=updates_shape)
outputs = [gs.Variable(name=self.new_tensor_name(), dtype=dtype)]
axis = attrs.get('axis', 0)
index_vals = np.arange(data_shape[axis], dtype=np.int64)
np.random.shuffle(index_vals)
index_shape = [1] * 4
index_shape[axis] = num_indices
index_vals = index_vals[:num_indices].reshape(index_shape)
index_tile_dims = list(updates_shape)
index_tile_dims[0] = 1
index_tile_dims[axis] = 1
index_vals = np.tile(index_vals, index_tile_dims)
index_constant = gs.Constant(name=self.new_tensor_name(), values=index_vals)
node = gs.Node(op=self.op,
inputs=[var_input_data, index_constant, var_input_updates],
outputs=outputs,
name=self.new_node_name())
node.attrs = attrs
graph = gs.Graph(nodes=[node],
inputs=[var_input_data, var_input_updates],
outputs=node.outputs,
name=node.name)
graph.name = f'{self.op}_axis{axis}_orig'
return graph
@classmethod
def qualifies_for_reconstruction(cls, node):
result = super().qualifies_for_reconstruction(node)
nonconst_inputs = [x for x in node.inputs if not isinstance(x, gs.Constant)]
axis = node.attrs.get('axis', 0)
if result:
reduction = node.attrs.get('reduction', 'none')
result = reduction in SCATTER_ELEMENTS_SUPPORTED_REDUCTIONS
result &= len(nonconst_inputs) == 2
if result:
indices = node.inputs[1].values
for slice_idx in range(indices.shape[axis]):
slice_data = np.take(indices, slice_idx, axis).flatten()
if not (slice_data[0] == slice_data).all():
result = False
break
return result
@staticmethod
def insert_transposes_if_needed(node, graph):
orig_axis = node.attrs.get('axis', 0)
transposes_needed = IndexSelectionOperator.insert_transposes_if_needed(
node, graph, orig_axis)
if transposes_needed:
if orig_axis == 2:
first_permute = (0, 2, 3, 1)
elif orig_axis == 3:
first_permute = (0, 3, 2, 1)
elif orig_axis == 1:
assert False, 'Should not have required to transpose'
else:
assert False, 'Not implemented'
node.attrs['axis'] = 1
indices = node.inputs[1]
indices.values = np.transpose(indices.values, first_permute)
@classmethod
def reconstruct(cls, node, graph):
if cls.qualifies_for_reconstruction(node):
info(f'Reconstructing {node.op} node "{node.name}"...')
cls.insert_transposes_if_needed(node, graph)
axis = node.attrs.get('axis', 0)
assert axis == 1
data, indices, updates = node.inputs
index_vals = indices.values[0, :, 0, 0]
indices_set = set(index_vals)
channels_in_conv = updates.shape[1]
channels_out_conv = data.shape[1]
conv_weight_shape = (channels_out_conv, channels_in_conv, 1, 1)
conv_weight_vals = np.zeros(conv_weight_shape, dtype=np.float32)
for c_in, c_out in enumerate(index_vals):
conv_weight_vals[c_out, c_in, :, :] = 1
conv_output = gs.Variable(name=f'{node.name}_conv_tmp0', dtype=updates.dtype)
conv_weight_constant = gs.Constant(name=f'{node.name}_conv_tmp1',
values=conv_weight_vals)
conv_node = gs.Node(op='Conv',
inputs=[updates, conv_weight_constant],
outputs=[conv_output],
name=f'{node.name}_conv')
graph.nodes.append(conv_node)
scale_weight_shape = (1, channels_out_conv, 1, 1)
scale_weight_vals = np.zeros(scale_weight_shape, dtype=np.float32)
for c in range(scale_weight_vals.size):
if c not in indices_set:
scale_weight_vals[0][c] = 1.0
scale_output = gs.Variable(name=f'{node.name}_scale_tmp0', dtype=updates.dtype)
scale_weight_constant = gs.Constant(name=f'{node.name}_scale_tmp1',
values=scale_weight_vals)
scale_node = gs.Node(op='Mul',
inputs=[data, scale_weight_constant],
outputs=[scale_output],
name=f'{node.name}_scale')
graph.nodes.append(scale_node)
node.op = 'Add'
node.inputs = [scale_output, conv_output]
node.attrs.clear()
graph.cleanup()
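# Hedged worked example (illustrative values): with 5 data channels, axis = 1
# and indices = [3, 0] (2 update channels), the Conv scatters the updates into
# channels 3 and 0 (conv_weight[3, 0] = 1, conv_weight[0, 1] = 1), the Mul keeps
# only the untouched data channels (scale = [0, 1, 1, 0, 1]) and the final Add
# merges both paths into the scattered result.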
def test(self, input_shapes=None, num_indices=None, axes=None):
input_shapes = input_shapes or [(1, 5, 6, 7)]
assert isinstance(input_shapes, list) and len(input_shapes) == 1
input_shape_data = input_shapes[0]
num_indices = num_indices or 3
axes = axes or [1, 2, 3]
maxabsdiff = 0.0
for axis in axes:
attrs = dict(axis=axis)
input_shape_updates = list(input_shape_data)
input_shape_updates[axis] = num_indices
input_shape_updates = tuple(input_shape_updates)
var_input_shapes = [input_shape_data, input_shape_updates]
orig_graph = self.generate(var_input_shapes, attrs, num_indices)
orig_path = IndexSelectionOperator.save_gs_graph(orig_graph, run_shape_inference=True)
reconstructed_graph = gs.import_onnx(onnx.load(orig_path))
for node in reconstructed_graph.nodes:
self.reconstruct(node, reconstructed_graph)
reconstructed_graph.name = orig_graph.name.replace('_orig', '_reconstructed')
reconstructed_path = IndexSelectionOperator.save_gs_graph(reconstructed_graph,
run_shape_inference=True)
maxabsdiff_axis = self.run_comparison([orig_path, reconstructed_path],
input_shapes=var_input_shapes)
maxabsdiff = max(maxabsdiff_axis, maxabsdiff)
return maxabsdiff
def main():
op = ScatterElements()
op.test()
if __name__ == '__main__':
main()
|
Deep-Learning-Accelerator-SW-main
|
operators/op_reconstruction/ScatterElements.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
"""Reconstruction of a Xor op."""
from logging import info
import numpy as np
import onnx_graphsurgeon as gs
from common.BinaryOperator import BinaryOperator
class Xor(BinaryOperator):
@classmethod
def reconstruct(cls, node, graph):
if cls.qualifies_for_reconstruction(node):
info(f'Reconstructing {node.op} node "{node.name}"...')
node.op = 'Sub'
for tensor in node.inputs + node.outputs:
tensor.dtype = np.float32
tmp_tensor = gs.Variable(name=f'{node.name}_tmp0', dtype=np.float32)
abs_node = gs.Node(op='Abs',
inputs=[tmp_tensor],
outputs=[node.outputs[0]],
name=f'{node.name}_abs')
node.outputs = [tmp_tensor]
graph.nodes.append(abs_node)
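# Hedged check on {0, 1} inputs: Sub followed by Abs matches Xor, e.g.
# |1 - 1| = 0, |1 - 0| = 1, |0 - 1| = 1, |0 - 0| = 0.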
def test(self):
input_shapes = [(1, 4, 1, 1)] * 2
input_data = list()
input_data.append(np.array([True, True, False, False]).reshape(input_shapes[0]))
input_data.append(np.array([True, False, True, False]).reshape(input_shapes[1]))
return super().test(input_data=input_data)
def main():
op = Xor()
op.test()
if __name__ == '__main__':
main()
|
Deep-Learning-Accelerator-SW-main
|
operators/op_reconstruction/Xor.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2022 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
"""Reconstruction of a GRU op."""
from logging import info
import onnx_graphsurgeon as gs
import onnx
from common.Operator import Operator
from common.onnxruntime_utils import ONNXRUNTIME_DT_TO_NUMPY_DT
import onnxruntime
import numpy as np
NUM_DIRECTIONS_DICT = dict(forward=1, reverse=1, bidirectional=2)
class GRU(Operator):
def generate(self, input_shapes, attrs, use_bias=False, use_initial_h=True):
if not isinstance(input_shapes, list):
input_shapes = [input_shapes]
assert len(input_shapes) in {1, 2}
assert len(input_shapes[0]) == 3
assert attrs.get('layout', 0) == 1
batch_size, seq_length, input_size = input_shapes[0]
dtype = np.float32
inputs = list()
hidden_size = attrs.get('hidden_size', 1)
direction = attrs.get('direction', 'forward')
assert direction in {'forward'}
num_directions = NUM_DIRECTIONS_DICT[direction]
X = gs.Variable(name=self.new_tensor_name(), dtype=dtype, shape=input_shapes[0])
W_shape = [num_directions, 3 * hidden_size, input_size]
W = gs.Constant(name=self.new_tensor_name(), values=np.random.rand(*W_shape).astype(dtype))
R_shape = [num_directions, 3 * hidden_size, hidden_size]
R = gs.Constant(name=self.new_tensor_name(), values=np.random.rand(*R_shape).astype(dtype))
inputs = [X, W, R]
if use_bias:
B = gs.Constant(name=self.new_tensor_name(),
values=np.random.rand(num_directions, 6 * hidden_size).astype(dtype))
inputs += [B]
if use_initial_h:
if not use_bias:
B = gs.Constant(name=self.new_tensor_name(),
values=np.zeros([num_directions, 6 * hidden_size], dtype=dtype))
inputs += [B]
sequence_lens = gs.Constant(name=self.new_tensor_name(),
values=np.array([seq_length] * batch_size, dtype=np.int32))
initial_h_shape = [batch_size, num_directions, hidden_size]
initial_h = gs.Variable(name=self.new_tensor_name(),
dtype=dtype,
shape=initial_h_shape)
inputs += [sequence_lens, initial_h]
outputs = [
gs.Variable(name='', dtype=dtype),
gs.Variable(name=self.new_tensor_name(), dtype=dtype)
]
activations = attrs.get('activations', list())
if len(activations) > 0:
assert len(activations) == 2 # would be 4 for bidirectional
attrs['activations'] = activations
node = gs.Node(op=self.op,
inputs=inputs,
outputs=outputs,
attrs=attrs,
name=self.new_node_name())
graph_inputs = [inp for inp in node.inputs if not isinstance(inp, gs.Constant)]
graph = gs.Graph(nodes=[node],
inputs=graph_inputs,
outputs=[node.outputs[-1]],
name=node.name)
graph.name = f'{self.op}_bias{int(use_bias)}_initialh{int(use_initial_h)}_orig'
return graph
@staticmethod
def from2dto4d(data):
return np.expand_dims(data, [-2, -1])
@staticmethod
def add_gru_conv(graph, var_input, prefix, conv_wt_vals, conv_bias_vals, index, dtype):
assert isinstance(var_input, gs.Variable)
conv_out = gs.Variable(name=prefix, dtype=dtype)
conv_Wt = gs.Constant(name=f'{prefix}_wt', values=np.copy(conv_wt_vals[index]))
inputs = [var_input, conv_Wt]
if conv_bias_vals[index] is not None:
inputs.append(gs.Constant(name=f'{prefix}_bias',
values=np.copy(conv_bias_vals[index])))
conv = gs.Node(op='Conv', inputs=inputs, outputs=[conv_out], name=conv_out.name)
graph.nodes.append(conv)
return conv_out
@staticmethod
def add_gru_bias(conv_node, conv_bias_vals, index):
if len(conv_node.inputs) < 3:
return
bias = conv_node.inputs[2]
if conv_bias_vals[index] is not None:
bias.values += conv_bias_vals[index]
@staticmethod
def add_gru_elementwise(graph, ew_op, var_inputs, prefix, dtype, ew_out=None):
assert isinstance(var_inputs, list)
ew_out = ew_out or gs.Variable(name=prefix, dtype=dtype)
ew = gs.Node(op=ew_op, inputs=var_inputs, outputs=[ew_out], name=ew_out.name)
graph.nodes.append(ew)
return ew_out
@staticmethod
def add_gru_act(graph, act_op, var_input, prefix, dtype):
assert isinstance(var_input, gs.Variable)
act_out = gs.Variable(name=prefix, dtype=dtype)
act = gs.Node(op=act_op, inputs=[var_input], outputs=[act_out], name=act_out.name)
graph.nodes.append(act)
return act_out
@staticmethod
def add_gru_transpose(graph, var_input, perm, prefix, dtype):
transpose_out = gs.Variable(name=prefix, dtype=dtype)
transpose = gs.Node(op='Transpose',
inputs=[var_input],
outputs=[transpose_out],
attrs=dict(perm=perm),
name=transpose_out.name)
graph.nodes.append(transpose)
return transpose_out
@staticmethod
def add_gru_slice(graph, slice_channel_idx, slice_axis, var_input, prefix, dtype):
assert isinstance(var_input, gs.Variable)
slice_out = gs.Variable(name=prefix, dtype=dtype)
starts = gs.Constant(name=f'{prefix}_starts',
values=np.array([slice_channel_idx], dtype=np.int64))
ends = gs.Constant(name=f'{prefix}_ends',
values=np.array([slice_channel_idx + 1], dtype=np.int64))
axes = gs.Constant(name=f'{prefix}_axes', values=np.array([slice_axis], dtype=np.int64))
steps = gs.Constant(name=f'{prefix}_steps', values=np.array([1], dtype=np.int64))
slice = gs.Node(op='Slice',
inputs=[var_input, starts, ends, axes, steps],
outputs=[slice_out],
name=slice_out.name)
graph.nodes.append(slice)
return slice_out
@staticmethod
def add_gru_const_op(graph, const_op, var_input, const_vals, prefix, dtype):
assert isinstance(var_input, gs.Variable)
assert isinstance(const_vals, np.ndarray)
op_out = gs.Variable(name=prefix, dtype=dtype)
const_vals = const_vals.reshape(1, -1, 1, 1)
const_input = gs.Constant(name=f'{prefix}_const', values=const_vals.astype(dtype))
op = gs.Node(op=const_op,
inputs=[var_input, const_input],
outputs=[op_out],
name=op_out.name)
graph.nodes.append(op)
return op_out
@staticmethod
def reconstruct_step(graph, node, step_idx, total_steps, X_slice, Ht_prev, W_vals, Wb_vals,
R_vals, Rb_vals, f, g, dtype, linear_before_reset):
zt_index_W = 0
zt_index_R = 0
rt_index_W = 1
rt_index_R = 1
ht_index_W = 2
ht_index_R = 2
base_prefix = f'{node.name}_step{step_idx}'
last_out = node.outputs[-1] if step_idx == total_steps - 1 else None
if Ht_prev is not None:
# zt = f(Xt*(Wz^T) + Ht-1*(Rz^T) + Wbz + Rbz)
zt_X_out = GRU.add_gru_conv(graph, X_slice, f'{base_prefix}_zt_X', W_vals, Wb_vals,
zt_index_W, dtype)
zt_Ht_out = GRU.add_gru_conv(graph, Ht_prev, f'{base_prefix}_zt_Ht-1', R_vals, Rb_vals,
zt_index_R, dtype)
zt_add_out = GRU.add_gru_elementwise(graph, 'Add', [zt_X_out, zt_Ht_out],
f'{base_prefix}_zt_add', dtype)
zt_act_out = GRU.add_gru_act(graph, f, zt_add_out, f'{base_prefix}_zt_act_f', dtype)
# rt = f(Xt*(Wr^T) + Ht-1*(Rr^T) + Wbr + Rbr)
rt_X_out = GRU.add_gru_conv(graph, X_slice, f'{base_prefix}_rt_X', W_vals, Wb_vals,
rt_index_W, dtype)
rt_Ht_out = GRU.add_gru_conv(graph, Ht_prev, f'{base_prefix}_rt_Ht-1', R_vals, Rb_vals,
rt_index_R, dtype)
rt_add_out = GRU.add_gru_elementwise(graph, 'Add', [rt_X_out, rt_Ht_out],
f'{base_prefix}_rt_add', dtype)
rt_act_out = GRU.add_gru_act(graph, f, rt_add_out, f'{base_prefix}_rt_act_f', dtype)
# Xt*(Wh^T)
ht_X_out = GRU.add_gru_conv(graph, X_slice, f'{base_prefix}_X_ht', W_vals, Wb_vals,
ht_index_W, dtype)
# (rt (.) Ht-1)
ht_mul_out = GRU.add_gru_elementwise(graph, 'Mul', [rt_act_out, Ht_prev],
f'{base_prefix}_Ht_mul', dtype)
# (rt (.) Ht-1)*(Rh^T)
ht_Ht_out = GRU.add_gru_conv(graph, ht_mul_out, f'{base_prefix}_Ht', R_vals, Rb_vals,
ht_index_R, dtype)
# Xt*(Wh^T) + (rt (.) Ht-1)*(Rh^T) + Rbh + Wbh
X_Ht_add_out = GRU.add_gru_elementwise(graph, 'Add', [ht_Ht_out, ht_X_out],
f'{base_prefix}_X_Ht_add', dtype)
# linear_before_reset = 0: ht = g(Xt*(Wh^T) + (rt (.) Ht-1)*(Rh^T) + Rbh + Wbh)
X_Ht_act_out = GRU.add_gru_act(graph, g, X_Ht_add_out, f'{base_prefix}_X_Ht_g', dtype)
# linear_before_reset != 0: ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*(Rh^T) + Rbh)) + Wbh)
# h_default = self.g(np.dot(x, np.transpose(w_h)) + np.dot(r * H_t, np.transpose(r_h)) + w_bh + r_bh)
# h_linear = self.g(np.dot(x, np.transpose(w_h)) + r * (np.dot(H_t, np.transpose(r_h)) + r_bh) + w_bh)
# h = h_linear if self.LBR else h_default
# (1 - zt)
zt_sub_out = GRU.add_gru_const_op(graph, 'Add', zt_act_out, np.array([-1]),
f'{base_prefix}_zt_const_add', dtype)
zt_mul_out = GRU.add_gru_const_op(graph, 'Mul', zt_sub_out, np.array([-1]),
f'{base_prefix}_zt_const_mul', dtype)
# (1 - zt) (.) ht
zt_mul0_out = GRU.add_gru_elementwise(graph, 'Mul', [zt_mul_out, X_Ht_act_out],
f'{base_prefix}_zt_mul0', dtype)
# zt (.) Ht-1
zt_mul1_out = GRU.add_gru_elementwise(graph, 'Mul', [zt_act_out, Ht_prev],
f'{base_prefix}_zt_mul1', dtype)
# Ht = (1 - zt) (.) ht + zt (.) Ht-1
out = GRU.add_gru_elementwise(graph,
'Add', [zt_mul0_out, zt_mul1_out],
f'{base_prefix}_out',
dtype,
ew_out=last_out)
else:
# zt = f(Xt*(Wz^T) + Wbz + Rbz)
zt_X_out = GRU.add_gru_conv(graph, X_slice, f'{base_prefix}_zt_X', W_vals, Wb_vals,
zt_index_W, dtype)
GRU.add_gru_bias(graph.nodes[-1], Rb_vals, zt_index_R)
zt_act_out = GRU.add_gru_act(graph, f, zt_X_out, f'{base_prefix}_zt_act_f', dtype)
# Xt*(Wh^T)
ht_X_out = GRU.add_gru_conv(graph, X_slice, f'{base_prefix}_X_ht', W_vals, Wb_vals,
ht_index_W, dtype)
GRU.add_gru_bias(graph.nodes[-1], Rb_vals, ht_index_R)
# ht = g(Xt*(Wh^T) + (rt (.) Ht-1)*(Rh^T) + Rbh + Wbh)
Ht_act_out = GRU.add_gru_act(graph, g, ht_X_out, f'{base_prefix}_X_Ht_g', dtype)
# (1 - zt)
zt_sub_out = GRU.add_gru_const_op(graph, 'Add', zt_act_out, np.array([-1]),
f'{base_prefix}_zt_const_add', dtype)
zt_mul_out = GRU.add_gru_const_op(graph, 'Mul', zt_sub_out, np.array([-1]),
f'{base_prefix}_zt_const_mul', dtype)
# (1 - zt) (.) ht
out = GRU.add_gru_elementwise(graph,
'Mul', [zt_mul_out, Ht_act_out],
f'{base_prefix}_zt_mul0',
dtype,
ew_out=last_out)
return out
@staticmethod
def qualifies_for_reconstruction(node):
result = node.op == 'GRU'
SUPPORTED_GRU_ACTIVATIONS = {'Sigmoid', 'Tanh', 'Relu'}
SUPPORTED_GRU_DIRECTIONS = {'forward'}
if result:
attrs = node.attrs
result &= attrs.get('layout', 0) == 1
result &= attrs.get('linear_before_reset', 0) == 0
result &= attrs.get('direction', 'forward') in SUPPORTED_GRU_DIRECTIONS
result &= attrs.get('activation_alpha', None) is None
result &= attrs.get('activation_beta', None) is None
result &= attrs.get('clip', None) is None
activations = attrs.get('activations', ['Sigmoid', 'Tanh'])
result &= len(activations) == 2
if result:
f, g = activations
result &= f in SUPPORTED_GRU_ACTIVATIONS
result &= g in SUPPORTED_GRU_ACTIVATIONS
return result
@staticmethod
def reconstruct(node, graph):
if GRU.qualifies_for_reconstruction(node):
info(f'Reconstructing {node.op} node "{node.name}"...')
X, W, R = node.inputs[:3]
batch_size, seq_length, input_size = X.shape
dtype = X.dtype
attrs = node.attrs
hidden_size = attrs['hidden_size']
direction = attrs.get('direction', 'forward')
activations = attrs.get('activations', ['Sigmoid', 'Tanh'])
linear_before_reset = attrs.get('linear_before_reset', 0) == 1
f, g = activations
num_directions = NUM_DIRECTIONS_DICT[direction]
Ht_prev_shape = [batch_size, hidden_size, num_directions, 1]
Ht_prev = None
if len(node.inputs) > 5:
Ht_prev = node.inputs[5]
Ht_prev.shape = Ht_prev_shape
assert isinstance(W, gs.Constant)
W_vals = [
GRU.from2dto4d(W.values[0, x * hidden_size:(x + 1) * hidden_size])
for x in range(3)
]
assert isinstance(R, gs.Constant)
R_vals = [
GRU.from2dto4d(R.values[0, x * hidden_size:(x + 1) * hidden_size])
for x in range(3)
]
Wb_vals = [None, None, None]
Rb_vals = [None, None, None]
if len(node.inputs) > 3:
assert isinstance(node.inputs[3], gs.Constant)
B_vals = node.inputs[3].values
B_dims = B_vals.shape
assert B_dims[0] == 1
cutoff = B_dims[1] // 6
vals = [B_vals[0, x * cutoff:(x + 1) * cutoff] for x in range(6)]
Wb_vals = vals[:3]
Rb_vals = vals[3:]
if seq_length == 1:
X.shape = [batch_size, input_size, 1, 1]
out = GRU.reconstruct_step(graph, node, 0, seq_length, X, Ht_prev, W_vals, Wb_vals,
R_vals, Rb_vals, f, g, dtype, linear_before_reset)
else:
X.shape = [batch_size, seq_length, input_size, 1]
X_transposed = GRU.add_gru_transpose(graph, X, (0, 2, 1, 3),
f'{node.name}_transpose_in', dtype)
for step_idx in range(seq_length):
X_slice = GRU.add_gru_slice(graph, step_idx, 2, X_transposed,
f'{node.name}_slice{step_idx}', dtype)
Ht_prev = GRU.reconstruct_step(graph, node, step_idx, seq_length, X_slice,
Ht_prev, W_vals, Wb_vals, R_vals, Rb_vals, f, g,
dtype, linear_before_reset)
out = Ht_prev
node.inputs.clear()
node.outputs.clear()
graph.cleanup().toposort()
@staticmethod
def onnx_inference(session, input_data, prev_output, reshape_input_dims=None):
if not isinstance(input_data, list):
input_data = [input_data]
session_inputs = session.get_inputs()
if reshape_input_dims is not None:
input_data[0] = np.reshape(input_data, reshape_input_dims)
if len(session_inputs) == 2:
input_data = [input_data[0], prev_output]
assert len(session_inputs) == len(
input_data), f'{len(session_inputs)} != {len(input_data)}'
input_dict = dict()
for tensor, data in zip(session_inputs, input_data):
assert tensor.type in ONNXRUNTIME_DT_TO_NUMPY_DT, 'No mapping from ONNX RT to NumPy data type detected, you may need to extend it'
dtype = ONNXRUNTIME_DT_TO_NUMPY_DT[tensor.type]
if data is None:
data = np.zeros(tensor.shape)
input_dict[tensor.name] = data.astype(dtype)
output_names = [out.name for out in session.get_outputs()]
outputs = session.run(output_names, input_dict)
return outputs
@staticmethod
def insert_transpose(graph, node, input_direction=True):
perm = (1, 0, 2)
tensors = node.inputs if input_direction else node.outputs
for idx, tensor in enumerate(tensors):
if not isinstance(tensor, gs.Constant):
transpose_name = f'{tensor.name}_transpose'
transpose_tmp = gs.Variable(name=transpose_name, dtype=tensor.dtype)
if input_direction:
node_inputs = [tensor]
node_outputs = [transpose_tmp]
node.inputs[idx] = transpose_tmp
else:
node_inputs = [transpose_tmp]
node_outputs = [tensor]
node.outputs[idx] = transpose_tmp
transpose_node = gs.Node(op='Transpose',
attrs=dict(perm=perm),
name=transpose_name,
inputs=node_inputs,
outputs=node_outputs)
graph.nodes.append(transpose_node)
@staticmethod
def patch_gru_layout(onnx_path_orig):
# layout 1 is not supported in ONNX Runtime on CPU yet:
# initialization: /Users/runner/work/1/s/onnxruntime/core/providers/cpu/rnn/deep_cpu_gru.h:55 onnxruntime::DeepCpuGruOp::DeepCpuGruOp(const onnxruntime::OpKernelInfo &) layout_ == 0 was false. Batchwise recurrent operations (layout == 1) are not supported. If you need support create a github issue with justification
graph = gs.import_onnx(onnx.load(onnx_path_orig))
patched = False
patched_path = onnx_path_orig
for node in graph.nodes:
if node.op == 'GRU' and node.attrs.get('layout', 0) == 1:
node.attrs['layout'] = 0
GRU.insert_transpose(graph, node, input_direction=True)
GRU.insert_transpose(graph, node, input_direction=False)
patched = True
if patched:
graph.cleanup().toposort()
patched_path = 'tmp.onnx'
onnx.save(gs.export_onnx(graph), patched_path)
return patched_path
def run_comparison(self,
onnx_paths,
input_shapes=None,
incremental_vals=False,
input_data=None,
num_iterations=3,
index_reconstructed=-1):
assert len(onnx_paths) == 2
self.verify_reconstruction(onnx_paths[index_reconstructed][0])
if input_data is None:
assert input_shapes is not None
if incremental_vals:
input_data = [
np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
for shape in input_shapes
]
else:
input_data = [np.random.rand(*shape).astype(np.float32) for shape in input_shapes]
else:
assert not incremental_vals
assert input_shapes is None
index_orig = index_reconstructed - 1
onnx_paths[index_orig] = list(onnx_paths[index_orig])
onnx_paths[index_orig][0] = GRU.patch_gru_layout(onnx_paths[index_orig][0])
session_outputs = list()
for onnx_file, reshape_input_dims in onnx_paths:
session = onnxruntime.InferenceSession(onnx_file)
prev_output = None
iteration_outputs = list()
for _ in range(num_iterations):
session_output = GRU.onnx_inference(session, input_data, prev_output,
reshape_input_dims)
prev_output = session_output[0]
iteration_outputs.append(prev_output)
session_outputs.append(iteration_outputs)
for iteration in range(num_iterations):
maxabsdiff = np.abs(session_outputs[0][iteration].flatten().astype(np.float32) -
session_outputs[1][iteration].flatten().astype(np.float32)).max()
info(
    f'Max absdiff between {onnx_paths[0]} and {onnx_paths[1]} at iteration {iteration + 1}/{num_iterations}: {maxabsdiff}'
)
return maxabsdiff
def test_config(self,
input_shapes=None,
hidden_size=None,
num_iterations=None,
use_bias=False,
use_initial_h=True,
linear_before_reset=0,
layout=1):
attrs = dict(hidden_size=hidden_size,
linear_before_reset=linear_before_reset,
layout=layout)
orig_graph = self.generate(input_shapes,
attrs,
use_bias=use_bias,
use_initial_h=use_initial_h)
orig_path = Operator.save_gs_graph(orig_graph, run_shape_inference=True, opset=14)
reconstructed_graph = gs.import_onnx(onnx.load(orig_path))
for node in reconstructed_graph.nodes:
self.reconstruct(node, reconstructed_graph)
reconstructed_graph.name = orig_graph.name.replace('_orig', '_reconstructed')
reconstructed_path = GRU.save_gs_graph(reconstructed_graph, run_shape_inference=True)
reconstructed_input_shape = reconstructed_graph.inputs[0].shape
maxabsdiff = self.run_comparison([(orig_path, None),
(reconstructed_path, reconstructed_input_shape)],
input_shapes=input_shapes,
num_iterations=num_iterations)
return maxabsdiff
def test(self, input_shapes=None, hidden_size=None, num_iterations=None):
input_shapes = input_shapes or [(2, 4, 3)]
hidden_size = hidden_size or 5
num_iterations = num_iterations or 2
maxabsdiff = 0.0
for use_bias in [False, True]:
for use_initial_h in [False, True]:
maxabsdiff = max(
maxabsdiff,
self.test_config(input_shapes=input_shapes,
hidden_size=hidden_size,
num_iterations=num_iterations,
use_bias=use_bias,
use_initial_h=use_initial_h))
return maxabsdiff
def main():
op = GRU()
num_iterations = 3
op.test(num_iterations=num_iterations)
if __name__ == '__main__':
main()
|
Deep-Learning-Accelerator-SW-main
|
operators/op_reconstruction/GRU.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2022 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
"""Reconstruction of a DepthToSpace op."""
from logging import info
import numpy as np
import onnx_graphsurgeon as gs
from common.Operator import Operator
class DepthToSpace(Operator):
def generate(self, input_shapes, attrs):
graph = super().generate(input_shapes, attrs=attrs)
mode = attrs.get('mode', 'CRD')
graph.name = f'{self.op}_{mode}_orig'
return graph
@classmethod
def reconstruct(cls, node, graph):
if cls.qualifies_for_reconstruction(node):
info(f'Reconstructing {node.op} node "{node.name}"...')
mode = node.attrs.get('mode', 'CRD')
blocksize = node.attrs.get('blocksize', 1)
channels_in = node.inputs[0].shape[1]
assert channels_in % (blocksize * blocksize) == 0
channels_out = channels_in // (blocksize * blocksize)
node.op = 'ConvTranspose'
node.attrs = dict(kernel_shape=(blocksize, blocksize), strides=(blocksize, blocksize))
weight_shape = (channels_in, channels_out, blocksize, blocksize)
weight_vals = np.zeros(weight_shape, dtype=np.float32)
if mode == 'CRD':
for c in range(channels_in):
filter_slice = weight_vals[c, :].flatten()
filter_slice[c] = 1
weight_vals[c, :] = filter_slice.reshape(channels_out, blocksize, blocksize)
elif mode == 'DCR':
stacked_kernels = weight_vals.reshape(channels_in,
channels_out * blocksize * blocksize)
for block_idx in range(channels_in):
kernel_one_idx = block_idx // channels_out + (
block_idx % channels_out) * blocksize * blocksize
stacked_kernels[block_idx][kernel_one_idx] = 1
weight_vals = stacked_kernels.reshape(weight_shape)
else:
assert False, f'Unknown DepthToSpace mode: {mode}'
weight_constant = gs.Constant(name=f'{node.name}_tmp0', values=weight_vals)
node.inputs.append(weight_constant)
def test(self, input_shapes=None, blocksize=None, modes=None):
input_shapes = input_shapes or [(1, 512, 48, 48)]
blocksize = blocksize or 4
modes = modes or ['CRD', 'DCR']
maxabsdiff = 0.0
for mode in modes:
attrs = dict(blocksize=blocksize, mode=mode)
maxabsdiff = max(super().test(input_shapes, attrs=attrs), maxabsdiff)
return maxabsdiff
def main():
op = DepthToSpace()
op.test()
if __name__ == '__main__':
main()
|
Deep-Learning-Accelerator-SW-main
|
operators/op_reconstruction/DepthToSpace.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
"""Reconstruction of an And op."""
from logging import info
from common.BinaryOperator import BinaryOperator
import numpy as np
class And(BinaryOperator):
@classmethod
def reconstruct(cls, node, graph):
if cls.qualifies_for_reconstruction(node):
info(f'Reconstructing {node.op} node "{node.name}"...')
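# For boolean inputs represented as {0.0, 1.0} in float32, elementwise logical AND is
# equivalent to elementwise multiplication, so the node can be replaced by a Mul.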
node.op = 'Mul'
for tensor in node.inputs + node.outputs:
tensor.dtype = np.float32
def test(self):
input_shapes = [(1, 4, 1, 1)] * 2
input_data = list()
input_data.append(np.array([True, True, False, False]).reshape(input_shapes[0]))
input_data.append(np.array([True, False, True, False]).reshape(input_shapes[1]))
return super().test(input_data=input_data)
def main():
op = And()
op.test()
if __name__ == '__main__':
main()
|
Deep-Learning-Accelerator-SW-main
|
operators/op_reconstruction/And.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
"""Utils for ONNX Runtime inference."""
import onnxruntime as ort
import numpy as np
ONNXRUNTIME_DT_TO_NUMPY_DT = {'tensor(bool)': bool, 'tensor(float)': np.float32}
def onnx_inference(onnx_file, input_data, session_options=None):
session = ort.InferenceSession(onnx_file, session_options)
if not isinstance(input_data, list):
input_data = [input_data]
session_inputs = session.get_inputs()
assert len(session_inputs) == len(input_data)
input_dict = dict()
for tensor, data in zip(session_inputs, input_data):
assert tensor.type in ONNXRUNTIME_DT_TO_NUMPY_DT, \
f'No mapping from ONNX Runtime type "{tensor.type}" to a NumPy dtype; extend ONNXRUNTIME_DT_TO_NUMPY_DT'
dtype = ONNXRUNTIME_DT_TO_NUMPY_DT[tensor.type]
input_dict[tensor.name] = data.astype(dtype)
output_name = session.get_outputs()[0].name
output = session.run([output_name], input_dict)
return output
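# Minimal usage sketch (hypothetical file name and shape, shown only for illustration):
#   data = np.random.rand(1, 3, 224, 224).astype(np.float32)
#   (output,) = onnx_inference('model.onnx', [data])
# Note that only the first graph output is fetched and returned.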
|
Deep-Learning-Accelerator-SW-main
|
operators/common/onnxruntime_utils.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
"""Generic Operator class for op reconstructions."""
from logging import info
from onnx import shape_inference
import onnx
import onnx_graphsurgeon as gs
import os
import json
from collections import OrderedDict
from common.onnxruntime_utils import onnx_inference
import numpy as np
class Operator():
def __init__(self):
self.node_count = 0
self.tensor_count = 0
self.op = type(self).__name__
def generate(self, input_shapes, **kwargs):
if not isinstance(input_shapes, list):
input_shapes = [input_shapes]
dtype = kwargs.get('dtype', np.float32)
inputs = [
gs.Variable(name=self.new_tensor_name(), dtype=dtype, shape=shape)
for shape in input_shapes
]
outputs = [gs.Variable(name=self.new_tensor_name(), dtype=dtype)]
node = gs.Node(op=self.op, inputs=inputs, outputs=outputs, name=self.new_node_name())
if 'attrs' in kwargs:
node.attrs = kwargs['attrs']
nonconst_inputs = [x for x in node.inputs if not isinstance(x, gs.Constant)]
graph = gs.Graph(nodes=[node], inputs=nonconst_inputs, outputs=node.outputs)
graph.name = f'{self.op}_orig'
return graph
@classmethod
def op_to_reconstruct(cls):
return cls.__name__
@classmethod
def qualifies_for_reconstruction(cls, node):
return node.op == cls.op_to_reconstruct()
@classmethod
def reconstruct(cls, node, graph, **kwargs):
if cls.qualifies_for_reconstruction(node):
# specialize here
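# Subclasses override reconstruct() and rewrite the matching node in place
# (node.op, node.attrs, node.inputs/outputs), optionally adding helper nodes to `graph`;
# see DepthToSpace.reconstruct for a concrete example.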
pass
def test(self, input_shapes, **kwargs):
orig_graph = self.generate(input_shapes, **kwargs)
orig_path = Operator.save_gs_graph(orig_graph, run_shape_inference=True)
reconstructed_graph = gs.import_onnx(onnx.load(orig_path))
for node in reconstructed_graph.nodes:
self.reconstruct(node, reconstructed_graph)
reconstructed_graph.name = orig_graph.name.replace('_orig', '_reconstructed')
reconstructed_path = Operator.save_gs_graph(reconstructed_graph)
maxabsdiff = self.run_comparison([orig_path, reconstructed_path], input_shapes)
return maxabsdiff
def new_node_name(self):
result = f'{self.op}_{self.node_count}'
self.node_count += 1
return result
def new_tensor_name(self):
result = f'tensor_{self.tensor_count}'
self.tensor_count += 1
return result
@staticmethod
def save_gs_graph(graph,
top_dir='./models',
run_shape_inference=False,
opset=13,
producer='onnx_graphsurgeon'):
graph.opset = opset
graph.producer = producer
if run_shape_inference:
for node in graph.nodes:
for out in node.outputs:
out.shape = None
model = gs.export_onnx(graph)
if run_shape_inference:
model = shape_inference.infer_shapes(model)
os.makedirs(top_dir, exist_ok=True)
nonconst_inputs = [x for x in graph.inputs if not isinstance(x, gs.Constant)]
input_shape = 'x'.join([str(x) for x in nonconst_inputs[0].shape])
path_name = os.path.join(top_dir, f'{graph.name}_{input_shape}.onnx')
info(f'Saving file {path_name}...')
onnx.save(model, path_name)
return path_name
def verify_reconstruction(self, onnx_file):
graph = gs.import_onnx(onnx.load(onnx_file))
for node in graph.nodes:
assert node.op != self.op, f'Node "{node.name}" of op type "{node.op}" has not been reconstructed'
@staticmethod
def save_scales(json_path, key_to_range):
ranges_dict = OrderedDict()
offset = 0
for key, (low, high) in key_to_range.items():
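# Symmetric int8 quantization: the scale maps the largest absolute value of the range onto 127.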
scale = max(abs(low), abs(high)) / 127.0
ranges_dict[key] = dict(scale=scale, min=float(low), max=float(high), offset=offset)
with open(json_path, 'w') as outfile:
outfile.write(json.dumps(ranges_dict, indent=4))
def save_inputs_and_ref_outputs(self, reconstructed_path, inputs, output, dtypes=(np.float16, np.int8)):
DTYPE_STRINGS = {np.float16: 'fp16', np.int8: 'int8'}
def preprocess(data, dtype):
data = data.copy()
if dtype == np.int8 and data.dtype != bool:
dyn_range = max(np.abs(data.max()), np.abs(data.min()))
scaling_factor = dyn_range / 127.0
data /= scaling_factor
data = np.round(data).clip(-127, 127)
data = data.astype(dtype)
return data
results_dir = reconstructed_path.replace('.onnx', '')
graph = gs.import_onnx(onnx.load(reconstructed_path))
nonconst_inputs = [x for x in graph.inputs if not isinstance(x, gs.Constant)]
assert len(nonconst_inputs) == len(inputs)
os.makedirs(results_dir, exist_ok=True)
key_to_range = dict()
for dtype in dtypes:
for idx, (inp_data, inp_tensor) in enumerate(zip(inputs, nonconst_inputs)):
np.save(os.path.join(results_dir, f'inputs_{idx}_{DTYPE_STRINGS[dtype]}.npy'),
preprocess(inp_data, dtype))
if dtype == np.int8:
key_to_range[inp_tensor.name] = (inp_data.min(), inp_data.max())
np.save(os.path.join(results_dir, f'outputs_0_{DTYPE_STRINGS[dtype]}.npy'),
preprocess(output, dtype))
if dtype == np.int8:
key_to_range[graph.outputs[0].inputs[0].name] = (output.min(), output.max())
Operator.save_scales(os.path.join(results_dir, 'int8.json'), key_to_range)
def run_comparison(self,
onnx_paths,
input_shapes=None,
incremental_vals=False,
input_data=None,
index_reconstructed=-1,
session_options=None):
assert len(onnx_paths) == 2
self.verify_reconstruction(onnx_paths[index_reconstructed])
if input_data is None:
assert input_shapes is not None
if incremental_vals:
input_data = [
np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
for shape in input_shapes
]
else:
input_data = [np.random.rand(*shape).astype(np.float32) for shape in input_shapes]
else:
assert not incremental_vals
assert input_shapes is None
session_outputs = list()
for onnx_file in onnx_paths:
output_data = onnx_inference(onnx_file, input_data, session_options)
session_outputs.append(output_data[0])
maxabsdiff = np.abs(session_outputs[0].flatten().astype(np.float32) -
session_outputs[1].flatten().astype(np.float32)).max()
info(f'Max absdiff between {onnx_paths[0]} and {onnx_paths[1]}: {maxabsdiff}')
self.save_inputs_and_ref_outputs(onnx_paths[1], input_data, session_outputs[0])
return maxabsdiff
|
Deep-Learning-Accelerator-SW-main
|
operators/common/Operator.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2022 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
"""Common utils for op reconstructions."""
import logging
import numpy as np
np.random.seed(0xdeadbeef)
logging.basicConfig(format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p',
level=logging.INFO)
|
Deep-Learning-Accelerator-SW-main
|
operators/common/__init__.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
"""BinaryOperator class for op reconstructions."""
import onnx
import numpy as np
import onnx_graphsurgeon as gs
from common.Operator import Operator
class BinaryOperator(Operator):
def generate(self, input_shapes, **kwargs):
kwargs['dtype'] = bool
return super().generate(input_shapes, **kwargs)
def test(self, input_data):
if not isinstance(input_data, list):
input_data = [input_data]
input_shapes = [data.shape for data in input_data]
orig_graph = self.generate(input_shapes)
orig_path = Operator.save_gs_graph(orig_graph, run_shape_inference=True)
reconstructed_graph = gs.import_onnx(onnx.load(orig_path))
for node in reconstructed_graph.nodes:
self.reconstruct(node, reconstructed_graph)
reconstructed_graph.name = orig_graph.name.replace('_orig', '_reconstructed')
reconstructed_path = Operator.save_gs_graph(reconstructed_graph)
maxabsdiff = self.run_comparison([orig_path, reconstructed_path], input_data=input_data)
return maxabsdiff
|
Deep-Learning-Accelerator-SW-main
|
operators/common/BinaryOperator.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
"""IndexSelectionOperator class for reconstructions of Gather/Scatter-style ops."""
import numpy as np
import onnx_graphsurgeon as gs
from common.Operator import Operator
class IndexSelectionOperator(Operator):
expected_num_inputs = 1
@staticmethod
def insert_transposes_if_needed(node, graph, axis):
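# The reconstruction operates along axis 1 (the channel axis), so for axis 2 or 3 a Transpose
# moves the target axis into position 1, and a second Transpose restores the original layout
# on the output.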
transposes_needed = False
first_permute = None
second_permute = None
if axis == 1:
transposes_needed = False
elif axis == 2:
# move axis 2 to 1
first_permute = (0, 2, 3, 1)
# move axis 1 to 2 again
second_permute = (0, 3, 1, 2)
transposes_needed = True
elif axis == 3:
# move axis 3 to 1
first_permute = (0, 3, 2, 1)
# move axis 1 to 3 again
second_permute = (0, 3, 2, 1)
transposes_needed = True
else:
assert False, 'Not implemented'
if transposes_needed:
nonconst_inputs = [(idx, x) for idx, x in enumerate(node.inputs)
if not isinstance(x, gs.Constant)]
new_inputs = list(node.inputs)
for orig_idx, inp in nonconst_inputs:
orig_input_shape = inp.shape
new_input_shape = [orig_input_shape[x] for x in first_permute]
first_tmp_tensor = gs.Variable(name=f'{node.name}_transpose0_{orig_idx}',
dtype=np.float32,
shape=new_input_shape)
first_transpose = gs.Node(op='Transpose',
inputs=[inp],
outputs=[first_tmp_tensor],
attrs=dict(perm=first_permute),
name=first_tmp_tensor.name)
graph.nodes.append(first_transpose)
new_inputs[orig_idx] = first_tmp_tensor
node.inputs = new_inputs
second_tmp_tensor = gs.Variable(name=f'{node.name}_tmp1', dtype=np.float32, shape=None)
second_transpose = gs.Node(op='Transpose',
inputs=[second_tmp_tensor],
outputs=[node.outputs[0]],
attrs=dict(perm=second_permute),
name=second_tmp_tensor.name)
graph.nodes.append(second_transpose)
node.outputs = [second_tmp_tensor]
graph.cleanup().toposort()
return transposes_needed
@classmethod
def qualifies_for_reconstruction(cls, node):
result = super().qualifies_for_reconstruction(node)
if result:
axis = node.attrs.get('axis', 0)
result = result and len(node.inputs) == cls.expected_num_inputs
result = result and isinstance(node.inputs[1], gs.Constant)
result = result and axis in {1, 2, 3}
return result
|
Deep-Learning-Accelerator-SW-main
|
operators/common/IndexSelectionOperator.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
"""ONNX preparation for ResNet-50 translated by QDQ Translator."""
import os
import onnx
import numpy as np
import onnx_graphsurgeon as gs
from onnx import shape_inference
import common
def expand_first_conv_pads(graph, first_conv_name):
node_dict = dict()
for node in graph.nodes:
node_dict[node.name] = node
# add padding
node_dict[first_conv_name].attrs['pads'] = (3, 3, 3, 3)
def main():
onnx_file_orig = 'tools/qdq-translator/translated/resnet_50v1_noqdq.onnx'
if not os.path.isfile(onnx_file_orig):
print(f'Could not find {onnx_file_orig}, first prepare it as detailed in instructions.')
exit(0)
onnx_file_chopped = onnx_file_orig.replace('.onnx', '_prepared.onnx')
graph = gs.import_onnx(onnx.load(onnx_file_orig))
input_shape = (-1, 3, 224, 224)
input_names = ['StatefulPartitionedCall/resnet50/quant_conv1_conv/BiasAdd__511:0']
output_names = ['Identity:0']
common.extract_subgraph(graph, input_names, output_names, input_shape)
expand_first_conv_pads(
graph, first_conv_name='StatefulPartitionedCall/resnet50/quant_conv1_conv/BiasAdd')
common.simplify_classification_head(
graph,
mean_name='StatefulPartitionedCall/resnet50/quant_avg_pool/Mean',
squeeze_name='StatefulPartitionedCall/resnet50/quant_avg_pool/Mean_Squeeze__1117',
matmul_name='StatefulPartitionedCall/resnet50/quant_predictions/MatMul',
add_name='StatefulPartitionedCall/resnet50/quant_predictions/BiasAdd',
softmax_name='StatefulPartitionedCall/resnet50/quant_predictions/Softmax')
print(f'Saving the resulting model to {onnx_file_chopped}...')
onnx_model = gs.export_onnx(graph)
onnx_model = shape_inference.infer_shapes(onnx_model)
onnx.save(onnx_model, onnx_file_chopped)
if __name__ == '__main__':
main()
|
Deep-Learning-Accelerator-SW-main
|
scripts/prepare_models/resnet50_noqdq.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
"""ONNX preparation for SSD-ResNet-34."""
import os
import onnx
import onnx_graphsurgeon as gs
from onnx import shape_inference
import common
def main():
onnx_file_orig = './resnet34-ssd1200.onnx'
if not os.path.isfile(onnx_file_orig):
print(f'Could not find {onnx_file_orig}, first download it as detailed in instructions.')
exit(0)
onnx_file_chopped = onnx_file_orig.replace('.onnx', '_prepared.onnx')
graph = gs.import_onnx(onnx.load(onnx_file_orig))
input_shape = (-1, 3, 1200, 1200)
input_names = ['image']
fuse_conv_outputs = [('Conv_338', 'Conv_349'), ('Conv_360', 'Conv_371'),
('Conv_382', 'Conv_393'), ('Conv_404', 'Conv_415'),
('Conv_426', 'Conv_437'), ('Conv_448', 'Conv_459')]
output_names = list()
for fused_pair in fuse_conv_outputs:
output_names.extend(fused_pair)
common.extract_subgraph(graph, input_names, output_names, input_shape)
common.fuse_convs_horizontally(graph, fuse_conv_outputs)
print(f'Saving the resulting model to {onnx_file_chopped}...')
onnx_model = gs.export_onnx(graph)
onnx_model = shape_inference.infer_shapes(onnx_model)
onnx.save(onnx_model, onnx_file_chopped)
if __name__ == '__main__':
main()
|
Deep-Learning-Accelerator-SW-main
|
scripts/prepare_models/ssd_resnet34.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
"""ONNX preparation for SSD-MobileNetV1."""
import onnx
import onnx_graphsurgeon as gs
from onnx import shape_inference
import os
import common
def main():
onnx_file_orig = './ssd_mobilenet_v1_coco_2018_01_28.onnx'
if not os.path.isfile(onnx_file_orig):
print(f'Could not find {onnx_file_orig}, first download it as detailed in instructions.')
exit(0)
graph = gs.import_onnx(onnx.load(onnx_file_orig))
input_shape = (-1, 3, 300, 300)
input_names = ['Preprocessor/sub:0']
output_names = list()
fuse_conv_outputs = list()
for idx in range(6):
fused_nodes = list()
for appendix in ['/BoxEncodingPredictor/BiasAdd:0', '/ClassPredictor/BiasAdd:0']:
fused_nodes.append(f'BoxPredictor_{idx}{appendix}')
output_names.extend(fused_nodes)
fuse_conv_outputs.append(fused_nodes)
common.extract_subgraph(graph, input_names, output_names, input_shape)
onnx_model = gs.export_onnx(graph)
onnx_model = shape_inference.infer_shapes(onnx_model)
onnx_file_chopped = onnx_file_orig.replace('.onnx', '_prepared.onnx')
graph = gs.import_onnx(onnx_model)
common.fuse_into_conv(graph, common.fuse_mul_into_conv, ('Conv', 'Mul'))
common.fuse_into_conv(graph, common.fuse_add_into_conv, ('Conv', 'Add'))
common.fuse_convs_horizontally(graph, fuse_conv_outputs)
model_opt = gs.export_onnx(graph)
onnx.save(model_opt, onnx_file_chopped)
print(f'Saving the resulting model to {onnx_file_chopped}...')
if __name__ == '__main__':
main()
|
Deep-Learning-Accelerator-SW-main
|
scripts/prepare_models/ssd_mobilenetv1.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2022 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
"""Common utilities to prepare ONNX models."""
import onnx_graphsurgeon as gs
import numpy as np
def fuse_mul_into_conv(conv_node, mul_node):
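# Folds a per-channel Mul that follows a bias-free Conv into the Conv weights:
# W'[k, :, :, :] = W[k, :, :, :] * m[k], after which the Mul node can be removed.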
assert len(conv_node.inputs) == 2, 'Not supporting conv with bias for fuse_mul_into_conv'
conv_weights = conv_node.inputs[1]
assert len(mul_node.inputs) == 2
mul_factors = mul_node.inputs[1]
assert isinstance(mul_factors, gs.Constant)
assert tuple(mul_factors.shape[:1] + mul_factors.shape[2:4]) == (1, 1, 1)
mul_factors_data = mul_factors.values.reshape(-1, 1, 1, 1)
conv_weights.values *= mul_factors_data
mul_node.inputs.clear()
conv_node.outputs = [mul_node.outputs[0]]
mul_node.outputs.clear()
def fuse_add_into_conv(conv_node, add_node):
assert len(conv_node.inputs) == 2, 'Not supporting conv with bias for fuse_add_into_conv'
assert len(add_node.inputs) == 2
add_params = add_node.inputs[1]
assert isinstance(add_params, gs.Constant)
assert tuple(add_params.shape[:1] + add_params.shape[2:4]) == (1, 1, 1)
add_params.values = add_params.values.flatten()
conv_node.inputs.append(add_params)
add_node.inputs.clear()
conv_node.outputs = [add_node.outputs[0]]
add_node.outputs.clear()
def fuse_into_conv(graph, fusion_func, node_pattern):
graph.toposort()
for node in graph.nodes:
qualifies = len(node.outputs) == 1
qualifies = qualifies and node.outputs[0] not in graph.outputs
qualifies = qualifies and len(node.outputs[0].outputs) == 1
next_node = node.outputs[0].outputs[0] if qualifies else None
next_node_var_inputs = [inp for inp in next_node.inputs if not isinstance(inp, gs.Constant)] if next_node is not None else list()
qualifies = qualifies and len(next_node_var_inputs) == 1 and next_node_var_inputs[0] == node.outputs[0]
if qualifies and (node.op, next_node.op) == node_pattern:
fusion_func(node, next_node)
graph = graph.cleanup()
def fuse_convs_horizontally(graph, fuse_conv_outputs, new_names=None):
def fuse_convs(convs, new_name=None):
assert len(convs) > 0
first_conv = convs[0]
first_weights = first_conv.inputs[1]
first_bias = first_conv.inputs[2] if len(first_conv.inputs) > 2 else gs.Constant(
name=f'{first_conv.name}_bias', values=np.zeros(first_weights.shape[0], dtype=first_weights.dtype))
for conv in convs[1:]:
conv_weights = conv.inputs[1]
bias = conv.inputs[2] if len(conv.inputs) > 2 else gs.Constant(
name=f'{conv.name}_bias', values=np.zeros(conv_weights.shape[0], dtype=conv_weights.dtype))
assert conv.attrs == first_conv.attrs
assert conv_weights.shape[1] == first_weights.shape[1]
assert tuple(conv_weights.shape[2:4]) == tuple(first_weights.shape[2:4])
first_weights.values = np.concatenate([first_weights.values, conv_weights.values],
axis=0)
first_bias.values = np.concatenate([first_bias.values, bias.values], axis=0)
if new_name is None:
first_conv.name += f'+{conv.name}'
first_conv.outputs[0].name += f'+{conv.outputs[0].name}'
else:
first_conv.name = new_name
first_conv.outputs[0].name = new_name
graph.outputs.remove(conv.outputs[0])
conv.inputs.clear()
conv.outputs.clear()
graph.cleanup()
if new_names is not None:
assert len(new_names) == len(fuse_conv_outputs)
tensors = graph.tensors()
for idx, fused_outputs in enumerate(fuse_conv_outputs):
fused_convs = list()
for output in fused_outputs:
tensor = tensors[output]
assert len(tensor.inputs) == 1
conv = tensor.inputs[0]
assert conv.inputs[0] not in graph.outputs
fused_convs.append(conv)
new_name = new_names[idx] if new_names is not None else None
fuse_convs(fused_convs, new_name)
def extract_subgraph(graph, input_names, output_names, input_shape):
tensors = graph.tensors()
for tensor in tensors.values():
if tensor.shape is not None and not isinstance(tensor, gs.Constant) and len(tensor.shape) > 0:
tensor.shape[0] = input_shape[0]
graph.inputs = [
tensors[name].to_variable(dtype=np.float32, shape=input_shape) for name in input_names
]
graph.outputs = [tensors[name].to_variable(dtype=np.float32) for name in output_names]
graph = graph.cleanup()
# makes node transformations easier:
for node in graph.nodes:
for out in node.outputs:
out.shape = None
def to_resize_with_scales(graph):
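# Rewrites a Resize node that uses the constant 'sizes' input into the equivalent 'scales'
# form (per-dimension scales = target sizes / input shape), removing the dependency on a
# fixed output-size constant.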
for node in graph.nodes:
if node.op == 'Resize' and len(node.inputs) > 3:
sizes = node.inputs[3].values.astype(np.float32)
input_shape = np.array(node.inputs[0].shape, dtype=np.float32)
input_shape[0] = 1
scales_vals = sizes / input_shape
scales = gs.Constant(name=f'{node.name}_scales', values=scales_vals)
node.inputs = node.inputs[:2] + [scales]
def simplify_classification_head(graph, mean_name, squeeze_name, matmul_name, add_name, softmax_name=None):
node_dict = dict()
for node in graph.nodes:
node_dict[node.name] = node
# ReduceMean -> AveragePool:
node_dict[mean_name].op = 'AveragePool'
node_dict[mean_name].attrs = {'kernel_shape': [7, 7]}
# {Squeeze, MatMul, Add} -> 1x1 Conv:
node_dict[squeeze_name].inputs.clear()
node_dict[squeeze_name].outputs.clear()
node_dict[matmul_name].op = 'Conv'
matmul_weights = node_dict[matmul_name].inputs[1]
matmul_weights.values = matmul_weights.values.T
matmul_weights.values = np.expand_dims(matmul_weights.values, [-2, -1])
bias_weights = node_dict[add_name].inputs[1]
node_dict[matmul_name].inputs = [
node_dict[mean_name].outputs[0], matmul_weights, bias_weights
]
node_dict[matmul_name].outputs = [
node_dict[add_name].outputs[0]
]
node_dict[matmul_name].attrs = {'kernel_shape': [1, 1]}
node_dict[add_name].inputs.clear()
node_dict[add_name].outputs.clear()
if softmax_name is not None:
node_dict[softmax_name].attrs['axis'] = 1
graph.cleanup()
|
Deep-Learning-Accelerator-SW-main
|
scripts/prepare_models/common.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
"""ONNX preparation for RetinaNet-ResNeXt-50."""
import onnx
import onnx_graphsurgeon as gs
from onnx import shape_inference
import os
import common
def main():
onnx_file_orig = './resnext50_32x4d_fpn_sanitized.onnx'
if not os.path.isfile(onnx_file_orig):
print(
f'Could not find {onnx_file_orig}, first download it and sanitize it as detailed in instructions.'
)
exit(0)
graph = gs.import_onnx(onnx.load(onnx_file_orig))
input_shape = (-1, 3, 800, 800)
input_names = ['input']
output_names = [
'onnx::Shape_1640', 'onnx::Shape_1597', 'onnx::Shape_1554', 'onnx::Shape_1511',
'onnx::Shape_1468', 'onnx::Shape_1856', 'onnx::Shape_1813', 'onnx::Shape_1770',
'onnx::Shape_1727', 'onnx::Shape_1684'
]
common.extract_subgraph(graph, input_names, output_names, input_shape)
onnx_model = gs.export_onnx(graph)
onnx_model = shape_inference.infer_shapes(onnx_model)
onnx_file_chopped = onnx_file_orig.replace('.onnx', '_prepared.onnx')
graph = gs.import_onnx(onnx_model)
common.to_resize_with_scales(graph)
common.fuse_into_conv(graph, common.fuse_mul_into_conv, ('Conv', 'Mul'))
common.fuse_into_conv(graph, common.fuse_add_into_conv, ('Conv', 'Add'))
model_opt = gs.export_onnx(graph)
onnx.save(model_opt, onnx_file_chopped)
print(f'Saving the resulting model to {onnx_file_chopped}...')
if __name__ == '__main__':
main()
|
Deep-Learning-Accelerator-SW-main
|
scripts/prepare_models/retinanet_resnext50.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
"""ONNX preparation for ResNet-50."""
import os
import onnx
import numpy as np
import onnx_graphsurgeon as gs
from onnx import shape_inference
import common
def simplify_classification_head(graph):
node_dict = dict()
for node in graph.nodes:
node_dict[node.name] = node
# ReduceMean -> AveragePool:
node_dict['resnet_model/Mean'].op = 'AveragePool'
node_dict['resnet_model/Mean'].attrs = {'kernel_shape': [7, 7]}
# {Squeeze, MatMul, Add} -> 1x1 Conv:
node_dict['resnet_model/Squeeze'].inputs.clear()
node_dict['resnet_model/Squeeze'].outputs.clear()
node_dict['resnet_model/dense/MatMul'].op = 'Conv'
matmul_weights = node_dict['resnet_model/dense/MatMul'].inputs[1]
matmul_weights.values = matmul_weights.values.T
matmul_weights.values = np.expand_dims(matmul_weights.values, [-2, -1])
bias_weights = node_dict['resnet_model/dense/BiasAdd'].inputs[1]
node_dict['resnet_model/dense/MatMul'].inputs = [
node_dict['resnet_model/Mean'].outputs[0], matmul_weights, bias_weights
]
node_dict['resnet_model/dense/MatMul'].outputs = [
node_dict['resnet_model/dense/BiasAdd'].outputs[0]
]
node_dict['resnet_model/dense/MatMul'].attrs = {'kernel_shape': [1, 1]}
node_dict['resnet_model/dense/BiasAdd'].inputs.clear()
node_dict['resnet_model/dense/BiasAdd'].outputs.clear()
graph.cleanup()
def main():
onnx_file_orig = './resnet50_v1.onnx'
if not os.path.isfile(onnx_file_orig):
print(f'Could not find {onnx_file_orig}, first download it as detailed in instructions.')
exit(0)
onnx_file_chopped = onnx_file_orig.replace('.onnx', '_prepared.onnx')
graph = gs.import_onnx(onnx.load(onnx_file_orig))
input_shape = (-1, 3, 224, 224)
input_names = ['input_tensor:0']
output_names = ['resnet_model/dense/BiasAdd:0']
common.extract_subgraph(graph, input_names, output_names, input_shape)
simplify_classification_head(graph)
print(f'Saving the resulting model to {onnx_file_chopped}...')
onnx_model = gs.export_onnx(graph)
onnx_model = shape_inference.infer_shapes(onnx_model)
onnx.save(onnx_model, onnx_file_chopped)
if __name__ == '__main__':
main()
|
Deep-Learning-Accelerator-SW-main
|
scripts/prepare_models/resnet50.py
|
import triton_python_backend_utils as pb_utils
import json
import asyncio
import numpy as np
import cupy
class TritonPythonModel:
"""Your Python model must use the same class name. Every Python model
that is created must have "TritonPythonModel" as the class name.
"""
def initialize(self, args):
"""`initialize` is called only once when the model is being loaded.
Implementing `initialize` function is optional. This function allows
        the model to initialize any state associated with this model.
Parameters
----------
args : dict
Both keys and values are strings. The dictionary keys and values are:
* model_config: A JSON string containing the model configuration
* model_instance_kind: A string containing model instance kind
* model_instance_device_id: A string containing model instance device ID
* model_repository: Model repository path
* model_version: Model version
* model_name: Model name
"""
# You must parse model_config. JSON string is not parsed here
self.model_config = json.loads(args['model_config'])
# You must add the Python 'async' keyword to the beginning of `execute`
# function if you want to use `async_exec` function.
async def execute(self, requests):
"""`execute` must be implemented in every Python model. `execute`
function receives a list of pb_utils.InferenceRequest as the only
argument. This function is called when an inference request is made
for this model. Depending on the batching configuration (e.g. Dynamic
Batching) used, `requests` may contain multiple requests. Every
Python model, must create one pb_utils.InferenceResponse for every
pb_utils.InferenceRequest in `requests`. If there is an error, you can
set the error argument when creating a pb_utils.InferenceResponse
Parameters
----------
requests : list
A list of pb_utils.InferenceRequest
Returns
-------
list
A list of pb_utils.InferenceResponse. The length of this list must
be the same as `requests`
"""
responses = []
# Every Python backend must iterate over every one of the requests
# and create a pb_utils.InferenceResponse for each of them.
for request in requests:
# Get INPUT0
in_0 = pb_utils.get_input_tensor_by_name(request, 'input__0')
# List of awaitables containing inflight inference responses.
inference_response_awaits = []
for model_name in ['fil_0', 'fil_1', 'fil_2', 'fil_3', 'fil_4', 'fil_5', 'fil_6', 'fil_7', 'fil_8', 'fil_9', 'fil_10']:
# Create inference request object
infer_request = pb_utils.InferenceRequest(
model_name=model_name,
requested_output_names=["output__0"],
inputs=[in_0])
# Store the awaitable inside the array. We don't need
# the inference response immediately so we do not `await`
# here.
inference_response_awaits.append(infer_request.async_exec())
# Wait for all the inference requests to finish. The execution
# of the Python script will be blocked until all the awaitables
# are resolved.
inference_responses = await asyncio.gather(
*inference_response_awaits)
for infer_response in inference_responses:
# Make sure that the inference response doesn't have an error.
# If it has an error and you can't proceed with your model
# execution you can raise an exception.
if infer_response.has_error():
raise pb_utils.TritonModelException(
infer_response.error().message())
# Get the OUTPUT0 from the "pytorch" model inference resposne
#xgb0_output0_tensor = pb_utils.get_output_tensor_by_name(inference_responses[0], "output__0")
#python_model_output0 = pb_utils.Tensor.from_dlpack("OUTPUT0", xgb0_output0_tensor.to_dlpack())
#xgb0_output0_tensor = pb_utils.get_output_tensor_by_name(inference_responses[0], "output__0")
xgb0_output0_tensor = cupy.fromDlpack(pb_utils.get_output_tensor_by_name(inference_responses[0], "output__0").to_dlpack())
xgb1_output0_tensor = cupy.fromDlpack(pb_utils.get_output_tensor_by_name(inference_responses[1], "output__0").to_dlpack())
xgb2_output0_tensor = cupy.fromDlpack(pb_utils.get_output_tensor_by_name(inference_responses[2], "output__0").to_dlpack())
xgb3_output0_tensor = cupy.fromDlpack(pb_utils.get_output_tensor_by_name(inference_responses[3], "output__0").to_dlpack())
xgb4_output0_tensor = cupy.fromDlpack(pb_utils.get_output_tensor_by_name(inference_responses[4], "output__0").to_dlpack())
xgb5_output0_tensor = cupy.fromDlpack(pb_utils.get_output_tensor_by_name(inference_responses[5], "output__0").to_dlpack())
xgb6_output0_tensor = cupy.fromDlpack(pb_utils.get_output_tensor_by_name(inference_responses[6], "output__0").to_dlpack())
xgb7_output0_tensor = cupy.fromDlpack(pb_utils.get_output_tensor_by_name(inference_responses[7], "output__0").to_dlpack())
xgb8_output0_tensor = cupy.fromDlpack(pb_utils.get_output_tensor_by_name(inference_responses[8], "output__0").to_dlpack())
xgb9_output0_tensor = cupy.fromDlpack(pb_utils.get_output_tensor_by_name(inference_responses[9], "output__0").to_dlpack())
xgb10_output0_tensor = cupy.fromDlpack(pb_utils.get_output_tensor_by_name(inference_responses[10], "output__0").to_dlpack())
xgb_final_avg_pred = cupy.average((xgb0_output0_tensor, xgb1_output0_tensor, xgb2_output0_tensor,
xgb3_output0_tensor, xgb4_output0_tensor, xgb5_output0_tensor,
xgb6_output0_tensor, xgb7_output0_tensor, xgb8_output0_tensor,
xgb9_output0_tensor, xgb10_output0_tensor), axis=0)
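# The ensemble prediction is the elementwise mean of the 11 FIL model outputs; using cupy
# on DLPack-imported tensors keeps the averaging on the GPU without a host copy.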
#xgb_final_avg_pred_tensor = pb_utils.Tensor("OUTPUT0", xgb_final_avg_pred)
xgb_final_avg_pred_tensor = pb_utils.Tensor.from_dlpack("output__0", xgb_final_avg_pred.toDlpack())
# Create InferenceResponse. You can set an error here in case
# there was a problem with handling this inference request.
# Below is an example of how you can set errors in inference
# response:
#
# pb_utils.InferenceResponse(
            # output_tensors=..., TritonError("An error occurred"))
#
# Because the infer_response of the models contains the final
# outputs with correct output names, we can just pass the list
# of outputs to the InferenceResponse object.
inference_response = pb_utils.InferenceResponse(output_tensors=[xgb_final_avg_pred_tensor])
responses.append(inference_response)
# You should return a list of pb_utils.InferenceResponse. Length
# of this list must match the length of `requests` list.
return responses
def finalize(self):
"""`finalize` is called only once when the model is being unloaded.
Implementing `finalize` function is OPTIONAL. This function allows
the model to perform any necessary clean ups before exit.
"""
print('Cleaning up...')
|
nvidia-gcp-samples-master
|
vertex-ai-samples/prediction/xgboost_ensemble/model_repository/bls_async/1/model.py
|
# Copyright 2019 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import glob
import random
import logging
import numpy as np
from PIL import Image
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
logging.basicConfig(level=logging.DEBUG,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S")
logger = logging.getLogger(__name__)
def get_int8_calibrator(calib_cache, calib_data, max_calib_size, preprocess_func_name, calib_batch_size):
# Use calibration cache if it exists
if os.path.exists(calib_cache):
logger.info("Skipping calibration files, using calibration cache: {:}".format(calib_cache))
calib_files = []
# Use calibration files from validation dataset if no cache exists
else:
if not calib_data:
raise ValueError("ERROR: Int8 mode requested, but no calibration data provided. Please provide --calibration-data /path/to/calibration/files")
calib_files = get_calibration_files(calib_data, max_calib_size)
# Choose pre-processing function for INT8 calibration
import processing
if preprocess_func_name is not None:
preprocess_func = getattr(processing, preprocess_func_name)
else:
preprocess_func = processing.preprocess_imagenet
int8_calibrator = ImagenetCalibrator(calibration_files=calib_files,
batch_size=calib_batch_size,
cache_file=calib_cache,
preprocess_func=preprocess_func)
return int8_calibrator
def get_calibration_files(calibration_data, max_calibration_size=None, allowed_extensions=(".jpeg", ".jpg", ".png")):
"""Returns a list of all filenames ending with `allowed_extensions` found in the `calibration_data` directory.
Parameters
----------
calibration_data: str
Path to directory containing desired files.
max_calibration_size: int
Max number of files to use for calibration. If calibration_data contains more than this number,
a random sample of size max_calibration_size will be returned instead. If None, all samples will be used.
Returns
-------
calibration_files: List[str]
List of filenames contained in the `calibration_data` directory ending with `allowed_extensions`.
"""
logger.info("Collecting calibration files from: {:}".format(calibration_data))
calibration_files = [path for path in glob.iglob(os.path.join(calibration_data, "**"), recursive=True)
if os.path.isfile(path) and path.lower().endswith(allowed_extensions)]
logger.info("Number of Calibration Files found: {:}".format(len(calibration_files)))
if len(calibration_files) == 0:
raise Exception("ERROR: Calibration data path [{:}] contains no files!".format(calibration_data))
if max_calibration_size:
if len(calibration_files) > max_calibration_size:
logger.warning("Capping number of calibration images to max_calibration_size: {:}".format(max_calibration_size))
random.seed(42) # Set seed for reproducibility
calibration_files = random.sample(calibration_files, max_calibration_size)
return calibration_files
# https://docs.nvidia.com/deeplearning/sdk/tensorrt-api/python_api/infer/Int8/EntropyCalibrator2.html
class ImagenetCalibrator(trt.IInt8EntropyCalibrator2):
"""INT8 Calibrator Class for Imagenet-based Image Classification Models.
Parameters
----------
calibration_files: List[str]
List of image filenames to use for INT8 Calibration
batch_size: int
Number of images to pass through in one batch during calibration
input_shape: Tuple[int]
        Tuple of integers defining the shape of input to the model (Default: (224, 224, 3))
cache_file: str
Name of file to read/write calibration cache from/to.
preprocess_func: function -> numpy.ndarray
Pre-processing function to run on calibration data. This should match the pre-processing
done at inference time. In general, this function should return a numpy array of
shape `input_shape`.
"""
def __init__(self, calibration_files=[], batch_size=32, input_shape=(224, 224, 3),
cache_file="calibration.cache", preprocess_func=None):
super().__init__()
self.input_shape = input_shape
self.cache_file = cache_file
self.batch_size = batch_size
self.batch = np.zeros((self.batch_size, *self.input_shape), dtype=np.float32)
self.device_input = cuda.mem_alloc(self.batch.nbytes)
self.files = calibration_files
# Pad the list so it is a multiple of batch_size
if len(self.files) % self.batch_size != 0:
logger.info("Padding # calibration files to be a multiple of batch_size {:}".format(self.batch_size))
self.files += calibration_files[(len(calibration_files) % self.batch_size):self.batch_size]
self.batches = self.load_batches()
if preprocess_func is None:
logger.error("No preprocess_func defined! Please provide one to the constructor.")
sys.exit(1)
else:
self.preprocess_func = preprocess_func
def load_batches(self):
# Populates a persistent self.batch buffer with images.
for index in range(0, len(self.files), self.batch_size):
for offset in range(self.batch_size):
image = Image.open(self.files[index + offset])
self.batch[offset] = self.preprocess_func(image, *self.input_shape)
logger.info("Calibration images pre-processed: {:}/{:}".format(index+self.batch_size, len(self.files)))
yield self.batch
def get_batch_size(self):
return self.batch_size
def get_batch(self, names):
try:
# Assume self.batches is a generator that provides batch data.
batch = next(self.batches)
# Assume that self.device_input is a device buffer allocated by the constructor.
cuda.memcpy_htod(self.device_input, batch)
return [int(self.device_input)]
except StopIteration:
# When we're out of batches, we return either [] or None.
# This signals to TensorRT that there is no calibration data remaining.
return None
def read_calibration_cache(self):
# If there is a cache, use it instead of calibrating again. Otherwise, implicitly return None.
if os.path.exists(self.cache_file):
with open(self.cache_file, "rb") as f:
logger.info("Using calibration cache to save time: {:}".format(self.cache_file))
return f.read()
def write_calibration_cache(self, cache):
with open(self.cache_file, "wb") as f:
logger.info("Caching calibration data for future use: {:}".format(self.cache_file))
f.write(cache)
|
nvidia-gcp-samples-master
|
kubernetes-engine-samples/triton_gke/triton_server/tensorrt/docker/resnet/ImagenetCalibrator.py
|
nvidia-gcp-samples-master
|
kubernetes-engine-samples/triton_gke/triton_server/tensorrt/docker/resnet/__init__.py
|
|
# Copyright 2019 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import numpy as np
from PIL import Image
logging.basicConfig(level=logging.DEBUG,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S")
logger = logging.getLogger(__name__)
def preprocess_imagenet(image, height=224, width=224, channels=3):
"""Pre-processing for Imagenet-based Image Classification Models:
resnet50, vgg16, mobilenet, etc. (Doesn't seem to work for Inception)
Parameters
----------
image: PIL.Image
The image resulting from PIL.Image.open(filename) to preprocess
channels: int
The number of channels the image has (Usually 1 or 3)
height: int
The desired height of the image (usually 224 for Imagenet data)
width: int
The desired width of the image (usually 224 for Imagenet data)
Returns
-------
img_data: numpy array
The preprocessed image data in the form of a numpy array
"""
# Get the image in CHW format
resized_image = image.resize((width, height), Image.ANTIALIAS)
img_data = np.asarray(resized_image).astype(np.float32)
if len(img_data.shape) == 2:
img_data = np.stack([img_data] * 3, axis=-1)
logger.debug("Received grayscale image. Reshaped to {:}".format(img_data.shape))
assert img_data.shape[-1] == channels
mean_vec = np.array([0.485, 0.456, 0.406])
stddev_vec = np.array([0.229, 0.224, 0.225])
for i in range(img_data.shape[-1]):
# Scale each pixel to [0, 1] and normalize per channel.
img_data[:, :, i] = (img_data[:, :, i] / 255 - mean_vec[i]) / stddev_vec[i]
return img_data
|
nvidia-gcp-samples-master
|
kubernetes-engine-samples/triton_gke/triton_server/tensorrt/docker/resnet/processing.py
|
#!/usr/bin/env python3
# Copyright 2019 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import math
import logging
import argparse
import tensorrt as trt
from ImagenetCalibrator import ImagenetCalibrator, get_calibration_files, get_int8_calibrator # local module
TRT_LOGGER = trt.Logger()
logging.basicConfig(level=logging.DEBUG,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S")
logger = logging.getLogger(__name__)
def add_profiles(is_int8, config, inputs, opt_profiles):
logger.debug("=== Optimization Profiles ===")
for i, profile in enumerate(opt_profiles):
for inp in inputs:
_min, _opt, _max = profile.get_shape(inp.name)
logger.debug("{} - OptProfile {} - Min {} Opt {} Max {}".format(inp.name, i, _min, _opt, _max))
config.add_optimization_profile(profile)
if is_int8:
config.set_calibration_profile(profile)
def mark_outputs(network):
# Mark last layer's outputs if not already marked
# NOTE: This may not be correct in all cases
last_layer = network.get_layer(network.num_layers-1)
if not last_layer.num_outputs:
logger.error("Last layer contains no outputs.")
return
for i in range(last_layer.num_outputs):
network.mark_output(last_layer.get_output(i))
def check_network(network):
if not network.num_outputs:
logger.warning("No output nodes found, marking last layer's outputs as network outputs. Correct this if wrong.")
mark_outputs(network)
inputs = [network.get_input(i) for i in range(network.num_inputs)]
outputs = [network.get_output(i) for i in range(network.num_outputs)]
max_len = max([len(inp.name) for inp in inputs] + [len(out.name) for out in outputs])
logger.debug("=== Network Description ===")
for i, inp in enumerate(inputs):
logger.debug("Input {0} | Name: {1:{2}} | Shape: {3}".format(i, inp.name, max_len, inp.shape))
for i, out in enumerate(outputs):
logger.debug("Output {0} | Name: {1:{2}} | Shape: {3}".format(i, out.name, max_len, out.shape))
def get_batch_sizes(max_batch_size):
# Returns powers of 2, up to and including max_batch_size
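# For example, get_batch_sizes(20) is intended to yield 1, 2, 4, 8, 16, 20
# (powers of two, plus the max itself when it is not a power of two).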
max_exponent = math.log2(max_batch_size)
for i in range(int(max_exponent)+1):
batch_size = 2**i
yield batch_size
if max_batch_size != batch_size:
yield max_batch_size
# TODO: This only covers dynamic shape for batch size, not dynamic shape for other dimensions
def create_optimization_profiles(builder, inputs, batch_sizes, max_bs):
# Check if all inputs are fixed explicit batch to create a single profile and avoid duplicates
if all([inp.shape[0] > -1 for inp in inputs]):
profile = builder.create_optimization_profile()
for inp in inputs:
fbs, shape = inp.shape[0], inp.shape[1:]
profile.set_shape(inp.name, min=(1, *shape), opt=(fbs, *shape), max=(fbs, *shape))
return [profile]
# Otherwise for mixed fixed+dynamic explicit batch inputs, create several profiles
profiles = {}
for bs in batch_sizes:
if not profiles.get(bs):
profiles[bs] = builder.create_optimization_profile()
for inp in inputs:
shape = inp.shape[1:]
# Check if fixed explicit batch
if inp.shape[0] > -1:
bs = inp.shape[0]
profiles[bs].set_shape(inp.name, min=(1, *shape), opt=(bs, *shape), max=(max_bs, *shape))
return list(profiles.values())
def main():
parser = argparse.ArgumentParser(description="Creates a TensorRT engine from the provided ONNX file.\n")
parser.add_argument("--onnx", required=True, help="The ONNX model file to convert to TensorRT")
parser.add_argument("-o", "--output", type=str, default="model.engine", help="The path at which to write the engine")
parser.add_argument("-maxbs", "--max-batch-size", type=int, default=32, help="The max batch size for the TensorRT engine input")
parser.add_argument("-b", "--batch-size", type=int, action="append", default=[8, 16, 32], help="Batch size(s) to optimize for. The engine will be usable with any batch size below this, but may not be optimal for smaller sizes. Can be specified multiple times to optimize for more than one batch size.")
parser.add_argument("-v", "--verbosity", action="count", help="Verbosity for logging. (None) for ERROR, (-v) for INFO/WARNING/ERROR, (-vv) for VERBOSE.")
parser.add_argument("--explicit-batch", action='store_true', help="Set trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH.")
parser.add_argument("--explicit-precision", action='store_true', help="Set trt.NetworkDefinitionCreationFlag.EXPLICIT_PRECISION.")
parser.add_argument("--gpu-fallback", action='store_true', help="Set trt.BuilderFlag.GPU_FALLBACK.")
parser.add_argument("--refittable", action='store_true', help="Set trt.BuilderFlag.REFIT.")
parser.add_argument("--debug", action='store_true', help="Set trt.BuilderFlag.DEBUG.")
parser.add_argument("--strict-types", action='store_true', help="Set trt.BuilderFlag.STRICT_TYPES.")
parser.add_argument("--fp16", action="store_true", help="Attempt to use FP16 kernels when possible.")
parser.add_argument("--int8", action="store_true", help="Attempt to use INT8 kernels when possible. This should generally be used in addition to the --fp16 flag. \
ONLY SUPPORTS RESNET-LIKE MODELS SUCH AS RESNET50/VGG16/INCEPTION/etc.")
parser.add_argument("--calibration-cache", help="(INT8 ONLY) The path to read/write from calibration cache.", default="calibration.cache")
parser.add_argument("--calibration-data", help="(INT8 ONLY) The directory containing {*.jpg, *.jpeg, *.png} files to use for calibration. (ex: Imagenet Validation Set)", default=None)
parser.add_argument("--calibration-batch-size", help="(INT8 ONLY) The batch size to use during calibration.", type=int, default=64)
parser.add_argument("--max-calibration-size", help="(INT8 ONLY) The max number of data to calibrate on from --calibration-data.", type=int, default=6400)
parser.add_argument("-p", "--preprocess_func", type=str, default=None, help="(INT8 ONLY) Function defined in 'processing.py' to use for pre-processing calibration data.")
args, _ = parser.parse_known_args()
# Adjust logging verbosity
if args.verbosity is None:
TRT_LOGGER.min_severity = trt.Logger.Severity.ERROR
# -v
elif args.verbosity == 1:
TRT_LOGGER.min_severity = trt.Logger.Severity.INFO
# -vv
else:
TRT_LOGGER.min_severity = trt.Logger.Severity.VERBOSE
logger.info("TRT_LOGGER Verbosity: {:}".format(TRT_LOGGER.min_severity))
# Network flags
network_flags = 0
if args.explicit_batch:
network_flags |= 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
if args.explicit_precision:
network_flags |= 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_PRECISION)
builder_flag_map = {
'gpu_fallback': trt.BuilderFlag.GPU_FALLBACK,
'refittable': trt.BuilderFlag.REFIT,
'debug': trt.BuilderFlag.DEBUG,
'strict_types': trt.BuilderFlag.STRICT_TYPES,
'fp16': trt.BuilderFlag.FP16,
'int8': trt.BuilderFlag.INT8,
}
# Building engine
with trt.Builder(TRT_LOGGER) as builder, \
builder.create_network(network_flags) as network, \
builder.create_builder_config() as config, \
trt.OnnxParser(network, TRT_LOGGER) as parser:
config.max_workspace_size = int(2**33*1.5) # 12 GiB
# Set Builder Config Flags
for flag in builder_flag_map:
if getattr(args, flag):
logger.info("Setting {}".format(builder_flag_map[flag]))
config.set_flag(builder_flag_map[flag])
if args.fp16 and not builder.platform_has_fast_fp16:
logger.warning("FP16 not supported on this platform.")
if args.int8 and not builder.platform_has_fast_int8:
logger.warning("INT8 not supported on this platform.")
if args.int8:
config.int8_calibrator = get_int8_calibrator(args.calibration_cache,
args.calibration_data,
args.max_calibration_size,
args.preprocess_func,
args.calibration_batch_size)
# Fill network attributes with information by parsing model
with open(args.onnx, "rb") as f:
if not parser.parse(f.read()):
print('ERROR: Failed to parse the ONNX file: {}'.format(args.onnx))
for error in range(parser.num_errors):
print(parser.get_error(error))
sys.exit(1)
# Display network info and check certain properties
check_network(network)
if args.explicit_batch:
# Add optimization profiles
batch_sizes = args.batch_size
max_bs = args.max_batch_size
inputs = [network.get_input(i) for i in range(network.num_inputs)]
opt_profiles = create_optimization_profiles(builder, inputs, batch_sizes, max_bs)
add_profiles(args.int8, config, inputs, opt_profiles)
# Implicit Batch Network
else:
builder.max_batch_size = args.max_batch_size
logger.info("Building Engine...")
with builder.build_engine(network, config) as engine, open(args.output, "wb") as f:
logger.info("Serializing engine to file: {:}".format(args.output))
f.write(engine.serialize())
if __name__ == "__main__":
main()
|
nvidia-gcp-samples-master
|
kubernetes-engine-samples/triton_gke/triton_server/tensorrt/docker/resnet/onnx_to_tensorrt.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modifications copyright (C) 2020 NVIDIA Corp.
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import unicodedata
import six
import tensorflow as tf
def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
"""Checks whether the casing config is consistent with the checkpoint name."""
# The casing has to be passed in by the user and there is no explicit check
# as to whether it matches the checkpoint. The casing information probably
# should have been stored in the bert_config.json file, but it's not, so
# we have to heuristically detect it to validate.
if not init_checkpoint:
return
m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
if m is None:
return
model_name = m.group(1)
lower_models = [
"uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12",
"multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12"
]
cased_models = [
"cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16",
"multi_cased_L-12_H-768_A-12"
]
is_bad_config = False
if model_name in lower_models and not do_lower_case:
is_bad_config = True
actual_flag = "False"
case_name = "lowercased"
opposite_flag = "True"
if model_name in cased_models and do_lower_case:
is_bad_config = True
actual_flag = "True"
case_name = "cased"
opposite_flag = "False"
if is_bad_config:
raise ValueError(
"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
"However, `%s` seems to be a %s model, so you "
"should pass in `--do_lower_case=%s` so that the fine-tuning matches "
"how the model was pre-training. If this error is wrong, please "
"just comment out this check." % (actual_flag, init_checkpoint,
model_name, case_name, opposite_flag))
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
"""Returns text encoded in a way suitable for print or `tf.logging`."""
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text
elif isinstance(text, unicode):
return text.encode("utf-8")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with tf.gfile.GFile(vocab_file, "r") as reader:
while True:
token = convert_to_unicode(reader.readline())
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def convert_by_vocab(vocab, items):
"""Converts a sequence of [tokens|ids] using the vocab."""
output = []
for item in items:
output.append(vocab[item])
return output
def convert_tokens_to_ids(vocab, tokens):
return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
return convert_by_vocab(inv_vocab, ids)
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class FullTokenizer(object):
"""Runs end-to-end tokenziation."""
def __init__(self, vocab_file, do_lower_case=True):
self.vocab = load_vocab(vocab_file)
self.inv_vocab = {v: k for k, v in self.vocab.items()}
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
return convert_by_vocab(self.vocab, tokens)
def convert_ids_to_tokens(self, ids):
return convert_by_vocab(self.inv_vocab, ids)
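# A minimal usage sketch for FullTokenizer (the vocabulary path is an
# assumption; any BERT WordPiece vocab file with one token per line works):
#
#   tokenizer = FullTokenizer(vocab_file="vocab.txt", do_lower_case=True)
#   tokens = tokenizer.tokenize("TensorRT accelerates inference")
#   ids = tokenizer.convert_tokens_to_ids(tokens)
#   assert tokenizer.convert_ids_to_tokens(ids) == tokens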
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = convert_to_unicode(text)
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenziation."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
text = convert_to_unicode(text)
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
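# Worked example of the greedy longest-match-first loop above, assuming the
# vocabulary contains "un", "##aff" and "##able" but not "unaffable":
#   start=0: longest in-vocab prefix is "un"    -> emit "un",     start=2
#   start=2: longest in-vocab match is "##aff"  -> emit "##aff",  start=5
#   start=5: longest in-vocab match is "##able" -> emit "##able", done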
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
|
nvidia-gcp-samples-master
|
kubernetes-engine-samples/triton_gke/triton_client/notebook_client/docker/client_src/tokenization.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modifications copyright (C) 2020 NVIDIA Corp.
import tokenization
import collections
import numpy as np
import six
import math
def convert_doc_tokens(paragraph_text):
""" Return the list of tokens from the doc text """
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
doc_tokens = []
prev_is_whitespace = True
for c in paragraph_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
return doc_tokens
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
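# Worked example for the comment above, taking the token "bought" with a
# doc_span length of 5 in both spans:
#   span B ("to the store and bought"): min(4, 0) + 0.01 * 5 = 0.05
#   span C ("and bought a gallon of"):  min(1, 3) + 0.01 * 5 = 1.05
# so span C is the "max context" span for "bought".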
def convert_examples_to_features(doc_tokens, question_text, tokenizer, max_seq_length,
doc_stride, max_query_length):
"""Loads a data file into a list of `InputBatch`s."""
query_tokens = tokenizer.tokenize(question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in query_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index, split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
def create_int_feature(values):
feature = np.asarray(values, dtype=np.int32, order=None)
return feature
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(input_ids)
features["input_mask"] = create_int_feature(input_mask)
features["segment_ids"] = create_int_feature(segment_ids)
features["tokens"] = tokens
features["token_to_orig_map"] = token_to_orig_map
features["token_is_max_context"] = token_is_max_context
return features
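# A minimal usage sketch (the sequence-length values mirror the BERT-large
# SQuAD defaults used later in this repository and are assumptions here):
#
#   tokenizer = tokenization.FullTokenizer(vocab_file="vocab.txt", do_lower_case=True)
#   doc_tokens = convert_doc_tokens(paragraph_text)
#   features = convert_examples_to_features(doc_tokens, "What is TensorRT?",
#                                           tokenizer, max_seq_length=384,
#                                           doc_stride=128, max_query_length=64)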
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
def get_final_text(pred_text, orig_text, do_lower_case):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
# now `orig_text` contains the span of our original text corresponding to the
# span that we predicted.
#
# However, `orig_text` may contain extra characters that we don't want in
# our prediction.
#
# For example, let's say:
# pred_text = steve smith
# orig_text = Steve Smith's
#
# We don't want to return `orig_text` because it contains the extra "'s".
#
# We don't want to return `pred_text` because it's already been normalized
# (the SQuAD eval script also does punctuation stripping/lower casing but
# our tokenizer does additional normalization like stripping accent
# characters).
#
# What we really want to return is "Steve Smith".
#
# Therefore, we have to apply a semi-complicated alignment heuristic between
# `pred_text` and `orig_text` to get a character-to-character alignment. This
# can fail in certain cases in which case we just return `orig_text`.
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return (ns_text, ns_to_s_map)
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in six.iteritems(tok_ns_to_s_map):
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
return orig_text
output_text = orig_text[orig_start_position:(orig_end_position + 1)]
return output_text
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
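# Worked example: _compute_softmax([1.0, 2.0])
#   max_score = 2.0, exp_scores ~= [0.3679, 1.0000]
#   total_sum ~= 1.3679, probs ~= [0.2689, 0.7311]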
def get_predictions(doc_tokens, features, start_logits, end_logits, n_best_size, max_answer_length):
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
prediction = ""
scores_diff_json = 0.0
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
score_null = 1000000 # large and positive
min_null_feature_index = 0 # the paragraph slice with min null score
null_start_logit = 0 # the start logit at the slice with min null score
null_end_logit = 0 # the end logit at the slice with min null score
start_indexes = _get_best_indexes(start_logits, n_best_size)
end_indexes = _get_best_indexes(end_logits, n_best_size)
# if we could have irrelevant answers, get the min score of irrelevant
version_2_with_negative = True
if version_2_with_negative:
feature_null_score = start_logits[0] + end_logits[0]
if feature_null_score < score_null:
score_null = feature_null_score
min_null_feature_index = 0
null_start_logit = start_logits[0]
null_end_logit = end_logits[0]
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(features['tokens']):
continue
if end_index >= len(features['tokens']):
continue
if start_index not in features['token_to_orig_map']:
continue
if end_index not in features['token_to_orig_map']:
continue
if not features['token_is_max_context'].get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=0,
start_index=start_index,
end_index=end_index,
start_logit=start_logits[start_index],
end_logit=end_logits[end_index]))
if version_2_with_negative:
prelim_predictions.append(
_PrelimPrediction(
feature_index=min_null_feature_index,
start_index=0,
end_index=0,
start_logit=null_start_logit,
end_logit=null_end_logit))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
if pred.start_index > 0: # this is a non-null prediction
tok_tokens = features['tokens'][pred.start_index:(pred.end_index + 1)]
orig_doc_start = features['token_to_orig_map'][pred.start_index]
orig_doc_end = features['token_to_orig_map'][pred.end_index]
orig_tokens = doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, True)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
else:
final_text = ""
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit))
# if we didn't include the empty option in the n-best, include it
if version_2_with_negative:
if "" not in seen_predictions:
nbest.append(
_NbestPrediction(
text="", start_logit=null_start_logit,
end_logit=null_end_logit))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry:
if entry.text:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_json.append(output)
assert len(nbest_json) >= 1
null_score_diff_threshold = 0.0
if not version_2_with_negative:
prediction = nbest_json[0]["text"]
else:
# predict "" iff the null score - the score of best non-null > threshold
score_diff = score_null - best_non_null_entry.start_logit - (
best_non_null_entry.end_logit)
scores_diff_json = score_diff
if score_diff > null_score_diff_threshold:
prediction = ""
else:
prediction = best_non_null_entry.text
return prediction, nbest_json, scores_diff_json
|
nvidia-gcp-samples-master
|
kubernetes-engine-samples/triton_gke/triton_client/notebook_client/docker/client_src/data_processing.py
|
# Copyright 2020 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Any, Text, Tuple
import time
import logging
import apache_beam as beam
from apache_beam.utils import shared
from apache_beam.options.pipeline_options import PipelineOptions
class TrtModel():
def __init__(self, infer_context, vocab_file="vocab.txt"):
import helpers.tokenization as tokenization
self.infer_context = infer_context
self.vocab_file = vocab_file
self.tokenizer = tokenization.FullTokenizer(vocab_file="vocab.txt", do_lower_case=True)
self.do_lower_case = True
self.max_seq_length = 384
self.doc_stride = 128
self.max_query_length = 64
self.verbose_logging = True
self.version_2_with_negative = False
self.n_best_size = 20
self.max_answer_length = 30
class DoManualInference(beam.DoFn):
def __init__(self, shared_handle, engine_path, batch_size):
import collections
self._shared_handle = shared_handle
self._engine_path = engine_path
self._batch_size = batch_size
self._NetworkOutput = collections.namedtuple(
"NetworkOutput",
["start_logits", "end_logits", "feature_index"])
def setup(self):
from polygraphy.backend.trt import EngineFromBytes
from polygraphy.backend.trt import TrtRunner
# setup is a good place to initialize transient in-memory resources.
def initialize_model():
# Load a potentially large model in memory. Executed once per process.
build_engine = EngineFromBytes(open(self._engine_path, "rb").read())
runner = TrtRunner(build_engine)
runner.activate()
return TrtModel(runner)
self._trtModel = self._shared_handle.acquire(initialize_model)
def process(self, element: Tuple[Text, List[Text]]) -> List[Any]:
yield (self.predict(element))
def predict(self, inputs: Tuple[Text, List[Text]]) -> List[Any]:
import helpers.data_processing as dp
from polygraphy.backend.trt import TrtRunner
import numpy as np
import collections
import time
def question_features(tokens, question):
# Extract features from the paragraph and question
return dp.convert_example_to_features(tokens, question,
self._trtModel.tokenizer,
self._trtModel.max_seq_length,
self._trtModel.doc_stride,
self._trtModel.max_query_length)
features = []
doc_tokens = dp.convert_doc_tokens(inputs[0])
ques_list = inputs[1]
batch_size = len(ques_list)
if batch_size < 16:
# Pad the input batch to batch_size to match the model expected input.
pad = [ques_list[0]] * (16 - batch_size)
ques_list.extend(pad)
for question_text in ques_list:
features.append(question_features(doc_tokens, question_text)[0])
input_ids_batch = np.dstack([feature.input_ids for feature in features]).squeeze()
segment_ids_batch = np.dstack([feature.segment_ids for feature in features]).squeeze()
input_mask_batch = np.dstack([feature.input_mask for feature in features]).squeeze()
inputs = {
"input_ids": input_ids_batch,
"input_mask": input_mask_batch,
"segment_ids": segment_ids_batch
}
output = self._trtModel.infer_context.infer(inputs)
start_logits = output['cls_squad_logits'][:, :, 0, :, :]
end_logits = output['cls_squad_logits'][:, :, 1, :, :]
networkOutputs = [self._NetworkOutput(
start_logits=start_logits[i, :],
end_logits=end_logits[i, :],
feature_index=0) for i in range(self._batch_size)]
predictions = []
for feature, networkOutput in zip(features, networkOutputs):
prediction, _, _ = dp.get_predictions(doc_tokens, [feature],
[networkOutput], self._trtModel.n_best_size,
self._trtModel.max_answer_length)
predictions.append(prediction)
return ["[Q]: " + ques + " [A]:" + prediction for ques, prediction in zip(ques_list, predictions)]
if __name__ == "__main__":
logging.getLogger().setLevel(logging.INFO)
pipeline_options = PipelineOptions(save_main_session=True)
question_list = [("""TensorRT is a high performance deep learning inference platform
that delivers low latency and high throughput for apps such as
recommenders, speech and image/video on NVIDIA GPUs. It includes
parsers to import models, and plugins to support novel ops and
layers before applying optimizations for inference. Today NVIDIA
is open-sourcing parsers and plugins in TensorRT so that the deep
learning community can customize and extend these components to
take advantage of powerful TensorRT optimizations for your apps.""",
["What is TensorRT?", "Is TensorRT open sourced?", "Who is open sourcing TensorRT?",
"What does TensorRT deliver?"] * 4)] * 4000
engine_path = "/workspace/trt_beam/bert_large_seq384_bs16_trt2011.engine"
start_time = time.time()
with beam.Pipeline(options=pipeline_options) as p:
shared_handle = shared.Shared()
_ = (p | beam.Create(question_list)
| beam.ParDo(DoManualInference(shared_handle=shared_handle, engine_path=engine_path, batch_size=16))
| beam.Map(print)
)
logging.info(f"--- {time.time() - start_time} seconds ---")
logging.info(f"--- {len(question_list) * 16.0 // (time.time() - start_time)} questions/seconds ---")
|
nvidia-gcp-samples-master
|
dataflow-samples/bert-qa-trt-dataflow/bert_squad2_qa_trt.py
|