diff --git a/.gitattributes b/.gitattributes index 0637990b37ccb6df7dabced0895f421683d385c4..9f3032bf516e0f57d9ea17d9fb05bf93e3dfad10 100644 --- a/.gitattributes +++ b/.gitattributes @@ -870,3 +870,5 @@ videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/image_ videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_sparse_ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/array_ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_image_ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_experimental_dataset_ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_tpu_ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/__init__.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/__pycache__/backprop.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/__pycache__/backprop.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..21e55f78fce5db26c4b7b096d22ba0c472821279 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/__pycache__/backprop.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/__pycache__/benchmarks_test_base.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/__pycache__/benchmarks_test_base.cpython-310.pyc new file mode 100644 
index 0000000000000000000000000000000000000000..f78a1c355a09095b86286ecda3ed3a4e93de4f59 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/__pycache__/benchmarks_test_base.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/__pycache__/core.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/__pycache__/core.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2186c9e555ccbd12df1872279a9baca412851b73 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/__pycache__/core.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/__pycache__/def_function.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/__pycache__/def_function.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..613555688bf2209504f99af0722bda1e6854f3fd Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/__pycache__/def_function.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/__pycache__/forwardprop.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/__pycache__/forwardprop.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..496ed07427a16bfedd8c9e5814dcd1f4e1a1dee1 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/__pycache__/forwardprop.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/__pycache__/function.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/__pycache__/function.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a0e82e987335d8f889aae4045f5e023149cc3ed1 Binary files /dev/null and 
b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/__pycache__/function.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/__pycache__/graph_only_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/__pycache__/graph_only_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dec79ae58d7af9c0508ed42415ff19624d4fdd08 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/__pycache__/graph_only_ops.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/__pycache__/imperative_grad.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/__pycache__/imperative_grad.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8bdf146c3fb1d0505b8c8fb17872853f147240f Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/__pycache__/imperative_grad.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/__pycache__/monitoring.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/__pycache__/monitoring.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..954984b3928db6b944ee8f578c5c32c2c1c65176 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/__pycache__/monitoring.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/__pycache__/record.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/__pycache__/record.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..11587dd13773a7bc295c7523884ae9ac369738c6 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/__pycache__/record.cpython-310.pyc 
differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/__pycache__/tape.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/__pycache__/tape.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa19d86a385385bfbbe26beed71fbb866825d523 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/__pycache__/tape.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/__pycache__/test.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/__pycache__/test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..193279608b503033ed2cbdf39b3f60bfc7f0c4d7 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/__pycache__/test.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/backprop.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/backprop.py new file mode 100644 index 0000000000000000000000000000000000000000..57593eac09b26dd2936c5e356f9b80373421c444 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/backprop.py @@ -0,0 +1,1345 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Code for backpropagation using the tape utilities.""" + +# TODO(b/159343581): Properly support CompositeTensor in all functions in this +# file. + +import functools +import operator + +from tensorflow.python import pywrap_tfe +from tensorflow.python.eager import backprop_util +from tensorflow.python.eager import context +from tensorflow.python.eager import execute +from tensorflow.python.eager import imperative_grad +from tensorflow.python.eager import tape +from tensorflow.python.framework import composite_tensor +from tensorflow.python.framework import composite_tensor_gradient +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import indexed_slices +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor as tensor_lib +from tensorflow.python.framework import tensor_shape +from tensorflow.python.framework import tensor_util +from tensorflow.python.framework import type_spec +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import control_flow_util +from tensorflow.python.ops import default_gradient +from tensorflow.python.ops import gen_array_ops +from tensorflow.python.ops import gen_math_ops +from tensorflow.python.ops import gradients_impl # pylint: disable=unused-import +from tensorflow.python.ops import resource_variable_ops +from tensorflow.python.ops.parallel_for import control_flow_ops as pfor_ops +from tensorflow.python.ops.unconnected_gradients import UnconnectedGradients +from tensorflow.python.platform import tf_logging as logging +from tensorflow.python.util import _pywrap_utils +from tensorflow.python.util import nest +from tensorflow.python.util import tf_contextlib +from tensorflow.python.util import tf_inspect +from tensorflow.python.util import variable_utils +from 
tensorflow.python.util.tf_export import tf_export + + +_op_attr_type_cache = {} + + +def op_attr_type(op_type, attr_name): + try: + return _op_attr_type_cache[(op_type, attr_name)] + except KeyError: + context.ensure_initialized() + h = context.context()._handle # pylint: disable=protected-access + attr_type = pywrap_tfe.TFE_OpNameGetAttrType(h, op_type, attr_name) + _op_attr_type_cache[(op_type, attr_name)] = attr_type + return attr_type + + +def make_attr(attr_type, value): + # pybind11 enums do not return the raw value like SWIG enums do. They are + # useful when comparing amongst each other but not direct integers as we are + # doing in most tests. + # https://pybind11.readthedocs.io/en/stable/classes.html#enumerations-and-internal-types + # TODO(amitpatankar): After all SWIG transitions, convert the enum comparisons + # from integer value to class. + if attr_type == int(pywrap_tfe.TF_ATTR_TYPE): + return dtypes.as_dtype(value) + if attr_type == [int(pywrap_tfe.TF_ATTR_TYPE)]: + return [dtypes.as_dtype(v) for v in value] + if attr_type == int(pywrap_tfe.TF_ATTR_SHAPE): + return tensor_shape.as_shape(value).as_proto() + if attr_type == [int(pywrap_tfe.TF_ATTR_SHAPE)]: + return [tensor_shape.as_shape(v).as_proto() for v in value] + return nest.map_structure( + lambda v: v.encode() if isinstance(v, str) else v, + value) + + +class _MockOp(object): + """Pretends to be a tf.Operation for the gradient functions.""" + + def __init__(self, attrs, inputs, outputs, typ, skip_input_indices): + self.attrs = attrs + self.inputs = inputs + self.outputs = outputs + self.type = typ + self.skip_input_indices = skip_input_indices + + def get_attr(self, attr): + typ = op_attr_type(self.type, attr) + for i in range(0, len(self.attrs), 2): + if self.attrs[i] == attr: + return make_attr(typ, self.attrs[i + 1]) + raise KeyError(attr) + + def _get_control_flow_context(self): + raise NotImplementedError( + "tf.GradientTape.gradients() does not support graph control flow " + "operations 
like tf.cond or tf.while at this time. Use tf.gradients() " + "instead. If you need this feature, please file a feature request at " + "https://github.com/tensorflow/tensorflow/issues/new" + ) + + +def _gradient_function(op_name, attr_tuple, num_inputs, inputs, outputs, + out_grads, skip_input_indices, forward_pass_name_scope): + """Calls the gradient function of the op. + + Args: + op_name: the name of the op to be differentiated. + attr_tuple: the attrs, as a tuple. + num_inputs: the number of inputs to the op. + inputs: inputs to the original operation. + outputs: outputs to the original operation. + out_grads: gradients of the operation wrt its outputs. + skip_input_indices: a tuple that is passed to the gradient function, + indicating which inputs to skip calculating the gradient for + forward_pass_name_scope: the namescope of the op in the forward pass. + + Returns: + The gradients with respect to the inputs of the function, as a list. + """ + mock_op = _MockOp(attr_tuple, inputs, outputs, op_name, skip_input_indices) + grad_fn = ops._gradient_registry.lookup(op_name) # pylint: disable=protected-access + if grad_fn is None: + return [None] * num_inputs + + # This does not work with v1 TensorArrays. + if ops.executing_eagerly_outside_functions( + ) or control_flow_util.EnableControlFlowV2(ops.get_default_graph()): + gradient_name_scope = "gradient_tape/" + if forward_pass_name_scope: + gradient_name_scope += forward_pass_name_scope + "/" + with ops.name_scope(gradient_name_scope): + return grad_fn(mock_op, *out_grads) + else: + return grad_fn(mock_op, *out_grads) + + +pywrap_tfe.TFE_Py_RegisterGradientFunction(_gradient_function) + + +def _must_record_gradient(): + return not pywrap_tfe.TFE_Py_TapeSetIsEmpty() + + +@tf_export("__internal__.record_gradient", v1=[]) +def record_gradient(op_name, inputs, attrs, outputs): + """Explicitly record the gradient for a given op. + + Args: + op_name: The op name as listed in the `OpDef` for the op. 
+ inputs: A list of tensor inputs to the op. + attrs: The op attributes as a flattened list of alternating attribute names + and attribute values. + outputs: A list of tensor outputs from the op. + """ + pywrap_tfe.TFE_Py_RecordGradient(op_name, inputs, attrs, outputs, + ops.get_name_scope()) + + +execute.must_record_gradient = _must_record_gradient +execute.record_gradient = record_gradient + + +def implicit_val_and_grad(f): + """Returns a function which differentiates f with respect to variables. + + The wrapped function returns the value and the gradient of f when called with + the same arguments. The gradient is with respect to all trainable TFE + variables accessed by `f`. + + This function is useful when the exact set of variables to differentiate with + is not known ahead of time. + + Example: + + ```python + dense_layer = tf.compat.v1.layers.Dense(1) + def loss(x, y): + return tf.reduce_sum(tf.square(dense_layer(x) - y)) + + # Obtain the gradient function. + val_grad_fn = tfe.implicit_value_and_gradients(loss) + + # Invoke the gradient function with concrete values of x and y. + x = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) + y = tf.constant([[10.0], [20.0]]) + value, grads_and_vars = val_grad_fn(x, y) + print('Value of loss: %s' % value) + + # Apply the gradients to Variables. + optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.1) + optimizer.apply_gradients(grads_and_vars) + ``` + + Args: + f: function to be differentiated. If `f` returns a scalar, this scalar will + be differentiated. If `f` returns a tensor or list of tensors, by default + a scalar will be computed by adding all their values to produce a single + scalar. + + Returns: + A function which, when called, returns a tuple pair. + Its first element is the value to which the function evaluates. + Its second element is list of (gradient, variable) pairs. + + Raises: + ValueError: if `f` returns None. 
+ """ + # TODO(cais): Remove calls to tf.constant() once the gradients functions + # accept lists and np.ndarrays. + + def grad_fn(*args, **kwds): + """Computes the gradient of the wrapped function.""" + this_tape = tape.push_new_tape() + try: + end_node = f(*args, **kwds) + if end_node is None: + raise ValueError("Cannot differentiate a function that returns None; " + "did you forget to return a value from {}?".format( + f.__name__)) + finally: + tape.pop_tape(this_tape) + # Note: variables are returned in construction order. This ensures unique + # order across executions. + variables = this_tape.watched_variables() + if not variables: + raise ValueError("No trainable variables were accessed while the " + "function was being computed.") + + sources = [v.handle for v in variables] + for s in sources: + if getattr(s, "is_packed", False): + raise ValueError( + "GradientTape.gradient is not supported on packed EagerTensors yet." + ) + grad = imperative_grad.imperative_grad(this_tape, nest.flatten(end_node), + sources) + return end_node, list(zip(grad, variables)) + + return grad_fn + + +def implicit_grad(f): + """Returns a function which differentiates f with respect to variables. + + The wrapped function returns the gradient of f when called with the same + arguments. The gradient is with respect to all trainable TFE variables + accessed by `f`. + + This function is useful when the exact set of variables to differentiate with + is not known ahead of time. + + Example: + + ```python + dense_layer = tf.compat.v1.layers.Dense(1) + def loss(x, y): + return tf.reduce_sum(tf.square(dense_layer(x) - y)) + + # Obtain the gradient function. + grad_fn = tfe.implicit_gradients(loss) + + # Invoke the gradient function with concrete values of x and y. + x = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) + y = tf.constant([[10.0], [20.0]]) + grads_and_vars = grad_fn(x, y) + + # Apply the gradients to Variables. 
+ optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.1) + optimizer.apply_gradients(grads_and_vars) + ``` + + Args: + f: function to be differentiated. If `f` returns a scalar, this scalar will + be differentiated. If `f` returns a tensor or list of tensors, by default + a scalar will be computed by adding all their values to produce a single + scalar. + + Returns: + A function which, when called, returns a list of (gradient, variable) pairs. + """ + # TODO(cais): Remove calls to tf.constant() once the gradients functions + # accept lists and np.ndarrays. + + def grad_fn(*args, **kwds): + """Computes the gradient of the wrapped function.""" + return implicit_val_and_grad(f)(*args, **kwds)[1] + + return grad_fn + + +def _get_arg_spec(f, params, param_args): + """The positions of the parameters of f to be differentiated in param_args.""" + try: + args = tf_inspect.getfullargspec(f).args + except TypeError as e: + # TypeError can happen when f is a callable object. + if params is None: + return range(len(param_args)) + elif all(isinstance(x, int) for x in params): + return params + raise ValueError("Either callable provided is not a function or could not " + "inspect its arguments by name: %s. Original error: %s" + % (f, e)) + if params is None: + if not args: + return range(len(param_args)) + if args[0] == "self": + return range(len(args) - 1) + else: + return range(len(args)) + elif all(isinstance(x, str) for x in params): + return [args.index(n) for n in params] + elif all(isinstance(x, int) for x in params): + return params + else: + raise ValueError( + "params must be all strings or all integers; got %s." % params) + + +def gradients_function(f, params=None): + """Returns a function which differentiates f with respect to params. 
+ + Example: + ```python + # f(x, y) = (x ^ 3) * y - x * (y ^ 2) + # Therefore, the 1st order derivatives are: + # df / dx = 3 * (x ^ 2) * y - y ^ 2 + # df / dy = x ^ 3 - 2 * x * y + # The 2nd order derivatives with respect to x is: + # d^2 f / (dx)^2 = 6 * x * y + def f(x, y): + return x * x * x * y - x * y * y + + # Obtain a function that returns 1st order gradients. + grad_fn = tfe.gradients_function(f) + + x = 2.0 + y = 3.0 + + # Invoke the 1st order gradient function. + x_grad, y_grad = grad_fn(x, y) + assert x_grad.numpy() == 3 * (2 ** 2) * 3 - 3 ** 2 + assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3 + + # Obtain a function that returns the 2nd order gradient with respect to x. + gradgrad_fn = tfe.gradients_function(lambda x, y: grad_fn(x, y)[0]) + + # Invoke the 2nd order gradient function. + x_gradgrad = gradgrad_fn(x, y)[0] + assert x_gradgrad.numpy() == 6 * 2 * 3 + + # To obtain a callable that returns the gradient(s) of `f` with respect to a + # subset of its inputs, use the `params` keyword argument with + # `gradients_function()`. + ygrad_fn = tfe.gradients_function(f, params=[1]) + + (y_grad,) = ygrad_fn(x, y) + assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3 + ``` + + Note that only tensors with real or complex dtypes are differentiable. + + Args: + f: function to be differentiated. If `f` returns a scalar, this scalar will + be differentiated. If `f` returns a tensor or list of tensors, by default + a scalar will be computed by adding all their values to produce a single + scalar. If desired, the tensors can be elementwise multiplied by the + tensors passed as the `dy` keyword argument to the returned gradient + function. + params: list of parameter names of f or list of integers indexing the + parameters with respect to which we'll differentiate. Passing None + differentiates with respect to all parameters. + + Returns: + function which, when called, returns the value of f and the gradient + of `f` with respect to all of `params`. 
The function takes an extra optional + keyword argument `dy`. Setting it allows computation of vector jacobian + products for vectors other than the vector of ones. + + Raises: + ValueError: if the params are not all strings or all integers. + """ + + def decorated(*args, **kwds): + """Computes the gradient of the decorated function.""" + + _, grad = val_and_grad_function(f, params=params)(*args, **kwds) + return grad + + return decorated + + +def _ensure_unique_tensor_objects(parameter_positions, args): + """Make each of the parameter_positions in args a unique tensor_lib.Tensor object. + + Ensure that each parameter is treated independently. + For example: + + def f(x, y): return x * y + g = gradients_function(f) + one = tf.constant(1.) + + g(one, one) should return [1., 1.] + (even though the two arguments are the same Tensor object). + + Args: + parameter_positions: List of indices into args defining the arguments to + differentiate against. + args: A list of arguments to the function to be differentiated. + + Returns: + args, possibly edited in-place. + """ + s = set() + for (i, t) in enumerate(args): + if i in parameter_positions: + tid = ops.tensor_id(t) + if tid in s: + args[i] = gen_array_ops.identity(args[i]) + else: + s.add(tid) + return args + + +def val_and_grad_function(f, params=None): + """Returns a function that computes f and its derivative w.r.t. params. + + Example: + ```python + # f(x, y) = (x ^ 3) * y - x * (y ^ 2) + # Therefore, the 1st order derivatives are: + # df / dx = 3 * (x ^ 2) * y - y ^ 2 + # df / dy = x ^ 3 - 2 * x * y + def f(x, y): + return x * x * x * y - x * y * y + + # Obtain a function that returns the function value and the 1st order + # gradients. + val_grads_fn = tfe.value_and_gradients_function(f) + + x = 2.0 + y = 3.0 + + # Invoke the value-and-gradients function. 
+ f_val, (x_grad, y_grad) = val_grads_fn(x, y) + assert f_val.numpy() == (2 ** 3) * 3 - 2 * (3 ** 2) + assert x_grad.numpy() == 3 * (2 ** 2) * 3 - 3 ** 2 + assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3 + + # To obtain a callable that returns the value of `f` and the gradient(s) of + # `f` with respect to a subset of its inputs, use the `params` keyword + # argument with `value_and_gradients_function()`. + val_ygrad_fn = tfe.value_and_gradients_function(f, params=[1]) + + f_val, (y_grad,) = val_ygrad_fn(x, y) + assert f_val.numpy() == (2 ** 3) * 3 - 2 * (3 ** 2) + assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3 + ``` + + Args: + f: function to be differentiated. If `f` returns a scalar, this scalar will + be differentiated. If `f` returns a tensor or list of tensors, by default + a scalar will be computed by adding all their values to produce a single + scalar. If desired, the tensors can be elementwise multiplied by the + tensors passed as the `dy` keyword argument to the returned gradient + function. + params: list of parameter names of f or list of integers indexing the + parameters with respect to which we'll differentiate. Passing `None` + differentiates with respect to all parameters. + + Returns: + function which, when called, returns the value of f and the gradient + of f with respect to all of `params`. The function takes an extra optional + keyword argument "dy". Setting it allows computation of vector jacobian + products for vectors other than the vector of ones. + + Raises: + ValueError: if the params are not all strings or all integers. 
+ """ + + def decorated(*args, **kwds): + """Computes the value and gradient of the decorated function.""" + dy = kwds.pop("dy", None) + if kwds: + raise ValueError("Functions to be differentiated cannot " + "receive keyword arguments.") + val, vjp = make_vjp(f, params)(*args, **kwds) + return val, vjp(dy=dy) + + return decorated + + +def make_vjp(f, params=None, persistent=True): + """Returns a function that computes f and its vjp w.r.t. + + params. + + The term "vjp" here is an abbreviation for vector-jacobian product. + + Args: + f: the function to be differentiated. + params: the parameters (numbers or names) to differentiate with respect to. + A value of None will differentiate with respect to all parameters. + persistent: Boolean controlling whether the VJP function can be re-used. + Must be True or False. + + Returns: + A function, which when called, returns a tuple (value, vjp), where: + - value is the result of calling f. + - vjp is a function, which takes a vector as an argument and + returns the product of that vector with the Jacobian of f. + Providing no argument to vjp is equivalent to providing a + vector of ones. + + For example, + ```python + def f(x): + return x * x + + wrapped_fn = tfe.make_vjp(f) + result, vjp = wrapped_fn(tf.constant(3.0)) + # result is 9.0 + vjp() # the vjp function returns 6.0 + + Raises: + ValueError: if `f` returns None. + """ + + def decorated(*args, **kwds): + """Computes the value and gradient of the decorated function.""" + parameter_positions = _get_arg_spec(f, params, args) + assert not kwds, "The gradient function can't take keyword arguments." 
+ this_tape = tape.push_new_tape(persistent=persistent) + try: + sources = [] + args = [ + ops.convert_to_tensor(arg) if i in parameter_positions else arg + for i, arg in enumerate(args) + ] + args = _ensure_unique_tensor_objects(parameter_positions, args) + for i in parameter_positions: + if getattr(args[i], "is_packed", False): + raise ValueError( + "GradientTape.gradient is not supported on packed EagerTensors" + "yet.") + sources.append(args[i]) + tape.watch(this_tape, args[i]) + result = f(*args) + if result is None: + raise ValueError("Cannot differentiate a function that returns None; " + "did you forget to return a value from {}?".format( + f.__name__)) + flat_result = nest.flatten(result) + flat_result = [gen_array_ops.identity(x) for x in flat_result] + result = nest.pack_sequence_as(result, flat_result) + finally: + tape.pop_tape(this_tape) + def vjp(dy=None): + if dy is not None: + dy = [ops.convert_to_tensor(x) for x in nest.flatten(dy)] + return imperative_grad.imperative_grad( + this_tape, nest.flatten(result), sources, output_gradients=dy) + + return result, vjp + + return decorated + + +def _aggregate_grads(gradients): + """Aggregate gradients from multiple sources. + + Args: + gradients: A list of 'Tensor' or 'IndexedSlices' gradients. + + Returns: + If 'gradients' only has 'Tensor', returns an aggregated 'Tensor'. + Otherwise returns an aggregated 'IndexedSlices'. 
+ """ + assert gradients, "No gradients to aggregate" + + if len(gradients) == 1: + return gradients[0] + if all(isinstance(g, tensor_lib.Tensor) for g in gradients): + return gen_math_ops.add_n(gradients) + else: + assert all( + isinstance(g, (tensor_lib.Tensor, indexed_slices.IndexedSlices)) + for g in gradients) + return backprop_util.AggregateIndexedSlicesGradients(gradients) + + +def _num_elements(grad): + """The number of elements in the `grad` tensor.""" + if isinstance(grad, tensor_lib.Tensor): + shape_tuple = grad._shape_tuple() # pylint: disable=protected-access + elif isinstance(grad, indexed_slices.IndexedSlices): + shape_tuple = grad.values._shape_tuple() # pylint: disable=protected-access + else: + raise ValueError("`grad` not a Tensor or IndexedSlices.") + if shape_tuple is None or None in shape_tuple: + return 0 + return functools.reduce(operator.mul, shape_tuple, 1) + + +def _fast_fill(value, shape, dtype): + return array_ops.fill( + constant_op.constant(shape, dtype=dtypes.int32), + constant_op.constant(value, dtype=dtype)) + + +def _zeros(shape, dtype): + """Helper to return (possibly cached) zero tensors in eager mode.""" + # Note: variants will use _zeros_like + if dtype == dtypes.string or dtype == dtypes.resource: + return None + + ctx = context.context() + if not ctx.executing_eagerly(): + return array_ops.zeros(shape, dtype) + + device = ctx.device_name + + if tensor_util.is_tf_type(shape): + shape_key = shape.ref() + else: + shape_key = shape + cache_key = shape_key, dtype, device + cached = ctx.zeros_cache().get(cache_key) + if cached is None: + if dtypes.as_dtype(dtype).is_bool: + value = False + else: + value = 0 + cached = _fast_fill(value, shape, dtype) + ctx.zeros_cache().put(cache_key, cached) + return cached + + +def _ones(shape, dtype): + as_dtype = dtypes.as_dtype(dtype) + if as_dtype == dtypes.string: + return None + + if not context.executing_eagerly(): + return array_ops.ones(shape, dtype) + + if as_dtype.is_bool: + value = 
True + else: + value = 1 + + if shape == (): # pylint: disable=g-explicit-bool-comparison + return constant_op.constant(value, dtype=dtype) + return _fast_fill(value, shape, dtype) + + +_default_vspace = imperative_grad.VSpace( + num_elements_fn=_num_elements, + aggregate_fn=_aggregate_grads, + zeros_fn=_zeros, + ones_fn=_ones, + zeros_like_fn=default_gradient.zeros_like, + ones_like_fn=default_gradient.ones_like, + graph_shape_fn=gen_array_ops.shape) +pywrap_tfe.TFE_Py_RegisterVSpace(_default_vspace) + + +def _handle_or_self(x): + """Unwrap resource variable/ndarray to return tensors.""" + if resource_variable_ops.is_resource_variable(x): + return x.handle + return x + + +def _extract_tensors_and_variables(tensor): + """Extracts tensors and variables from the input object.""" + for obj in nest.flatten(tensor): + if _pywrap_utils.IsTensor(obj) or _pywrap_utils.IsVariable(obj): + yield obj + elif isinstance(obj, composite_tensor.CompositeTensor): + components = type_spec.type_spec_from_value(obj)._to_components(obj) # pylint: disable=protected-access + yield from _extract_tensors_and_variables(components) + else: + raise ValueError(f"Passed in object {obj} of type {type(obj).__name__!r}" + f", not tf.Tensor or tf.Variable or ExtensionType.") + + +@tf_export("GradientTape", "autodiff.GradientTape", v1=["GradientTape"]) +class GradientTape: + """Record operations for automatic differentiation. + + Operations are recorded if they are executed within this context manager and + at least one of their inputs is being "watched". + + Trainable variables (created by `tf.Variable` or `tf.compat.v1.get_variable`, + where `trainable=True` is default in both cases) are automatically watched. + Tensors can be manually watched by invoking the `watch` method on this context + manager. + + For example, consider the function `y = x * x`. The gradient at `x = 3.0` can + be computed as: + + >>> x = tf.constant(3.0) + >>> with tf.GradientTape() as g: + ... g.watch(x) + ... 
y = x * x + >>> dy_dx = g.gradient(y, x) + >>> print(dy_dx) + tf.Tensor(6.0, shape=(), dtype=float32) + + GradientTapes can be nested to compute higher-order derivatives. For example, + + >>> x = tf.constant(5.0) + >>> with tf.GradientTape() as g: + ... g.watch(x) + ... with tf.GradientTape() as gg: + ... gg.watch(x) + ... y = x * x + ... dy_dx = gg.gradient(y, x) # dy_dx = 2 * x + >>> d2y_dx2 = g.gradient(dy_dx, x) # d2y_dx2 = 2 + >>> print(dy_dx) + tf.Tensor(10.0, shape=(), dtype=float32) + >>> print(d2y_dx2) + tf.Tensor(2.0, shape=(), dtype=float32) + + By default, the resources held by a GradientTape are released as soon as + GradientTape.gradient() method is called. To compute multiple gradients over + the same computation, create a persistent gradient tape. This allows multiple + calls to the gradient() method as resources are released when the tape object + is garbage collected. For example: + + >>> x = tf.constant(3.0) + >>> with tf.GradientTape(persistent=True) as g: + ... g.watch(x) + ... y = x * x + ... z = y * y + >>> dz_dx = g.gradient(z, x) # (4*x^3 at x = 3) + >>> print(dz_dx) + tf.Tensor(108.0, shape=(), dtype=float32) + >>> dy_dx = g.gradient(y, x) + >>> print(dy_dx) + tf.Tensor(6.0, shape=(), dtype=float32) + + By default GradientTape will automatically watch any trainable variables that + are accessed inside the context. If you want fine grained control over which + variables are watched you can disable automatic tracking by passing + `watch_accessed_variables=False` to the tape constructor: + + >>> x = tf.Variable(2.0) + >>> w = tf.Variable(5.0) + >>> with tf.GradientTape( + ... watch_accessed_variables=False, persistent=True) as tape: + ... tape.watch(x) + ... y = x ** 2 # Gradients will be available for `x`. + ... z = w ** 3 # No gradients will be available as `w` isn't being watched. + >>> dy_dx = tape.gradient(y, x) + >>> print(dy_dx) + tf.Tensor(4.0, shape=(), dtype=float32) + >>> # No gradients will be available as `w` isn't being watched. 
+ >>> dz_dw = tape.gradient(z, w) + >>> print(dz_dw) + None + + Note that when using models you should ensure that your variables exist when + using `watch_accessed_variables=False`. Otherwise it's quite easy to make your + first iteration not have any gradients: + + ```python + a = tf.keras.layers.Dense(32) + b = tf.keras.layers.Dense(32) + + with tf.GradientTape(watch_accessed_variables=False) as tape: + tape.watch(a.variables) # Since `a.build` has not been called at this point + # `a.variables` will return an empty list and the + # tape will not be watching anything. + result = b(a(inputs)) + tape.gradient(result, a.variables) # The result of this computation will be + # a list of `None`s since a's variables + # are not being watched. + ``` + + Note that only tensors with real or complex dtypes are differentiable. + """ + + def __init__(self, persistent=False, watch_accessed_variables=True): + """Creates a new GradientTape. + + Args: + persistent: Boolean controlling whether a persistent gradient tape + is created. False by default, which means at most one call can + be made to the gradient() method on this object. + watch_accessed_variables: Boolean controlling whether the tape will + automatically `watch` any (trainable) variables accessed while the tape + is active. Defaults to True meaning gradients can be requested from any + result computed in the tape derived from reading a trainable `Variable`. + If False users must explicitly `watch` any `Variable`s they want to + request gradients from. 
+ """ + self._tape = None + self._persistent = persistent + self._watch_accessed_variables = watch_accessed_variables + self._watched_variables = () + self._recording = False + + def __enter__(self): + """Enters a context inside which operations are recorded on this tape.""" + self._push_tape() + return self + + def __exit__(self, typ, value, traceback): + """Exits the recording context, no further operations are traced.""" + if self._recording: + self._pop_tape() + + def _push_tape(self): + """Pushes a new tape onto the tape stack.""" + if self._recording: + raise ValueError("Tape is still recording, This can happen if you try to " + "re-enter an already-active tape.") + if self._tape is None: + self._tape = tape.push_new_tape( + persistent=self._persistent, + watch_accessed_variables=self._watch_accessed_variables) + else: + tape.push_tape(self._tape) + self._recording = True + + def _pop_tape(self): + if not self._recording: + raise ValueError("Tape is not recording.") + tape.pop_tape(self._tape) + self._recording = False + + @tf_contextlib.contextmanager + def _ensure_recording(self): + """Ensures that this tape is recording.""" + if not self._recording: + try: + self._push_tape() + yield + finally: + self._pop_tape() + else: + yield + + # TODO(b/209081027): Add a variable in composite tensor test case after + # variables become composite tensors. + def watch(self, tensor): + """Ensures that `tensor` is being traced by this tape. + + Args: + tensor: a Tensor/Variable or list of Tensors/Variables. + + Raises: + ValueError: if it encounters something that is not a tensor. + """ + for t in _extract_tensors_and_variables(tensor): + if not backprop_util.IsTrainable(t): + logging.log_first_n( + logging.WARN, "The dtype of the watched tensor must be " + "floating (e.g. tf.float32), got %r", 5, t.dtype) + if hasattr(t, "handle"): + # There are many variable-like objects, all of them currently have + # `handle` attribute that points to a tensor. 
If this changes, + # internals of watch_variable need to change as well. + tape.watch_variable(self._tape, t) + else: + tape.watch(self._tape, t) + + @tf_contextlib.contextmanager + def stop_recording(self): + """Temporarily stops recording operations on this tape. + + Operations executed while this context manager is active will not be + recorded on the tape. This is useful for reducing the memory used by tracing + all computations. + + For example: + + >>> x = tf.constant(4.0) + >>> with tf.GradientTape() as tape: + ... with tape.stop_recording(): + ... y = x ** 2 + >>> dy_dx = tape.gradient(y, x) + >>> print(dy_dx) + None + + Yields: + None + Raises: + RuntimeError: if the tape is not currently recording. + """ + if self._tape is None: + raise RuntimeError( + "Trying to stop recording a tape which is not recording.") + self._pop_tape() + try: + yield + finally: + self._push_tape() + + def reset(self): + """Clears all information stored in this tape. + + Equivalent to exiting and reentering the tape context manager with a new + tape. For example, the two following code blocks are equivalent: + + ``` + with tf.GradientTape() as t: + loss = loss_fn() + with tf.GradientTape() as t: + loss += other_loss_fn() + t.gradient(loss, ...) # Only differentiates other_loss_fn, not loss_fn + + + # The following is equivalent to the above + with tf.GradientTape() as t: + loss = loss_fn() + t.reset() + loss += other_loss_fn() + t.gradient(loss, ...) # Only differentiates other_loss_fn, not loss_fn + ``` + + This is useful if you don't want to exit the context manager for the tape, + or can't because the desired reset point is inside a control flow construct: + + ``` + with tf.GradientTape() as t: + loss = ... 
+ if loss > k: + t.reset() + ``` + """ + self._pop_tape() + self._tape = None + self._push_tape() + + def watched_variables(self): + """Returns variables watched by this tape in order of construction.""" + if self._tape is not None: + self._watched_variables = self._tape.watched_variables() + return self._watched_variables + + def gradient(self, + target, + sources, + output_gradients=None, + unconnected_gradients=UnconnectedGradients.NONE): + """Computes the gradient using operations recorded in context of this tape. + + Note: Unless you set `persistent=True` a GradientTape can only be used to + compute one set of gradients (or jacobians). + + In addition to Tensors, gradient also supports RaggedTensors. For example, + + >>> x = tf.ragged.constant([[1.0, 2.0], [3.0]]) + >>> with tf.GradientTape() as g: + ... g.watch(x) + ... y = x * x + >>> g.gradient(y, x) + + + Args: + target: a list or nested structure of Tensors or Variables or + CompositeTensors to be differentiated. + sources: a list or nested structure of Tensors or Variables or + CompositeTensors. `target` will be differentiated against elements in + `sources`. + output_gradients: a list of gradients, one for each differentiable + element of target. Defaults to None. + unconnected_gradients: a value which can either hold 'none' or 'zero' and + alters the value which will be returned if the target and sources are + unconnected. The possible values and effects are detailed in + 'UnconnectedGradients' and it defaults to 'none'. + + Returns: + a list or nested structure of Tensors (or IndexedSlices, or None, or + CompositeTensor), one for each element in `sources`. Returned structure + is the same as the structure of `sources`. + + Raises: + RuntimeError: If called on a used, non-persistent tape. + RuntimeError: If called inside the context of the tape. + TypeError: If the target is a None object. + ValueError: If the target is a variable or if unconnected gradients is + called with an unknown value. 
+ """ + if self._tape is None: + raise RuntimeError("A non-persistent GradientTape can only be used to " + "compute one set of gradients (or jacobians)") + if self._recording: + if not self._persistent: + self._pop_tape() + else: + logging.log_first_n( + logging.WARN, "Calling GradientTape.gradient on a persistent " + "tape inside its context is significantly less " + "efficient than calling it outside the context (it " + "causes the gradient ops to be recorded on the " + "tape, leading to increased CPU and memory usage). " + "Only call GradientTape.gradient inside the " + "context if you actually want to trace the " + "gradient in order to compute higher order " + "derivatives.", 1) + + if target is None: + raise TypeError("Argument `target` should be a list or nested structure" + " of Tensors, Variables or CompositeTensors to be " + "differentiated, but received None.") + + flat_targets = composite_tensor_gradient.get_flat_tensors_for_gradients( + nest.flatten(target)) + # TODO(b/246997907): Remove this once + # ResourceVariableGradient.get_gradient_components returns the handle. + flat_targets = nest.map_structure(_handle_or_self, flat_targets) + + for t in flat_targets: + if not backprop_util.IsTrainable(t): + logging.vlog( + 1, "The dtype of the target tensor must be " + "floating (e.g. tf.float32) when calling GradientTape.gradient, " + "got %r", t.dtype) + + flat_sources_raw = nest.flatten(sources) + flat_sources = [] + for t in flat_sources_raw: + flat_sources.append(_handle_or_self(t)) + flat_sources = composite_tensor_gradient.get_flat_tensors_for_gradients( + flat_sources) + for t in flat_sources: + if not backprop_util.IsTrainable(t): + logging.vlog( + 1, "The dtype of the source tensor must be " + "floating (e.g. tf.float32) when calling GradientTape.gradient, " + "got %r", t.dtype) + if getattr(t, "is_packed", False): + raise ValueError( + "GradientTape.gradient is not supported on packed EagerTensors yet." 
+ ) + + if output_gradients is not None: + output_gradients = nest.flatten( + variable_utils.convert_variables_to_tensors(output_gradients)) + output_gradients = ( + composite_tensor_gradient.get_flat_tensors_for_gradients( + output_gradients)) + output_gradients = [None if x is None else ops.convert_to_tensor(x) + for x in output_gradients] + + flat_grad = imperative_grad.imperative_grad( + self._tape, + flat_targets, + flat_sources, + output_gradients=output_gradients, + sources_raw=flat_sources_raw, + unconnected_gradients=unconnected_gradients) + + if not self._persistent: + # Keep track of watched variables before setting tape to None + self._watched_variables = self._tape.watched_variables() + self._tape = None + + flat_sources_raw = nest.map_structure(_handle_or_self, flat_sources_raw) + flat_grad = composite_tensor_gradient.replace_flat_tensors_for_gradients( + flat_sources_raw, flat_grad) + grad = nest.pack_sequence_as(sources, flat_grad) + return grad + + def jacobian(self, + target, + sources, + unconnected_gradients=UnconnectedGradients.NONE, + parallel_iterations=None, + experimental_use_pfor=True): + """Computes the jacobian using operations recorded in context of this tape. + + Note: Unless you set `persistent=True` a GradientTape can only be used to + compute one set of gradients (or jacobians). + + Note: By default the jacobian implementation uses parallel for (pfor), which + creates a tf.function under the hood for each jacobian call. For better + performance, and to avoid recompilation and vectorization rewrites on each + call, enclose GradientTape code in @tf.function. + + See[wikipedia + article](http://en.wikipedia.org/wiki/jacobian_matrix_and_determinant) + for the definition of a Jacobian. + + Example usage: + + ```python + with tf.GradientTape() as g: + x = tf.constant([1.0, 2.0]) + g.watch(x) + y = x * x + jacobian = g.jacobian(y, x) + # jacobian value is [[2., 0.], [0., 4.]] + ``` + + Args: + target: Tensor to be differentiated. 
+ sources: a list or nested structure of Tensors or Variables. `target` + will be differentiated against elements in `sources`. + unconnected_gradients: a value which can either hold 'none' or 'zero' and + alters the value which will be returned if the target and sources are + unconnected. The possible values and effects are detailed in + 'UnconnectedGradients' and it defaults to 'none'. + parallel_iterations: A knob to control how many iterations are dispatched + in parallel. This knob can be used to control the total memory usage. + experimental_use_pfor: If true, vectorizes the jacobian computation. Else + falls back to a sequential while_loop. Vectorization can sometimes fail + or lead to excessive memory usage. This option can be used to disable + vectorization in such cases. + + Returns: + A list or nested structure of Tensors (or None), one for each element in + `sources`. Returned structure is the same as the structure of `sources`. + Note if any gradient is sparse (IndexedSlices), jacobian function + currently makes it dense and returns a Tensor instead. This may change in + the future. + + + Raises: + RuntimeError: If called on a used, non-persistent tape. + RuntimeError: If called on a non-persistent tape with eager execution + enabled and without enabling experimental_use_pfor. + ValueError: If vectorization of jacobian computation fails. + """ + if self._tape is None: + raise RuntimeError("A non-persistent GradientTape can only be used to " + "compute one set of gradients (or jacobians)") + + flat_sources = nest.flatten(sources) + target_static_shape = target.shape + target_shape = array_ops.shape(target) + # Note that we push and pop the tape here and below. This is needed since we + # need gradients through the enclosed operations. 
+ with self._ensure_recording(): + target = array_ops.reshape(target, [-1]) + + def loop_fn(i): + with self._ensure_recording(): + y = array_ops.gather(target, i) + return self.gradient(y, flat_sources, + unconnected_gradients=unconnected_gradients) + + try: + target_size = int(target.shape[0]) + except TypeError: + target_size = array_ops.shape(target)[0] + + if experimental_use_pfor: + try: + output = pfor_ops.pfor(loop_fn, target_size, + parallel_iterations=parallel_iterations) + except ValueError as err: + raise ValueError( + "Encountered an exception while vectorizing the " + "jacobian computation. Vectorization can be disabled by setting" + " experimental_use_pfor to False.") from err + else: + if context.executing_eagerly() and not self._persistent: + raise RuntimeError( + "GradientTape must be created with persistent=True" + " to compute the jacobian with eager execution enabled and with " + " experimental_use_pfor set to False.") + output = pfor_ops.for_loop( + loop_fn, [target.dtype] * len(flat_sources), target_size, + parallel_iterations=parallel_iterations) + + for i, out in enumerate(output): + if out is not None: + new_shape = array_ops.concat( + [target_shape, array_ops.shape(out)[1:]], axis=0) + out = array_ops.reshape(out, new_shape) + if context.executing_eagerly(): + out.set_shape(target_static_shape.concatenate(flat_sources[i].shape)) + output[i] = out + + return nest.pack_sequence_as(sources, output) + + def batch_jacobian(self, + target, + source, + unconnected_gradients=UnconnectedGradients.NONE, + parallel_iterations=None, + experimental_use_pfor=True): + """Computes and stacks per-example jacobians. + + See [wikipedia article](http://en.wikipedia.org/wiki/jacobian_matrix_and_determinant) + for the definition of a Jacobian. This function is essentially an efficient + implementation of the following: + + `tf.stack([self.jacobian(y[i], x[i]) for i in range(x.shape[0])])`. 
+ + Note that compared to `GradientTape.jacobian` which computes gradient of + each output value w.r.t each input value, this function is useful when + `target[i,...]` is independent of `source[j,...]` for `j != i`. This + assumption allows more efficient computation as compared to + `GradientTape.jacobian`. The output, as well as intermediate activations, + are lower dimensional and avoid a bunch of redundant zeros which would + result in the jacobian computation given the independence assumption. + + Note: Unless you set `persistent=True` a GradientTape can only be used to + compute one set of gradients (or jacobians). + + Note: By default the batch_jacobian implementation uses parallel for (pfor), + which creates a tf.function under the hood for each batch_jacobian call. + For better performance, and to avoid recompilation and vectorization + rewrites on each call, enclose GradientTape code in @tf.function. + + + Example usage: + + ```python + with tf.GradientTape() as g: + x = tf.constant([[1., 2.], [3., 4.]], dtype=tf.float32) + g.watch(x) + y = x * x + batch_jacobian = g.batch_jacobian(y, x) + # batch_jacobian is [[[2, 0], [0, 4]], [[6, 0], [0, 8]]] + ``` + + Args: + target: A tensor with rank 2 or higher and with shape [b, y1, ..., y_n]. + `target[i,...]` should only depend on `source[i,...]`. + source: A tensor with rank 2 or higher and with shape [b, x1, ..., x_m]. + unconnected_gradients: a value which can either hold 'none' or 'zero' and + alters the value which will be returned if the target and sources are + unconnected. The possible values and effects are detailed in + 'UnconnectedGradients' and it defaults to 'none'. + parallel_iterations: A knob to control how many iterations are dispatched + in parallel. This knob can be used to control the total memory usage. + experimental_use_pfor: If true, uses pfor for computing the Jacobian. Else + uses a tf.while_loop. 
+ + Returns: + A tensor `t` with shape [b, y_1, ..., y_n, x1, ..., x_m] where `t[i, ...]` + is the jacobian of `target[i, ...]` w.r.t. `source[i, ...]`, i.e. stacked + per-example jacobians. + + Raises: + RuntimeError: If called on a used, non-persistent tape. + RuntimeError: If called on a non-persistent tape with eager execution + enabled and without enabling experimental_use_pfor. + ValueError: If vectorization of jacobian computation fails or if first + dimension of `target` and `source` do not match. + """ + if self._tape is None: + raise RuntimeError("A non-persistent GradientTape can only be used to" + "compute one set of gradients (or jacobians)") + target_shape = target.shape + if target_shape.rank is None: + dim = tensor_shape.Dimension(None) + else: + dim = target_shape.dims[0] + if not (target_shape.with_rank_at_least(2) and + source.shape.with_rank_at_least(2) and + dim.is_compatible_with(source.shape[0])): + raise ValueError( + "Need first dimension of target shape (%s) and " + "source shape (%s) to match." % (target.shape, source.shape)) + if target_shape.is_fully_defined(): + batch_size = int(target_shape[0]) + target_row_size = target_shape.num_elements() // batch_size + else: + target_shape = array_ops.shape(target) + batch_size = target_shape[0] + target_row_size = array_ops.size(target) // batch_size + source_shape = array_ops.shape(source) + # Flatten target to 2-D. + # Note that we push and pop the tape here and below. This is needed since we + # need gradients through the enclosed operations. 
+ with self._ensure_recording(): + with ops.control_dependencies( + [check_ops.assert_equal(batch_size, source_shape[0])]): + target = array_ops.reshape(target, [batch_size, target_row_size]) + + run_once = False + + def loop_fn(i): + nonlocal run_once + if run_once and not self._persistent: + if parallel_iterations is not None: + raise RuntimeError( + "GradientTape must be created with persistent=True" + " to compute the batch_jacobian with parallel_iterations.") + else: + raise RuntimeError( + "GradientTape must be created with persistent=True" + " to compute the batch_jacobian.") + run_once = True + + with self._ensure_recording(): + y = array_ops.gather(target, i, axis=1) + return self.gradient(y, source, + unconnected_gradients=unconnected_gradients) + + if experimental_use_pfor: + try: + output = pfor_ops.pfor(loop_fn, target_row_size, + parallel_iterations=parallel_iterations) + except ValueError as err: + raise ValueError( + "Encountered an exception while vectorizing the " + "batch_jacobian computation. Vectorization can be disabled by " + "setting experimental_use_pfor to False.") from err + else: + if context.executing_eagerly() and not self._persistent: + raise RuntimeError( + "GradientTape must be created with persistent=True" + " to compute the batch_jacobian with eager execution enabled and " + " with experimental_use_pfor set to False.") + output = pfor_ops.for_loop(loop_fn, target.dtype, target_row_size, + parallel_iterations=parallel_iterations) + new_shape = array_ops.concat([target_shape, source_shape[1:]], axis=0) + if output is None: + # Note that this block is returning zeros when it could use `None` to + # represent unconnected gradients. This is to maintain compatibility with + # the previous behavior, which ignored `unconnected_gradients`. 
+ output = array_ops.zeros(new_shape, target.dtype) + return output + else: + output = array_ops.reshape(output, + [target_row_size, batch_size, -1]) + output = array_ops.transpose(output, [1, 0, 2]) + + output = array_ops.reshape(output, new_shape) + return output diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/backprop_util.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/backprop_util.py new file mode 100644 index 0000000000000000000000000000000000000000..c4fe1158dc3c8e35ed9f1dfcc0f654d4c9285b86 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/backprop_util.py @@ -0,0 +1,105 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Shared utilities related to backprop.""" + +from tensorflow.core.config import flags +from tensorflow.core.framework import types_pb2 +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import indexed_slices +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor as tensor_lib +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import handle_data_util +from tensorflow.python.ops import math_ops + + +def _DTypeFromTensor(tensor): + """Extract either `tensor.dtype` or the unanimous sub-type of a variant.""" + dtype = tensor.dtype + if dtype.base_dtype == dtypes.variant: + # If we know statically that the data a variant points to is non-trainable + # then the variant itself is non-trainable. + if isinstance(tensor, ops.EagerTensor): + handle_data = tensor._handle_data # pylint: disable=protected-access + else: + handle_data = handle_data_util.get_resource_handle_data(tensor) + if (handle_data is not None + and handle_data.is_set + and handle_data.shape_and_type): + first_type = handle_data.shape_and_type[0].dtype + # Some variants have statically unknown dtypes; we can't make inferences + # about trainability, so we conservatively assume they're trainable + # (which may waste memory passing zeros around, but will be correct). 
+ if (first_type != types_pb2.DT_INVALID + and all(shape_and_type.dtype == first_type + for shape_and_type in handle_data.shape_and_type)): + return first_type + return dtype + + +def IsTrainable(tensor_or_dtype): + """Determines whether a tensor or dtype supports infinitesimal changes.""" + if tensor_util.is_tf_type(tensor_or_dtype): + dtype = _DTypeFromTensor(tensor_or_dtype) + else: + dtype = tensor_or_dtype + dtype = dtypes.as_dtype(dtype) + trainable_dtypes = [dtypes.float16, dtypes.float32, dtypes.float64, + dtypes.complex64, dtypes.complex128, dtypes.resource, + dtypes.variant, dtypes.bfloat16] + if flags.config().enable_quantized_dtypes_training.value(): + trainable_dtypes.extend([dtypes.qint8, dtypes.qint16, dtypes.qint32, + dtypes.quint8, dtypes.quint16]) + return dtype.base_dtype in trainable_dtypes + + +def FlattenNestedIndexedSlices(grad): + assert isinstance(grad, indexed_slices.IndexedSlices) + if isinstance(grad.values, tensor_lib.Tensor): + return grad + else: + assert isinstance(grad.values, indexed_slices.IndexedSlices) + g = FlattenNestedIndexedSlices(grad.values) + return indexed_slices.IndexedSlices( + g.values, array_ops.gather(grad.indices, g.indices), g.dense_shape) + + +def AggregateIndexedSlicesGradients(grads): + """Aggregates gradients containing `IndexedSlices`s.""" + if len(grads) < 1: + return None + if len(grads) == 1: + return grads[0] + grads = [g for g in grads if g is not None] + # If any gradient is a `Tensor`, sum them up and return a dense tensor + # object. + if any(isinstance(g, tensor_lib.Tensor) for g in grads): + return math_ops.add_n(grads) + + # The following `_as_indexed_slices_list` casts ids of IndexedSlices into + # int64. It is to make sure the inputs of `concat` all have same the data + # type. + grads = math_ops._as_indexed_slices_list(grads) # pylint: disable=protected-access + + grads = [FlattenNestedIndexedSlices(x) for x in grads] + # Form IndexedSlices out of the concatenated values and indices. 
+ concat_grad = indexed_slices.IndexedSlices( + array_ops.concat([x.values for x in grads], axis=0), + array_ops.concat([x.indices for x in grads], axis=0), + grads[0].dense_shape) + + return concat_grad + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/benchmarks_test_base.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/benchmarks_test_base.py new file mode 100644 index 0000000000000000000000000000000000000000..14ccf1578f62ec9603468df06d2772d640451ff0 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/benchmarks_test_base.py @@ -0,0 +1,77 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +r"""Benchmark base to run and report benchmark results.""" + +import os +import uuid + +from tensorflow.python.eager import test +from tensorflow.python.platform import flags +from tensorflow.python.profiler import profiler_v2 as profiler + +flags.DEFINE_bool("xprof", False, "Run and report benchmarks with xprof on") +flags.DEFINE_string("logdir", "/tmp/xprof/", "Directory to store xprof data") + + +class MicroBenchmarksBase(test.Benchmark): + """Run and report benchmark results. + + The first run is without any profilng. + Second run is with xprof and python trace. Third run is with xprof without + python trace. Note: xprof runs are with fewer iterations. 
+ """ + + def run_with_xprof(self, enable_python_trace, run_benchmark, func, + num_iters_xprof, execution_mode, suid): + if enable_python_trace: + options = profiler.ProfilerOptions(python_tracer_level=1) + logdir = os.path.join(flags.FLAGS.logdir, suid + "_with_python") + else: + options = profiler.ProfilerOptions(python_tracer_level=0) + logdir = os.path.join(flags.FLAGS.logdir, suid) + with profiler.Profile(logdir, options): + total_time = run_benchmark(func, num_iters_xprof, execution_mode) + us_per_example = float("{0:.3f}".format(total_time * 1e6 / num_iters_xprof)) + return logdir, us_per_example + + def run_report(self, run_benchmark, func, num_iters, execution_mode=None): + """Run and report benchmark results.""" + total_time = run_benchmark(func, num_iters, execution_mode) + mean_us = total_time * 1e6 / num_iters + extras = { + "examples_per_sec": float("{0:.3f}".format(num_iters / total_time)), + "us_per_example": float("{0:.3f}".format(total_time * 1e6 / num_iters)) + } + + if flags.FLAGS.xprof: + suid = str(uuid.uuid4()) + # Re-run with xprof and python trace. + num_iters_xprof = min(100, num_iters) + xprof_link, us_per_example = self.run_with_xprof(True, run_benchmark, + func, num_iters_xprof, + execution_mode, suid) + extras["xprof link with python trace"] = xprof_link + extras["us_per_example with xprof and python"] = us_per_example + + # Re-run with xprof but no python trace. 
+ xprof_link, us_per_example = self.run_with_xprof(False, run_benchmark, + func, num_iters_xprof, + execution_mode, suid) + extras["xprof link"] = xprof_link + extras["us_per_example with xprof"] = us_per_example + + benchmark_name = self._get_benchmark_name() + self.report_benchmark( + iters=num_iters, wall_time=mean_us, extras=extras, name=benchmark_name) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/cancellation.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/cancellation.py new file mode 100644 index 0000000000000000000000000000000000000000..a8956d7a683507d3d90eab8f59d60fe9ed638b69 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/cancellation.py @@ -0,0 +1,62 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Cancellation support for eager execution.""" + +from tensorflow.python import pywrap_tfe + + +class CancellationManager(object): + """A mechanism for cancelling blocking computation.""" + + __slots__ = ["_impl"] + + def __init__(self): + self._impl = pywrap_tfe.TFE_NewCancellationManager() + + @property + def is_cancelled(self): + """Returns `True` if `CancellationManager.start_cancel` has been called.""" + return pywrap_tfe.TFE_CancellationManagerIsCancelled(self._impl) + + def start_cancel(self): + """Cancels blocking operations that have been registered with this object.""" + pywrap_tfe.TFE_CancellationManagerStartCancel(self._impl) + + def get_cancelable_function(self, concrete_function): + def cancellable(*args, **kwargs): + with CancellationManagerContext(self): + return concrete_function(*args, **kwargs) + return cancellable + +_active_context = None + + +def context(): + return _active_context + + +class CancellationManagerContext: + """A Python context for wrapping a cancellable ConcreteFunction.""" + + def __init__(self, cancellation_manager): + self._cancellation_manager = cancellation_manager + + def __enter__(self): + global _active_context + _active_context = self._cancellation_manager + + def __exit__(self, exc_type, exc_value, exc_tb): + global _active_context + _active_context = None diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/context.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/context.py new file mode 100644 index 0000000000000000000000000000000000000000..19a4cb258c582fd992afbf23eb471b31c45cb878 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/context.py @@ -0,0 +1,2971 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""State management for eager execution.""" + +import collections +import contextlib +import copy +import gc +import itertools +import os +import random +import threading + +from absl import logging +import numpy as np + +from tensorflow.core.framework import function_pb2 +from tensorflow.core.framework import graph_debug_info_pb2 +from tensorflow.core.protobuf import config_pb2 +from tensorflow.core.protobuf import rewriter_config_pb2 +from tensorflow.python import pywrap_tfe +from tensorflow.python import tf2 +from tensorflow.python.client import pywrap_tf_session +from tensorflow.python.eager import cancellation +from tensorflow.python.eager import execute +from tensorflow.python.eager import executor +from tensorflow.python.eager import monitoring +from tensorflow.python.framework import c_api_util +from tensorflow.python.framework import device as pydev +from tensorflow.python.framework import tfrt_utils +from tensorflow.python.util import compat +from tensorflow.python.util import function_utils +from tensorflow.python.util import is_in_graph_mode +from tensorflow.python.util import tf_contextlib +from tensorflow.python.util.deprecation import deprecated +from tensorflow.python.util.tf_export import tf_export +from tensorflow.tsl.protobuf import coordination_config_pb2 + + +# TODO(b/307794935): Remove after a solution is found. 
is_oss = True  # updated by copybara

# Integer tags for the two execution modes; `default_execution_mode` is fixed
# at import time from the TF2 feature switch.
GRAPH_MODE = 0
EAGER_MODE = 1

default_execution_mode = EAGER_MODE if tf2.enabled() else GRAPH_MODE

# Cache from (old_device_name, partial_new_device_name) -> (new_device_name,
# new_device_spec).
# Note that we do not protect this with a lock and instead rely on python's GIL
# and the idempotent nature of writes to provide thread safety.
_device_parsing_cache = {}
_starting_device_spec = pydev.DeviceSpec.from_string("")

# Largest signed 32-bit value; used below as the upper bound for generated
# operation seeds.
_MAXINT32 = 2**31 - 1

# Device-placement policies, re-exported from the C extension so callers can
# use them without importing pywrap_tfe directly.
DEVICE_PLACEMENT_EXPLICIT = pywrap_tfe.TFE_DEVICE_PLACEMENT_EXPLICIT
DEVICE_PLACEMENT_WARN = pywrap_tfe.TFE_DEVICE_PLACEMENT_WARN
DEVICE_PLACEMENT_SILENT = pywrap_tfe.TFE_DEVICE_PLACEMENT_SILENT
DEVICE_PLACEMENT_SILENT_FOR_INT32 = (
    pywrap_tfe.TFE_DEVICE_PLACEMENT_SILENT_FOR_INT32)

# Execution-mode values accepted by `Context(execution_mode=...)`.
SYNC = 0
ASYNC = 1

# Default keep-alive (seconds) used when setting a remote server def.
_KEEP_ALIVE_SECS = 600

_python_eager_context_create_counter = monitoring.Counter(
    "/tensorflow/api/python/eager_context_create_counter",
    "Counter for number of eager contexts created in Python.")

# Re-exporting through context.
is_tfrt_enabled = tfrt_utils.enabled

# This flag and the associated environment var are transient and will eventually
# be removed, once this experiment is enabled by default.
_JIT_COMPILE_REWRITE_ENABLED = os.getenv("TF_JIT_COMPILE_REWRITE") == "1"


def run_eager_op_as_function_enabled():
  """Returns True: running eager ops as functions is unconditionally on."""
  return True


# This method should only be called after the context has been initialized.
def enable_jit_compile_rewrite():
  """Run jit_compile functions through rewrite pass.

  This runs jit_compile functions through all of the multidevice function
  rewrite passes.
  """
  global _JIT_COMPILE_REWRITE_ENABLED
  _JIT_COMPILE_REWRITE_ENABLED = True
  if context_safe() is not None:
    context_safe().jit_compile_rewrite = True


# This method should only be called after the context has been initialized.
def disable_jit_compile_rewrite():
  """Stops running jit_compile functions through the rewrite passes."""
  global _JIT_COMPILE_REWRITE_ENABLED
  _JIT_COMPILE_REWRITE_ENABLED = False
  if context_safe() is not None:
    context_safe().jit_compile_rewrite = False


def jit_compile_rewrite_enabled():
  """Returns whether the jit_compile rewrite is currently enabled.

  Prefers the live context's setting when a context exists; otherwise falls
  back to the process-level flag.
  """
  if context_safe() is not None:
    return context_safe().jit_compile_rewrite
  return _JIT_COMPILE_REWRITE_ENABLED


# Expose it as internally public APIs for Keras use cases in b/171080602.
tf_export("__internal__.is_tfrt_enabled", v1=[])(is_tfrt_enabled)


class _EagerTensorCache(object):
  """Simple cache which evicts items based on length in a FIFO manner."""

  __slots__ = ["_data", "_max_items", "_max_tensor_size"]

  def __init__(self, max_items=256, max_tensor_size=10000):
    # OrderedDict preserves insertion order, giving FIFO eviction via
    # popitem(last=False).
    self._data = collections.OrderedDict()
    self._max_items = max_items
    self._max_tensor_size = max_tensor_size

  def put(self, key, value):
    """Caches `value` under `key` unless it exceeds the element-count limit."""
    # Oversized tensors are simply not cached.
    if value._num_elements() > self._max_tensor_size:  # pylint: disable=protected-access
      return

    self._data[key] = value

    # Evict the oldest entry once the cache grows past its capacity.
    if len(self._data) > self._max_items:
      self._data.popitem(last=False)

  def get(self, key):
    """Returns the cached value for `key`, or None if absent."""
    return self._data.get(key, None)

  def flush(self):
    """Drops every cached entry."""
    self._data.clear()


class FunctionCallOptions:
  """Options applied at call sites of eager functions.

  Eager functions are functions decorated with tf.contrib.eager.defun.
  """

  __slots__ = ["_config_proto_serialized", "_executor_type"]

  def __init__(self, executor_type=None, config_proto=None):
    """Constructor.

    Args:
      executor_type: (optional) name of the executor to be used to execute the
        eager function. If None or an empty string, the default Tensorflow
        executor will be used.
      config_proto: (optional) a `config_pb2.ConfigProto` proto or a serialized
        string of that proto. The config used by Grappler when optimizing the
        function graph. Each concrete function is optimized the first time it
        is called. Changing config_proto after the first call has no effect. If
        config_proto is None, an empty RewriterConfig will be used.
    """
    # Assign through the property setters so that normalization/validation
    # below runs on construction.
    self.config_proto_serialized = config_proto
    self.executor_type = executor_type

  @property
  def executor_type(self):
    return self._executor_type

  @executor_type.setter
  def executor_type(self, executor_type):
    self._executor_type = executor_type

  @property
  def config_proto_serialized(self):
    return self._config_proto_serialized

  @config_proto_serialized.setter
  def config_proto_serialized(self, config):
    # Normalizes the accepted forms (proto message, serialized string, None)
    # into a serialized representation.
    if isinstance(config, config_pb2.ConfigProto):
      self._config_proto_serialized = config.SerializeToString(
          deterministic=True)
    elif isinstance(config, str):
      self._config_proto_serialized = config
    elif config is None:
      self._config_proto_serialized = (
          config_pb2.ConfigProto().SerializeToString())
    else:
      raise ValueError("the rewriter config must be either a "
                       "config_pb2.ConfigProto, or a serialized string of that "
                       "proto or None. got: {}".format(type(config)))

  def as_attrs(self):
    """Returns the options as a dict of function-call attributes."""
    if self.config_proto_serialized is None:
      config = function_utils.get_disabled_rewriter_config()
    else:
      config = self.config_proto_serialized
    executor_type = self.executor_type or ""

    return {"executor_type": executor_type, "config_proto": config}


# Map from context_id (an int) to _TensorCaches.
# Dicts are thread safe in CPython.
# TODO(iga): Remove this once TensorCaches are moved to C++.
_tensor_caches_map = {}


class _TensorCaches(threading.local):
  """Thread local tensor caches."""

  __slots__ = ["_ones_rank_cache", "_zeros_cache"]

  def __init__(self):
    super().__init__()
    # Both caches are created lazily by the properties below.
    self._ones_rank_cache = None
    self._zeros_cache = None

  @property
  def ones_rank_cache(self):
    if not self._ones_rank_cache:
      self._ones_rank_cache = _EagerTensorCache()
    return self._ones_rank_cache

  @property
  def zeros_cache(self):
    if not self._zeros_cache:
      self._zeros_cache = _EagerTensorCache()
    return self._zeros_cache


# Record of a single context switch: whether the entered context builds a
# function, the callable that re-enters it, and (for graphs) the device
# function stack.
ContextSwitch = collections.namedtuple(
    "ContextSwitch",
    ["is_building_function", "enter_context_fn", "device_stack"])


# `_ContextSwitchStack` is a `threading.local` to match the semantics of
# ``DefaultGraphStack`, which is also a `threading.local`.
class _ContextSwitchStack(threading.local):
  """A thread-local stack of context switches."""

  def __init__(self, eager):
    super().__init__()
    self.stack = []
    if eager:
      # Initialize the stack with a pointer to enter the eager context; this
      # ensures that the fact that eager execution was enabled is propagated
      # across threads, since (1) `enable_eager_execution` modifies a
      # process-level flag (`default_execution_mode`) and (2) `__init__` is
      # called each time a threading.local object is used in a separate thread.
      self.push(
          is_building_function=False,
          enter_context_fn=eager_mode,
          device_stack=None)

  def push(self, is_building_function, enter_context_fn, device_stack):
    """Push metadata about a context switch onto the stack.

    A context switch can take any one of the two forms: installing a graph as
    the default graph, or entering the eager context. For each context switch,
    we record whether or not the entered context is building a function.

    Args:
      is_building_function: (bool.) Whether the context is building a function.
      enter_context_fn: (function.) A callable that executes the context
        switch. For example, `graph.as_default` or `eager_mode`.
      device_stack: If applicable, the device function stack for this graph.
        When breaking out of graphs in init_scope, the innermost nonempty
        device stack is used. Eager contexts put `None` here and the value is
        never used.
    """

    self.stack.append(
        ContextSwitch(is_building_function, enter_context_fn, device_stack))

  def pop(self):
    """Pop the stack."""

    self.stack.pop()


@tf_export("config.LogicalDevice")
class LogicalDevice(
    collections.namedtuple("LogicalDevice", ["name", "device_type"])):
  """Abstraction for a logical device initialized by the runtime.

  A `tf.config.LogicalDevice` corresponds to an initialized logical device on a
  `tf.config.PhysicalDevice` or a remote device visible to the cluster. Tensors
  and operations can be placed on a specific logical device by calling
  `tf.device` with a specified `tf.config.LogicalDevice`.

  Fields:
    name: The fully qualified name of the device. Can be used for Op or
      function placement.
    device_type: String declaring the type of device such as "CPU" or "GPU".
  """


@tf_export("config.LogicalDeviceConfiguration",
           "config.experimental.VirtualDeviceConfiguration")
class LogicalDeviceConfiguration(
    collections.namedtuple("LogicalDeviceConfiguration", [
        "memory_limit", "experimental_priority", "experimental_device_ordinal"
    ])):
  """Configuration class for logical devices.

  The class specifies the parameters to configure a `tf.config.PhysicalDevice`
  as it is initialized to a `tf.config.LogicalDevice` during runtime
  initialization. Not all fields are valid for all device types.

  See `tf.config.get_logical_device_configuration` and
  `tf.config.set_logical_device_configuration` for usage examples.

  Fields:
    memory_limit: (optional) Maximum memory (in MB) to allocate on the virtual
      device. Currently only supported for GPUs.
    experimental_priority: (optional) Priority to assign to a virtual device.
      Lower values have higher priorities and 0 is the default.
      Within a physical GPU, the GPU scheduler will prioritize ops on virtual
      devices with higher priority. Currently only supported for Nvidia GPUs.
    experimental_device_ordinal: (optional) Ordinal number to order the virtual
      device.
      LogicalDevice with lower ordinal number will receive a lower device id.
      Physical device id and location in the list is used to break ties.
      Currently only supported for Nvidia GPUs.
  """

  def __new__(cls,
              memory_limit=None,
              experimental_priority=None,
              experimental_device_ordinal=None):
    # Every field defaults to None so callers may supply any subset of the
    # configuration options.
    return super().__new__(cls, memory_limit, experimental_priority,
                           experimental_device_ordinal)


@tf_export("config.PhysicalDevice")
class PhysicalDevice(
    collections.namedtuple("PhysicalDevice", ["name", "device_type"])):
  """Abstraction for a locally visible physical device.

  TensorFlow can utilize various devices such as the CPU or multiple GPUs
  for computation. Before initializing a local device for use, the user can
  customize certain properties of the device such as its visibility or memory
  configuration.

  Once a visible `tf.config.PhysicalDevice` is initialized one or more
  `tf.config.LogicalDevice` objects are created. Use
  `tf.config.set_visible_devices` to configure the visibility of a physical
  device and `tf.config.set_logical_device_configuration` to configure multiple
  `tf.config.LogicalDevice` objects for a `tf.config.PhysicalDevice`. This is
  useful when separation between models is needed or to simulate a multi-device
  environment.

  Fields:
    name: Unique identifier for device.
    device_type: String declaring the type of device such as "CPU" or "GPU".
+ """ + pass + + +class _AtomicCounter(object): + """A simple atomic counter.""" + + __slots__ = ["_value", "_lock"] + + def __init__(self): + self._value = 0 + self._lock = threading.Lock() + + def increment_and_get(self): + with self._lock: + self._value += 1 + return self._value + + +_context_id_counter = _AtomicCounter() + + +class _TensorCacheDeleter(object): + """Deletes tensor caches for a given context.""" + + __slots__ = ["_context_id"] + + def __init__(self, context_id): + self._context_id = context_id + + def __del__(self): + if _tensor_caches_map is None: + return + if self._context_id in _tensor_caches_map: + del _tensor_caches_map[self._context_id] + + +# TODO(agarwal): rename to EagerContext / EagerRuntime ? +# TODO(agarwal): consider keeping the corresponding Graph here. +class Context: + """Environment in which eager operations execute.""" + + # TODO(agarwal): create and link in some documentation for `execution_mode`. + # pylint: disable=redefined-outer-name + def __init__(self, + config=None, + device_policy=None, + execution_mode=None, + server_def=None): + """Creates a new Context. + + Args: + config: (Optional.) A `ConfigProto` protocol buffer with configuration + options for the Context. Note that a lot of these options may be + currently unimplemented or irrelevant when eager execution is enabled. + device_policy: (Optional.) What policy to use when trying to run an + operation on a device with inputs which are not on that device. When set + to None, an appropriate value will be picked automatically. The value + picked may change between TensorFlow releases. Defaults to + DEVICE_PLACEMENT_SILENT. + Valid values: + - DEVICE_PLACEMENT_EXPLICIT: raises an error if the placement is not + correct. + - DEVICE_PLACEMENT_WARN: copies the tensors which are not on the right + device but raises a warning. + - DEVICE_PLACEMENT_SILENT: silently copies the tensors. This might hide + performance problems. 
+ - DEVICE_PLACEMENT_SILENT_FOR_INT32: silently copies int32 tensors, + raising errors on the other ones. + execution_mode: (Optional.) Policy controlling how operations dispatched + are actually executed. When set to None, an appropriate value will be + picked automatically. The value picked may change between TensorFlow + releases. + Valid values: + - SYNC: executes each operation synchronously. + - ASYNC: executes each operation asynchronously. These operations may + return "non-ready" handles. + server_def: (Optional.) A tensorflow::ServerDef proto. Enables execution + on remote devices. GrpcServers need to be started by creating an + identical server_def to this, and setting the appropriate task_indexes, + so that the servers can communicate. It will then be possible to execute + operations on remote devices. + + Raises: + ValueError: If execution_mode is not valid. + """ + # This _id is used only to index the tensor caches. + # TODO(iga): Remove this when tensor caches are moved to C++. + self._id = _context_id_counter.increment_and_get() + self._tensor_cache_deleter = _TensorCacheDeleter(self._id) + _tensor_caches_map[self._id] = _TensorCaches() + + self._config = config + self._thread_local_data = pywrap_tfe.EagerContextThreadLocalData( + self, + is_eager=lambda: default_execution_mode == EAGER_MODE, + device_spec=_starting_device_spec) + self._context_switches = _ContextSwitchStack(self.executing_eagerly()) + self._context_handle = None + self._context_devices = None + self._seed = None + self._initialize_lock = threading.Lock() + self._initialized = False + if device_policy is None: + device_policy = DEVICE_PLACEMENT_SILENT + self._device_policy = device_policy + self._mirroring_policy = None + if execution_mode not in (None, SYNC, ASYNC): + raise ValueError("execution_mode should be None/SYNC/ASYNC. 
Got %s" % + execution_mode) + if execution_mode is None: + execution_mode = SYNC + self._default_is_async = execution_mode == ASYNC + self._use_tfrt = is_tfrt_enabled() + self._jit_compile_rewrite = jit_compile_rewrite_enabled() + self._server_def = server_def + self._collective_ops_server_def = None + self._collective_leader = None + self._collective_scoped_allocator_enabled_ops = None + self._collective_use_nccl_communication = None + self._collective_device_filters = None + self._coordination_service_config = None + + self._device_lock = threading.Lock() + self._physical_devices = None + self._physical_device_to_index = None + self._pluggable_devices = None + self._visible_device_list = [] + self._memory_growth_map = None + self._virtual_device_map = {} + + # Values set after construction + self._optimizer_jit = None + self._intra_op_parallelism_threads = None + self._inter_op_parallelism_threads = None + self._soft_device_placement = None + self._log_device_placement = None + self._operation_timeout_in_ms = None + self._enable_mlir_graph_optimization = None + self._optimizer_experimental_options = {} + + _python_eager_context_create_counter.get_cell().increase_by(1) + + self._is_global_context = False + + # Number of retries to give the SetServerDef step. This is useful for fault + # tolerant initial connection in high-preemption settings like + # ParameterServerStrategy training. + self._set_server_def_retries = 0 + + # pylint: enable=redefined-outer-name + + def _set_global_seed(self, seed): + """Set a global eager mode seed for random ops.""" + self._seed = seed + # `random.Random(seed)` needs `seed` to be hashable, while values of type + # e.g. `np.int64` or `np.ndarray` are not. We use `int(...)` to convert them + # to int. 
+ try: + hash(seed) + self._rng = random.Random(seed) + except TypeError: + seed = int(np.array(seed)) + self._rng = random.Random(seed) + # Also clear the kernel cache, to reset any existing seeds + if self._context_handle is not None: + pywrap_tfe.TFE_ContextClearCaches(self._context_handle) + + def _internal_operation_seed(self): + """Returns a fake operation seed. + + In eager mode, user shouldn't set or depend on operation seed. + Here, we generate a random seed based on global seed to make + operation's randomness different and depend on the global seed. + + Returns: + A fake operation seed based on global seed. + """ + return self._rng.randint(0, _MAXINT32) + + def _initialize_logical_devices(self): + """Helper to initialize devices.""" + # Store list of devices + logical_devices = [] + context_devices = [] + device_list = pywrap_tfe.TFE_ContextListDevices(self._context_handle) + try: + self._num_gpus = 0 + current_job, current_task = None, None + server_def = self._server_def or self._collective_ops_server_def + if server_def is not None: + current_job, current_task = server_def.job_name, server_def.task_index + for i in range(pywrap_tfe.TF_DeviceListCount(device_list)): + dev_name = pywrap_tfe.TF_DeviceListName(device_list, i) + context_devices.append(pydev.canonical_name(dev_name)) + spec = pydev.DeviceSpec.from_string(dev_name) + # If the job is localhost, we assume that the cluster has not yet been + # configured and thus clear the job, replica & task. 
+ if spec.job == "localhost": + spec = spec.replace(job=None, replica=None, task=None) + logical_devices.append( + LogicalDevice(name=spec.to_string(), device_type=spec.device_type)) + dev_type = pywrap_tfe.TF_DeviceListType(device_list, i) + if (dev_type == "GPU" and spec.job == current_job and + spec.task == current_task): + self._num_gpus += 1 + + finally: + self._logical_devices = logical_devices + self._context_devices = context_devices + pywrap_tfe.TF_DeleteDeviceList(device_list) + + def ensure_initialized(self): + """Initialize handle and devices if not already done so.""" + if self._initialized: + return + with self._initialize_lock: + if self._initialized: + return + assert self._context_devices is None + opts = pywrap_tfe.TFE_NewContextOptions() + try: + config_str = self.config.SerializeToString() + pywrap_tfe.TFE_ContextOptionsSetConfig(opts, config_str) + if self._device_policy is not None: + pywrap_tfe.TFE_ContextOptionsSetDevicePlacementPolicy( + opts, self._device_policy) + if self._mirroring_policy is not None: + pywrap_tfe.TFE_ContextOptionsSetMirroringPolicy( + opts, self._mirroring_policy) + if self._default_is_async == ASYNC: + pywrap_tfe.TFE_ContextOptionsSetAsync(opts, True) + if self._use_tfrt is not None: + pywrap_tfe.TFE_ContextOptionsSetTfrt(opts, self._use_tfrt) + pywrap_tfe.TFE_ContextOptionsSetRunEagerOpAsFunction(opts, True) + pywrap_tfe.TFE_ContextOptionsSetJitCompileRewrite( + opts, self._jit_compile_rewrite) + context_handle = pywrap_tfe.TFE_NewContext(opts) + finally: + pywrap_tfe.TFE_DeleteContextOptions(opts) + assert not (self._server_def and self._collective_ops_server_def), ( + "Cannot enable remote execution as well as collective ops at the " + "moment. If this is important to you, please file an issue.") + if self._server_def is not None: + server_def_str = self._server_def.SerializeToString() + timeout = 0 # Indicates no timeout. 
+ pywrap_tfe.TFE_ContextSetServerDefWithTimeoutAndRetries( + context_handle, _KEEP_ALIVE_SECS, server_def_str, timeout, + self._set_server_def_retries) + elif self._collective_ops_server_def is not None: + server_def_str = self._collective_ops_server_def.SerializeToString() + pywrap_tfe.TFE_EnableCollectiveOps(context_handle, server_def_str) + + self._context_handle = context_handle + self._initialize_logical_devices() + self._initialized = True + + if self._is_global_context: + pywrap_tfe.TFE_Py_SetCEagerContext(self._context_handle) + + def ensure_uninitialized(self): + """Uninitialize handle and devices if not already done so.""" + with self._initialize_lock: + if not self._initialized: + return + self._context_devices = None + self._logical_devices = None + self._server_def = None + self._initialized = False + + if self._is_global_context: + pywrap_tfe.TFE_Py_SetCEagerContext(None) + + self._context_handle = None + + def mark_as_global_context(self): + # If the context was already initialized, publish it. Otherwise wait with + # publication until it's initialized. + if self._initialized: + pywrap_tfe.TFE_Py_SetCEagerContext(self._context_handle) + self._is_global_context = True + + def _clear_caches(self): + self.ones_rank_cache().flush() + self.zeros_cache().flush() + pywrap_tfe.TFE_ClearScalarCache() + + def get_server_def(self): + return self._server_def + + def set_server_def(self, server_def, keep_alive_secs=_KEEP_ALIVE_SECS): + """Allow setting a server_def on the context. + + When a server def is replaced, it effectively clears a bunch of caches + within the context. If you attempt to use a tensor object that was pointing + to a tensor on the remote device, it will raise an error. + + Args: + server_def: A tensorflow::ServerDef proto. Enables execution on remote + devices. + keep_alive_secs: Num. seconds after which the remote end will hang up. As + long as the client is still alive, the server state for the context will + be kept alive. 
If the client is killed (or there is some failure), the + server will clean up its context keep_alive_secs after the final RPC it + receives. + + Raises: + ValueError: if server_def is None. + """ + if not server_def: + raise ValueError("server_def is None.") + + self._server_def = server_def + + if self._context_handle: + server_def_str = server_def.SerializeToString() + pywrap_tfe.TFE_ContextSetServerDef(self._context_handle, keep_alive_secs, + server_def_str) + self._initialize_logical_devices() + + # Clear all the caches in case there are remote tensors in them. + self._clear_caches() + # Also clear the device parsing cache since it caches the resolution of + # partial device names, which may become different due to the set_server_def + # call as we may have defined different devices. + _device_parsing_cache.clear() + + def update_server_def(self, server_def, keep_alive_secs=_KEEP_ALIVE_SECS): + """Update a server_def on the context. + + Args: + server_def: A tensorflow::ServerDef proto. Enables execution on remote + devices. + keep_alive_secs: Num. seconds after which the remote end will hang up. As + long as the client is still alive, the server state for the context will + be kept alive. If the client is killed (or there is some failure), the + server will clean up its context keep_alive_secs after the final RPC it + receives. + + Raises: + ValueError: if server_def is None. + """ + if not server_def: + raise ValueError("server_def is None.") + + self._server_def = server_def + + if self._context_handle: + server_def_str = server_def.SerializeToString() + pywrap_tfe.TFE_ContextUpdateServerDef(self._context_handle, + keep_alive_secs, server_def_str) + self._initialize_logical_devices() + + self._clear_caches() + + def check_alive(self, worker_name): + """Checks whether a remote worker is alive or not. + + Args: + worker_name: a string representing the remote worker. It must be a fully + specified name like "/job:worker/replica:0/task:0". 
+ + Returns: + a boolean indicating whether the remote worker is alive or not. + + Raises: + ValueError: if context is not initialized. + """ + # TODO(yuefengz): support checking multiple workers. + if self._context_handle: + return pywrap_tfe.TFE_ContextCheckAlive(self._context_handle, worker_name) + else: + raise ValueError("Context is not initialized.") + + def sync_executors(self): + """Sync both local executors and the ones on remote workers. + + In async execution mode, local function calls can return before the + corresponding remote op/function execution requests are completed. Calling + this method creates a synchronization barrier for remote executors. It only + returns when all remote pending nodes are finished, potentially with errors + if any remote executors are in error state. + + Raises: + ValueError: if context is not initialized. + """ + if self._context_handle: + pywrap_tfe.TFE_ContextSyncExecutors(self._context_handle) + else: + raise ValueError("Context is not initialized.") + + def clear_executor_errors(self): + """Clear errors in both local executors and remote workers. + + After receiving errors from remote workers, additional requests on the fly + could further taint the status on the remote workers due to the async nature + of remote execution. Calling this method block on waiting for all pending + nodes in remote executors to finish and clear their error statuses. + + Raises: + ValueError: if context is not initialized. 
+ """ + if self._context_handle: + pywrap_tfe.TFE_ContextClearExecutors(self._context_handle) + else: + raise ValueError("Context is not initialized.") + + def configure_coordination_service(self, + service_type, + service_leader="", + enable_health_check=True, + cluster_register_timeout_in_ms=0, + heartbeat_timeout_in_ms=0, + shutdown_barrier_timeout_in_ms=0, + coordinated_jobs=None, + allow_new_incarnation_to_reconnect=False): + """Enable distributed coordination service with specified configs.""" + if self._context_handle: + logging.warning("Configuring coordination service type may not be " + "effective because the context is already initialized.") + config = coordination_config_pb2.CoordinationServiceConfig() + config.service_type = service_type + if service_leader: + config.service_leader = pydev.canonical_name(service_leader) + config.enable_health_check = enable_health_check + config.cluster_register_timeout_in_ms = cluster_register_timeout_in_ms + config.heartbeat_timeout_in_ms = heartbeat_timeout_in_ms + config.shutdown_barrier_timeout_in_ms = shutdown_barrier_timeout_in_ms + config.allow_new_incarnation_to_reconnect = ( + allow_new_incarnation_to_reconnect) + if coordinated_jobs is not None: + if isinstance(coordinated_jobs, list): + config.coordinated_job_list.extend(coordinated_jobs) + else: + raise ValueError("`coordinated_jobs` must be list[CoordinatedJob] or " + "None, but got: %s" % (coordinated_jobs,)) + self._coordination_service_config = config + + @property + def coordination_service(self): + return self._coordination_service_config + + def set_config_key_value(self, key, value): + ensure_initialized() + pywrap_tfe.TFE_InsertConfigKeyValue(self._context_handle, key, value) + + # If `timeout_in_ms=0`, this will block until the key-value is set or the + # worker shuts down. 
+ def get_config_key_value(self, key, timeout_in_ms=0): + ensure_initialized() + with c_api_util.tf_buffer() as buffer_: + pywrap_tfe.TFE_GetConfigKeyValue(self._context_handle, key, + timeout_in_ms, buffer_) + value = pywrap_tf_session.TF_GetBuffer(buffer_).decode("utf-8") + return value + + def delete_config_key_value(self, key): + ensure_initialized() + pywrap_tfe.TFE_DeleteConfigKeyValue(self._context_handle, key) + + def report_error_to_cluster(self, error_code, error_message): + """Report error to other members in a multi-client cluster. + + Args: + error_code: a `tf.errors` error code. + error_message: a string. The error message. + """ + if self._context_handle: + pywrap_tfe.TFE_ReportErrorToCluster(self._context_handle, error_code, + error_message) + else: + raise ValueError("Context is not initialized.") + + def get_task_states(self, job_configs): + """Get task states from the Coordination Service. + + Args: + job_configs: A list of tuples of job name and task number. + + Returns: + A list of TF_Status. + """ + if self._context_handle: + job_names, task_nums = zip(*job_configs) + return pywrap_tfe.TFE_GetTaskStates(self._context_handle, job_names, + task_nums) + else: + raise ValueError("Context is not initialized.") + + def wait_at_barrier(self, barrier_id, timeout_in_ms): + """Blocks until all coordinated tasks are at the barrier. + + The barrier may fail if it times out or if one of the tasks is unhealthy. + + Args: + barrier_id: Unique string identifying the barrier. + timeout_in_ms: Duration before the barrier times out and fails. + """ + ensure_initialized() + pywrap_tfe.TFE_WaitAtBarrier(self._context_handle, barrier_id, + timeout_in_ms) + + def clear_kernel_cache(self): + """Clear kernel cache and reset all stateful kernels.""" + if self._context_handle is not None: + pywrap_tfe.TFE_ContextClearCaches(self._context_handle) + + def enable_collective_ops(self, server_def): + """Enable distributed collective ops with an appropriate server_def. 
+ + Args: + server_def: A tensorflow::ServerDef proto. Enables execution on remote + devices. + + Raises: + ValueError: if server_def is None. + RuntimeError: if this method is not called at program startup. + """ + if not server_def: + raise ValueError("server_def is None.") + + self._collective_ops_server_def = server_def + + # TODO(b/129298253): Allow creating datasets/tensors before enabling + # collective ops. + if self._context_handle is not None: + logging.warning("Enabling collective ops after program startup may cause " + "error when accessing previously created tensors.") + with self._initialize_lock: + assert self._initialized + server_def_str = self._collective_ops_server_def.SerializeToString() + pywrap_tfe.TFE_EnableCollectiveOps(self._context_handle, server_def_str) + self._initialize_logical_devices() + self._clear_caches() + + def configure_collective_ops( + self, + collective_leader="", + scoped_allocator_enabled_ops=("CollectiveReduce",), + use_nccl_communication=False, + device_filters=None): + """Configure collective ops. + + Collective group leader is necessary for collective ops to run, other + configurations are mainly for the purpose of performance. + + Args: + collective_leader: a device string for collective leader, e.g. + "/job:worker/replica:0/task:0"; empty string means local execution of + collective ops. + scoped_allocator_enabled_ops: a tuple or a list of op names for scoped + allocator to run with. + use_nccl_communication: whether to use nccl communication for collective + ops. + device_filters: a tuple or a list of device strings. If set, corresponding + task can only see the devices filtered by these device filters. + + Raises: + RuntimeError: if this method is not called at program startup. 
+ """ + if self._collective_leader is not None: + if (self._collective_leader != collective_leader or + self._collective_scoped_allocator_enabled_ops != + scoped_allocator_enabled_ops or + self._collective_use_nccl_communication != use_nccl_communication or + self._collective_device_filters != device_filters): + raise ValueError("Collective ops are already configured.") + else: + return + + if self._context_handle is not None: + raise RuntimeError("Collective ops must be configured at program startup") + + self._collective_leader = collective_leader + self._collective_scoped_allocator_enabled_ops = scoped_allocator_enabled_ops + self._collective_use_nccl_communication = use_nccl_communication + self._collective_device_filters = device_filters + + def abort_collective_ops(self, code, message): + """Abort the collective ops. + + This is intended to be used when a peer failure is detected, which allows + the user to handle the case instead of hanging. This aborts all on-going + collectives. After all subsequent collectives error immediately, and you + need to reset_context() to use collectives again. + + Args: + code: a `tf.errors` error code. + message: a string. The error message. + """ + self.ensure_initialized() + pywrap_tfe.TFE_AbortCollectiveOps(self._handle, code, message) + + def check_collective_ops_peer_health(self, task, timeout_in_ms): + """Check collective peer health. + + This probes each task to see if they're still alive. Note that restarted + tasks are considered a different one, and they're considered not healthy. + + This should only be used in multi client multi worker training. + + Args: + task: a task string, must be in the format of /job:xxx/replica:0/task:N. + timeout_in_ms: an integer, the timeout. If zero, there's no timeout. + + Raises: + tf.errors.UnavailableError: when a peer is down. + tf.errors.FailedPreconditionError: when a peer is a different one from the + one this task has talked to, e.g. the peer has restarted. 
  @property
  def _handle(self):
    # Raw native TFE_Context handle; only valid after ensure_initialized().
    if self._context_handle is None:
      raise AssertionError("Context must be initialized first.")

    return self._context_handle

  @property
  def _devices(self):
    # Cached list of device names, populated during context initialization.
    if self._context_devices is None:
      raise AssertionError("Context must be initialized first.")

    return self._context_devices

  def __str__(self):
    if self._context_handle is None:
      return "Eager TensorFlow Context. Devices currently uninitialized."
    else:
      devices = self._devices
      lines = ["Eager TensorFlow Context with %d devices" % (len(devices))]
      for i, d in enumerate(devices):
        lines.append("  Device %d: %s" % (i, d))
      return "\n".join(lines)

  @tf_contextlib.contextmanager
  def _mode(self, mode):
    """A context manager to allow setting the mode to EAGER/GRAPH."""
    ctx = self._thread_local_data
    old_is_eager = ctx.is_eager
    ctx.is_eager = mode == EAGER_MODE
    if mode == EAGER_MODE:
      # Entering graph mode does not provide us with sufficient information to
      # record a context switch; graph-based context switches are only logged
      # when a graph is registered as the default graph.
      # NOTE(review): `eager_mode` here is presumably the module-level
      # re-entry helper defined elsewhere in this file — confirm.
      self.context_switches.push(False, eager_mode, None)
    try:
      yield
    finally:
      # Restore the previous mode even if the body raised.
      ctx.is_eager = old_is_eager
      if mode == EAGER_MODE:
        self.context_switches.pop()

  def executing_eagerly(self):
    """Returns True if current thread has eager executing enabled."""
    return self._thread_local_data.is_eager

  def ones_rank_cache(self):
    """Per-device cache for scalars."""
    return _tensor_caches_map[self._id].ones_rank_cache

  def zeros_cache(self):
    """Per-device cache for scalars."""
    return _tensor_caches_map[self._id].zeros_cache

  @property
  def scope_name(self):
    """Returns scope name for the current thread."""
    return self._thread_local_data.scope_name

  @scope_name.setter
  def scope_name(self, s):
    """Sets scope name for the current thread."""
    self._thread_local_data.scope_name = s

  @property
  def device_name(self):
    """Returns the device name for the current thread."""
    return self._thread_local_data.device_name

  @property
  def device_spec(self):
    """Returns the device spec for the current thread."""
    return self._thread_local_data.device_spec

  def _set_device(self, device_name, device_spec):
    # Both forms are kept in sync so callers can use whichever is cheaper.
    self._thread_local_data.device_name = device_name
    self._thread_local_data.device_spec = device_spec

  def device(self, name):
    """Context-manager to force placement of operations and Tensors on a device.

    Args:
      name: Name of the device or None to get default placement.

    Returns:
      Context manager that forces device placement.

    Raises:
      ValueError: If name is not a string or is an invalid device name.
      RuntimeError: If device scopes are not properly nested.
    """
    # Normalize LogicalDevice / DeviceSpec arguments down to a plain string.
    if isinstance(name, LogicalDevice):
      name = name.name
    elif pydev.is_device_spec(name):
      name = name.to_string()
    return _EagerDeviceContext(self, name)
+ """ + if isinstance(name, LogicalDevice): + name = name.name + elif pydev.is_device_spec(name): + name = name.to_string() + return _EagerDeviceContext(self, name) + + def devices(self): + """List of the names of devices available to execute operations.""" + return self._devices + + def host_address_space(self): + self.ensure_initialized() + with c_api_util.tf_buffer() as buffer_: + pywrap_tfe.TFE_HostAddressSpace(self._context_handle, buffer_) + address_space = pywrap_tf_session.TF_GetBuffer(buffer_).decode("utf-8") + return address_space + + # TODO(fishx): remove this property. + @property + def execution_mode(self): + """Gets execution mode for current thread.""" + return ASYNC if self.is_async() else SYNC + + @execution_mode.setter + def execution_mode(self, mode): + """Sets execution mode for current thread.""" + if mode not in (None, SYNC, ASYNC): + raise ValueError("Execution mode should be None/SYNC/ASYNC. Got %s" % + mode) + + if mode is None: + mode = SYNC + + enable_async = (mode == ASYNC) + if self.is_async() != enable_async: + # Only set the execution mode if the context has already been initialized + if self._context_handle is not None: + self.executor.wait() + executor_new = executor.new_executor(enable_async) + self._thread_local_data.executor = executor_new + pywrap_tfe.TFE_ContextSetExecutorForThread(self._context_handle, + executor_new.handle()) + else: + self._default_is_async = enable_async + + def is_async(self): + if self._context_handle is not None: + return self.executor.is_async() + else: + return self._default_is_async + + @property + def executor(self): + self.ensure_initialized() + return executor.Executor( + pywrap_tfe.TFE_ContextGetExecutorForThread(self._context_handle)) + + @executor.setter + def executor(self, e): + self.ensure_initialized() + pywrap_tfe.TFE_ContextSetExecutorForThread(self._context_handle, e.handle()) + + @property + def config(self): + """Return the ConfigProto with all runtime deltas applied.""" + # Ensure 
physical devices have been discovered and config has been imported + self._initialize_physical_devices() + + config = config_pb2.ConfigProto() + if self._config is not None: + config.CopyFrom(self._config) + + if self._optimizer_jit is not None: + config.graph_options.optimizer_options.global_jit_level = ( + config_pb2.OptimizerOptions.ON_1 + if self._optimizer_jit else config_pb2.OptimizerOptions.OFF) + if self._intra_op_parallelism_threads is not None: + config.intra_op_parallelism_threads = self._intra_op_parallelism_threads + if self._inter_op_parallelism_threads is not None: + config.inter_op_parallelism_threads = self._inter_op_parallelism_threads + + if self._soft_device_placement is not None: + config.allow_soft_placement = self._soft_device_placement + else: + config.allow_soft_placement = self.executing_eagerly() + + if self._log_device_placement is not None: + config.log_device_placement = self._log_device_placement + + if self._operation_timeout_in_ms is not None: + config.operation_timeout_in_ms = self._operation_timeout_in_ms + + is_mlir_bridge_enabled = pywrap_tfe.TF_IsMlirBridgeEnabled() + config.experimental.mlir_bridge_rollout = is_mlir_bridge_enabled + if (is_mlir_bridge_enabled == + config_pb2.ConfigProto.Experimental.MLIR_BRIDGE_ROLLOUT_ENABLED): + config.experimental.enable_mlir_bridge = True + + if self._enable_mlir_graph_optimization is not None: + config.experimental.enable_mlir_graph_optimization = ( + self._enable_mlir_graph_optimization) + + def rewriter_toggle(option): + toggle = self._optimizer_experimental_options.get(option, None) + if toggle is None: + return + + setattr(config.graph_options.rewrite_options, option, + (rewriter_config_pb2.RewriterConfig.ON + if toggle else rewriter_config_pb2.RewriterConfig.OFF)) + + def rewriter_bool(option): + toggle = self._optimizer_experimental_options.get(option, None) + if toggle is None: + return + + setattr(config.graph_options.rewrite_options, option, toggle) + + 
rewriter_toggle("layout_optimizer") + rewriter_toggle("constant_folding") + rewriter_toggle("shape_optimization") + rewriter_toggle("remapping") + rewriter_toggle("arithmetic_optimization") + rewriter_toggle("dependency_optimization") + rewriter_toggle("loop_optimization") + rewriter_toggle("function_optimization") + rewriter_toggle("debug_stripper") + rewriter_bool("disable_model_pruning") + rewriter_toggle("scoped_allocator_optimization") + rewriter_toggle("pin_to_host_optimization") + rewriter_toggle("implementation_selector") + rewriter_toggle("auto_mixed_precision") + rewriter_toggle("use_plugin_optimizers") + rewriter_bool("disable_meta_optimizer") + rewriter_toggle("auto_mixed_precision_onednn_bfloat16") + rewriter_toggle("auto_mixed_precision_mkl") + nodes = self._optimizer_experimental_options.get("min_graph_nodes", None) + if nodes is not None: + config.graph_options.rewrite_options.min_graph_nodes = nodes + + # Compute device counts + config.device_count["CPU"] = 0 + config.device_count["GPU"] = 0 + for dev in self._physical_devices: + if dev not in self._visible_device_list: + continue + + virtual_devices = self._virtual_device_map.get(dev) + if virtual_devices is None: + config.device_count[dev.device_type] += 1 + else: + config.device_count[dev.device_type] += len(virtual_devices) + + # Configure gpu_options + gpu_options = self._compute_gpu_options() + config.gpu_options.MergeFrom(gpu_options) + + # Configure collective ops + if self._collective_leader: + config.experimental.collective_group_leader = self._collective_leader + if self._collective_scoped_allocator_enabled_ops: + rewrite_options = config.graph_options.rewrite_options + rewrite_options.scoped_allocator_optimization = ( + rewriter_config_pb2.RewriterConfig.ON) + del rewrite_options.scoped_allocator_opts.enable_op[:] + for op in self._collective_scoped_allocator_enabled_ops: + rewrite_options.scoped_allocator_opts.enable_op.append(op) + if self._collective_use_nccl_communication: + 
config.experimental.collective_nccl = True + if self._collective_device_filters: + del config.device_filters[:] + for f in self._collective_device_filters: + config.device_filters.append(f) + + # Configure coordination service + if self._coordination_service_config: + config.experimental.coordination_config.CopyFrom( + self._coordination_service_config) + + return config + + def _compute_gpu_options(self): + """Build the GPUOptions proto.""" + visible_device_list = [] + virtual_devices = [] + gpu_index = -1 + memory_growths = set() + gpu_devices = self.list_physical_devices("GPU") + pluggable_devices = self._pluggable_devices + compatible_devices = gpu_devices + for dev in pluggable_devices: + if dev not in gpu_devices: + compatible_devices.append(dev) + for dev in compatible_devices: + gpu_index += 1 + + if dev not in self._visible_device_list: + continue + + growth = self._memory_growth_map[dev] + memory_growths.add(growth) + visible_device_list.append(str(gpu_index)) + + if self._virtual_device_map: + vdevs = self._virtual_device_map.get(dev, []) + device_ordinals = [] + device_limits = [] + priority = [] + for virt_dev in vdevs: + if virt_dev.experimental_device_ordinal is not None: + device_ordinals.append(virt_dev.experimental_device_ordinal) + device_limits.append(virt_dev.memory_limit) + if virt_dev.experimental_priority is not None: + priority.append(virt_dev.experimental_priority) + # If priority is specified, it must be specified for all virtual + # devices. + if priority and len(device_limits) != len(priority): + raise ValueError("priority must be specified for all virtual devices") + # If device_ordinals is specified, it must be specified for all virtual + # devices. 
+ if device_ordinals and len(device_limits) != len(device_ordinals): + raise ValueError( + "device_ordinals must be specified for all virtual devices") + + virtual_devices.append( + config_pb2.GPUOptions.Experimental.VirtualDevices( + memory_limit_mb=device_limits, + priority=priority, + device_ordinal=device_ordinals)) + + # Only compute growth if virtual devices have not been configured and we + # have GPUs + if not virtual_devices and memory_growths: + if len(memory_growths) > 1: + raise ValueError("Memory growth cannot differ between GPU devices") + allow_growth = memory_growths.pop() + else: + allow_growth = None + + return config_pb2.GPUOptions( + allow_growth=allow_growth, + visible_device_list=",".join(visible_device_list), + experimental=config_pb2.GPUOptions.Experimental( + virtual_devices=virtual_devices)) + + @property + def function_call_options(self): + """Returns function call options for current thread. + + Note that the returned object is still referenced by the eager context. + + Returns: the FunctionCallOptions for current thread. + """ + if self._thread_local_data.function_call_options is None: + config = self.config + + # Default to soft placement for functions unless specified + if self._soft_device_placement is None: + config.allow_soft_placement = True + self._thread_local_data.function_call_options = FunctionCallOptions( + config_proto=config) + + return self._thread_local_data.function_call_options + + @function_call_options.setter + def function_call_options(self, options): + """Returns function call options for current thread.""" + self._thread_local_data.function_call_options = options + + def num_gpus(self): + """The number of GPUs available to execute operations.""" + self.ensure_initialized() + return self._num_gpus + + def add_c_function(self, c_func): + """Add a C API TF_Function to the context. + + Once added, the function (identified by its name) can be executed like any + other operation. 
  def get_c_function(self, name):
    """Get a C API TF_Function from the context.

    Args:
      name: Name of the function to get.

    Returns:
      A ScopedTFFunction wrapping the C API TF_Function.
    """
    self.ensure_initialized()
    return c_api_util.ScopedTFFunction(
        pywrap_tfe.TFE_ContextGetFunction(self._handle, name), name
    )

  def add_function_def(self, fdef):
    """Add a function definition to the context.

    Once added, the function (identified by its name) can be executed like any
    other operation.

    Args:
      fdef: A FunctionDef protocol buffer message.
    """
    self.ensure_initialized()
    # OSS builds cross the C API boundary with serialized protos; internal
    # builds can hand the proto object over directly.
    if is_oss:
      fdef_string = fdef.SerializeToString()
      pywrap_tfe.TFE_ContextAddFunctionDef(
          self._handle, fdef_string, len(fdef_string)
      )
    else:
      pywrap_tfe.TFE_ContextAddFunctionDefNoSerialization(self._handle, fdef)

  def get_function_def(self, name):
    """Get a function definition from the context.

    Args:
      name: function signature name.

    Returns:
      The requested FunctionDef.

    Raises:
      tf.errors.NotFoundError: if name is not the name of a registered function.
    """
    if is_oss:
      with c_api_util.tf_buffer() as buffer_:
        pywrap_tfe.TFE_ContextGetFunctionDef(self._handle, name, buffer_)
        proto_data = pywrap_tf_session.TF_GetBuffer(buffer_)
      function_def = function_pb2.FunctionDef()
      function_def.ParseFromString(proto_data)
    else:
      function_def = pywrap_tfe.TFE_ContextGetFunctionDefNoSerialization(
          self._handle, name
      )
    return function_def

  def get_graph_debug_info(self, name):
    """Get GraphDebugInfo associated with a function from the context.

    Args:
      name: function signature name.

    Returns:
      The requested GraphDebugInfo.

    Raises:
      tf.errors.NotFoundError: if name is not the name of a registered function.
    """
    with c_api_util.tf_buffer() as buffer_:
      pywrap_tfe.TFE_ContextGetGraphDebugInfo(self._handle, name, buffer_)
      proto_data = pywrap_tf_session.TF_GetBuffer(buffer_)
    graph_debug_info = graph_debug_info_pb2.GraphDebugInfo()
    graph_debug_info.ParseFromString(proto_data)

    return graph_debug_info

  def is_custom_device(self, device_name):
    """Calls TFE_IsCustomDevice. See the non-member function."""
    self.ensure_initialized()
    return pywrap_tfe.TFE_Py_IsCustomDevice(self._handle, device_name)

  def register_custom_device(self, device_capsule, device_name,
                             device_info_capsule):
    """Calls TFE_RegisterCustomDevice. See the non-member function."""
    self.ensure_initialized()
    pywrap_tfe.TFE_Py_RegisterCustomDevice(self._handle, device_capsule,
                                           device_name, device_info_capsule)

  def pack_eager_tensors(self, tensors):
    """Pack multiple `EagerTensor`s of the same dtype and shape.

    Args:
      tensors: a list of EagerTensors to pack.

    Returns:
      A packed EagerTensor.
    """
    self.ensure_initialized()
    return pywrap_tfe.TFE_Py_PackEagerTensors(self._handle, tensors)

  def list_function_names(self):
    """Get a list of names of registered functions.

    Returns:
      A set of names of all registered functions for the context.
    """
    self.ensure_initialized()
    return set(pywrap_tfe.TFE_ContextListFunctionNames(self._handle))

  def remove_function(self, name):
    """Remove a function from the context.

    Once removed, the function cannot be executed anymore.

    Args:
      name: function signature name.
    """
    self.ensure_initialized()
    pywrap_tfe.TFE_ContextRemoveFunction(self._handle, name)
+ """ + self.ensure_initialized() + pywrap_tfe.TFE_ContextRemoveFunction(self._handle, name) + + def has_function(self, name): + """Check if a function `name` is registered.""" + self.ensure_initialized() + return bool(pywrap_tfe.TFE_ContextHasFunction(self._handle, name)) + + @property + def function_scope_id(self): + """Returns an id that is unique to each scope holding functions.""" + return id(self._context_handle) + + def call_function(self, name, tensor_inputs, num_outputs): + """Calls the function associated with the given name.""" + attrs = tuple( + itertools.chain( + *self.function_call_options.as_attrs().items() + ) + ) + + cancellation_context = cancellation.context() + if cancellation_context is None: + outputs = execute.execute( + name.decode("utf-8"), + num_outputs=num_outputs, + inputs=tensor_inputs, + attrs=attrs, + ctx=self, + ) + else: + outputs = execute.execute_with_cancellation( + name.decode("utf-8"), + num_outputs=num_outputs, + inputs=tensor_inputs, + attrs=attrs, + ctx=self, + cancellation_manager=cancellation_context, + ) + # Empty list means no function outputs so return None + outputs = outputs or None + + return outputs + + def add_op_callback(self, callback): + """Add a post-op callback to the context. + + A post-op callback is invoked immediately after an eager operation or + function has finished execution or after a op has been added to a graph, + providing access to the op's type, name input and output tensors. Multiple + op callbacks can be added, in which case the callbacks will be invoked in + the order in which they are added. + + Args: + callback: a callable of the signature `f(op_type, inputs, attrs, outputs, + op_name=None, graph=None)`. See doc strings in `op_callbacks.py` for + details on the function signature and its semantics. 
+ """ + if callback not in self._thread_local_data.op_callbacks: + self._thread_local_data.op_callbacks.append(callback) + + def remove_op_callback(self, callback): + """Remove an already-registered op callback. + + Args: + callback: The op callback to be removed. + + Raises: + KeyError: If `callback` is not already registered. + """ + if callback not in self._thread_local_data.op_callbacks: + raise KeyError("The specified op callback has not been registered, " + "and hence cannot be removed.") + del self._thread_local_data.op_callbacks[ + self._thread_local_data.op_callbacks.index(callback)] + + @property + def op_callbacks(self): + return self._thread_local_data.op_callbacks + + @property + def invoking_op_callbacks(self): + return self._thread_local_data.invoking_op_callbacks + + @invoking_op_callbacks.setter + def invoking_op_callbacks(self, value): + self._thread_local_data.invoking_op_callbacks = value + + def _initialize_physical_devices(self, reinitialize=False): + """Gets local devices visible to the system. + + Args: + reinitialize: If True, reinitializes self._physical_devices so that + dynamic registered devices will also be visible to the python front-end. + """ + # We lazy initialize self._physical_devices since we do not want to do this + # the constructor since the backend may not be initialized yet. + with self._device_lock: + if not reinitialize and self._physical_devices is not None: + return + + devs = pywrap_tfe.TF_ListPhysicalDevices() + self._physical_devices = [ + PhysicalDevice(name=d.decode(), device_type=d.decode().split(":")[1]) + for d in devs + ] + self._physical_device_to_index = { + p: i for i, p in enumerate(self._physical_devices) + } + # We maintain a separate list just so we can check whether the device in + # _physical_devices is a PluggableDevice. 
+ pluggable_devs = pywrap_tfe.TF_ListPluggablePhysicalDevices() + self._pluggable_devices = [ + PhysicalDevice(name=d.decode(), device_type=d.decode().split(":")[1]) + for d in pluggable_devs + ] + + self._visible_device_list = list(self._physical_devices) + self._memory_growth_map = { + d: None + for d in self._physical_devices + if d.device_type == "GPU" or d in self._pluggable_devices + } + + # Import device settings that may have been passed into the constructor + self._import_config() + + def reinitialize_physical_devices(self): + """Gets local devices visible to the system.""" + # Reinitialize the physical device list after registering + # the pluggable device. + self._initialize_physical_devices(True) + + def list_physical_devices(self, device_type=None): + """List local devices visible to the system. + + This API allows a client to query the devices before they have been + initialized by the eager runtime. Additionally a user can filter by device + type, to get only CPUs or GPUs. + + Args: + device_type: Optional device type to limit results to + + Returns: + List of PhysicalDevice objects. + """ + self._initialize_physical_devices() + + if device_type is None: + return list(self._physical_devices) + + return [d for d in self._physical_devices if d.device_type == device_type] + + def get_device_details(self, device): # pylint: disable=redefined-outer-name + """Returns details about a physical devices. + + Args: + device: A `tf.config.PhysicalDevice` returned by + `tf.config.list_physical_devices` or `tf.config.get_visible_devices`. + + Returns: + A dict with string keys. 
+ """ + if not isinstance(device, PhysicalDevice): + raise ValueError("device must be a tf.config.PhysicalDevice, but got: " + "%s" % (device,)) + if (self._physical_device_to_index is None or + device not in self._physical_device_to_index): + raise ValueError("The PhysicalDevice must be one obtained from " + "calling `tf.config.list_physical_devices`, but got: " + "%s" % (device,)) + index = self._physical_device_to_index[device] + details = pywrap_tfe.TF_GetDeviceDetails(index) + + # Change compute_capability from a string to a tuple + if "compute_capability" in details: + try: + major, minor = details["compute_capability"].split(".") + details["compute_capability"] = (int(major), int(minor)) + except ValueError: + raise RuntimeError("Device returned compute capability an in invalid " + "format: %s" % details["compute_capability"]) + return details + + def _import_config(self): + """Import config if passed in during construction. + + If Context was created with a ConfigProto such as when calling + tf.compat.v1.enable_eager_execution(), then we need to pull out the + various pieces we might be replacing and import then into our internal + class representation. + """ + if self._config is None: + return + + num_cpus = self._config.device_count.get("CPU", 1) + if num_cpus != 1: + cpus = [d for d in self._physical_devices if d.device_type == "CPU"] + if num_cpus == 0: + self.set_visible_devices([], "CPU") + elif num_cpus > 1: + self.set_logical_device_configuration( + cpus[0], [LogicalDeviceConfiguration() for _ in range(num_cpus)]) + + # Parse GPU options + gpus = [d for d in self._physical_devices if d.device_type == "GPU"] + + # If there are no GPUs detected, simply ignore all the GPU options passed in + # rather than doing any validation checks. 
  def list_logical_devices(self, device_type=None):
    """Return logical devices."""
    self.ensure_initialized()
    if device_type is None:
      # Return a copy so callers cannot mutate the cached list.
      return list(self._logical_devices)

    return [d for d in self._logical_devices if d.device_type == device_type]

  def get_visible_devices(self, device_type=None):
    """Get the list of visible devices."""
    self._initialize_physical_devices()

    if device_type is None:
      return list(self._visible_device_list)

    return [
        d for d in self._visible_device_list if d.device_type == device_type
    ]

  def set_visible_devices(self, devices, device_type=None):
    """Set the list of visible devices."""
    self._initialize_physical_devices()

    if not isinstance(devices, list):
      devices = [devices]

    for d in devices:
      if d not in self._physical_devices:
        raise ValueError("Unrecognized device: %s" % repr(d))
      if device_type is not None and d.device_type != device_type:
        raise ValueError("Unrecognized device: %s" % repr(d))

    visible_device_list = []
    if device_type is not None:
      # Keep devices of other types visible; only replace this type's set.
      visible_device_list = [
          d for d in self._visible_device_list if d.device_type != device_type
      ]

    visible_device_list += devices

    # No-op check precedes the initialized check so a redundant call after
    # initialization does not raise.
    if self._visible_device_list == visible_device_list:
      return

    if self._context_handle is not None:
      raise RuntimeError(
          "Visible devices cannot be modified after being initialized")

    self._visible_device_list = visible_device_list

  def get_memory_info(self, dev):
    """Returns a dict of memory info for the device."""
    self._initialize_physical_devices()
    self.ensure_initialized()
    return pywrap_tfe.TFE_GetMemoryInfo(self._context_handle, dev)

  def reset_memory_stats(self, dev):
    """Resets the tracked memory stats for the device."""
    self._initialize_physical_devices()
    self.ensure_initialized()
    pywrap_tfe.TFE_ResetMemoryStats(self._context_handle, dev)

  def get_memory_growth(self, dev):
    """Get if memory growth is enabled for a PhysicalDevice."""
    self._initialize_physical_devices()

    if dev not in self._physical_devices:
      raise ValueError("Unrecognized device: %s" % repr(dev))

    return self._memory_growth_map[dev]

  def set_memory_growth(self, dev, enable):
    """Set if memory growth should be enabled for a PhysicalDevice."""
    self._initialize_physical_devices()

    if dev not in self._physical_devices:
      raise ValueError("Unrecognized device: %s" % repr(dev))

    # Memory growth and virtual devices are mutually exclusive.
    if dev in self._virtual_device_map:
      raise ValueError(
          "Cannot set memory growth on device when virtual devices configured")

    if dev.device_type != "GPU" and dev not in self._pluggable_devices:
      raise ValueError(
          "Cannot set memory growth on non-GPU and non-Pluggable devices")

    if self._memory_growth_map.get(dev) == enable:
      return

    if self._context_handle is not None:
      raise RuntimeError(
          "Physical devices cannot be modified after being initialized")

    self._memory_growth_map[dev] = enable

  def get_logical_device_configuration(self, dev):
    """Get the virtual device configuration for a PhysicalDevice."""
    self._initialize_physical_devices()

    if dev not in self._physical_devices:
      raise ValueError("Unrecognized device: %s" % repr(dev))

    return self._virtual_device_map.get(dev)

  def set_logical_device_configuration(self, dev, virtual_devices):
    """Set the virtual device configuration for a PhysicalDevice."""
    self._initialize_physical_devices()

    if dev not in self._physical_devices:
      raise ValueError("Unrecognized device: %s" % repr(dev))

    # Validate per-type restrictions on the virtual device settings.
    if dev.device_type == "CPU":
      for vdev in virtual_devices:
        if vdev.memory_limit is not None:
          raise ValueError("Setting memory limit on CPU virtual devices is "
                           "currently not supported")
        if vdev.experimental_priority is not None:
          raise ValueError("Setting experimental_priority on CPU virtual "
                           " devices is currently not supported")
        if vdev.experimental_device_ordinal is not None:
          raise ValueError("Setting experimental_device_ordinal on CPU virtual "
                           " devices is currently not supported")
    elif dev.device_type == "GPU":
      for vdev in virtual_devices:
        if vdev.memory_limit is None:
          raise ValueError(
              "Setting memory limit is required for GPU virtual devices")
    else:
      raise ValueError("Virtual devices are not supported for %s" %
                       dev.device_type)

    if self._virtual_device_map.get(dev) == virtual_devices:
      return

    if self._context_handle is not None:
      raise RuntimeError(
          "Virtual devices cannot be modified after being initialized")

    self._virtual_device_map[dev] = virtual_devices
  def set_logical_cpu_devices(self, num_cpus, prefix=""):
    """Set virtual CPU devices in context.

    If virtual CPU devices are already configured at context initialization
    by tf.config.set_logical_device_configuration(), this method should not be
    called.

    Args:
      num_cpus: Number of virtual CPUs.
      prefix: Device name prefix.

    Raises:
      RuntimeError: If virtual CPUs are already configured at context
        initialization.
    """
    server_def = self._server_def or self._collective_ops_server_def
    local_prefix = ["/device"]
    if server_def is not None:
      # In a cluster, only count devices that belong to this task.
      local_prefix.append("/job:%s/replica:0/task:%d" % (server_def.job_name,
                                                         server_def.task_index))
    logical_local_devices = [d for d in self.list_logical_devices("CPU") if
                             d.name.startswith(tuple(local_prefix))]
    self.ensure_initialized()
    # Error out if there are already multiple logical CPU in the context.
    if len(logical_local_devices) > 1:
      raise RuntimeError("Virtual CPUs already set, cannot modify again.")

    pywrap_tfe.TFE_SetLogicalCpuDevices(self._context_handle, num_cpus, prefix)
    self._initialize_logical_devices()

  def get_compiler_ir(
      self,
      device_name,
      platform_name,
      function_name,
      flat_args,
      captured_inputs,
      stage="hlo",
  ):
    """Get the compiler IR bytes.

    Args:
      device_name: The name of the device with the form as
        "/job:localhost/replica:0/task:0/device:CPU:0", "/device:TPU:0" etc.
        When this is used, actual device is needed for getting the compiler IR.
      platform_name: The name of the platform, e.g. "TPU". When this is used,
        first we find a device whose name contains the platform, if it is found
        we get the compiler IR by device. Otherwise the compiler IR is obtained
        as if using that device. The former logic of falling back to device is
        necessary, as there are cases of TF variables that need to access
        devices, but the upper layer may generally choose platform for getting
        compiler IR in a device-agnostic way.
      function_name: The name of the function to get the compiler IR.
      flat_args: The flat argument inputs.
      captured_inputs: The inputs that are captured.
      stage: The exported stage for the given function.

    Returns:
      The compiler IR bytes.
    """
    return pywrap_tfe.TF_GetCompilerIr(
        self._context_handle,
        function_name,
        stage,
        device_name,
        flat_args,
        captured_inputs,
        platform_name,
    )
+ """ + return pywrap_tfe.TF_GetCompilerIr( + self._context_handle, + function_name, + stage, + device_name, + flat_args, + captured_inputs, + platform_name, + ) + + @deprecated( + None, "XLA:CPU and XLA:GPU devices are deprecated", warn_once=True) + def enable_xla_devices(self): + """Enables XLA:CPU and XLA:GPU devices registration.""" + pywrap_tfe.TF_EnableXlaDevices() + + @property + def enable_mlir_bridge(self): + return pywrap_tfe.TF_IsMlirBridgeEnabled() + + @property + def enable_mlir_graph_optimization(self): + return self._enable_mlir_graph_optimization + + @enable_mlir_bridge.setter + def enable_mlir_bridge(self, enabled): + pywrap_tfe.TF_EnableMlirBridge(enabled) + self._thread_local_data.function_call_options = None + + @enable_mlir_graph_optimization.setter + def enable_mlir_graph_optimization(self, enabled): + self._enable_mlir_graph_optimization = enabled + self._thread_local_data.function_call_options = None + + @property + def optimizer_jit(self): + level = self.config.graph_options.optimizer_options.global_jit_level + return (level == config_pb2.OptimizerOptions.ON_1 or + level == config_pb2.OptimizerOptions.ON_2) + + @optimizer_jit.setter + def optimizer_jit(self, enabled): + self._optimizer_jit = enabled + + self._thread_local_data.function_call_options = None + + def get_optimizer_experimental_options(self): + """Get experimental options for the optimizer. 
+ + Returns: + Dictionary of current option values + """ + rewrite_options = self.config.graph_options.rewrite_options + options = {} + + def rewriter_toggle(option): + attr = getattr(rewrite_options, option) + if attr != 0: + options[option] = (attr == rewriter_config_pb2.RewriterConfig.ON) + + def rewriter_bool(option): + options[option] = getattr(rewrite_options, option) + + rewriter_toggle("layout_optimizer") + rewriter_toggle("constant_folding") + rewriter_toggle("shape_optimization") + rewriter_toggle("remapping") + rewriter_toggle("arithmetic_optimization") + rewriter_toggle("dependency_optimization") + rewriter_toggle("loop_optimization") + rewriter_toggle("function_optimization") + rewriter_toggle("debug_stripper") + rewriter_bool("disable_model_pruning") + rewriter_toggle("scoped_allocator_optimization") + rewriter_toggle("pin_to_host_optimization") + rewriter_toggle("implementation_selector") + rewriter_toggle("auto_mixed_precision") + rewriter_toggle("use_plugin_optimizers") + rewriter_bool("disable_meta_optimizer") + rewriter_toggle("auto_mixed_precision_onednn_bfloat16") + rewriter_toggle("auto_mixed_precision_mkl") + + if rewrite_options.min_graph_nodes != 0: + options["min_graph_nodes"] = rewrite_options.min_graph_nodes + + return options + + def set_optimizer_experimental_options(self, options): + """Set experimental options for the optimizer. 
+ + Args: + options: Dictionary of options to modify + """ + self._optimizer_experimental_options.update(options) + + self._thread_local_data.function_call_options = None + + @property + def intra_op_parallelism_threads(self): + return self.config.intra_op_parallelism_threads + + @intra_op_parallelism_threads.setter + def intra_op_parallelism_threads(self, num_threads): + if self._intra_op_parallelism_threads == num_threads: + return + + if self._context_handle is not None: + raise RuntimeError( + "Intra op parallelism cannot be modified after initialization.") + + self._intra_op_parallelism_threads = num_threads + + @property + def inter_op_parallelism_threads(self): + return self.config.inter_op_parallelism_threads + + @inter_op_parallelism_threads.setter + def inter_op_parallelism_threads(self, num_threads): + if self._inter_op_parallelism_threads == num_threads: + return + + if self._context_handle is not None: + raise RuntimeError( + "Inter op parallelism cannot be modified after initialization.") + + self._inter_op_parallelism_threads = num_threads + + @property + def soft_device_placement(self): + return self.config.allow_soft_placement + + @soft_device_placement.setter + def soft_device_placement(self, enable): + if self._context_handle is not None: + pywrap_tfe.TFE_ContextSetSoftDevicePlacement(self._handle, enable) + + self._soft_device_placement = enable + self._thread_local_data.function_call_options = None + + @property + def log_device_placement(self): + return self.config.log_device_placement + + @log_device_placement.setter + def log_device_placement(self, enable): + if self._context_handle is not None: + pywrap_tfe.TFE_ContextSetLogDevicePlacement(self._handle, enable) + + self._log_device_placement = enable + self._thread_local_data.function_call_options = None + + @property + def jit_compile_rewrite(self): + return self._jit_compile_rewrite + + @jit_compile_rewrite.setter + def jit_compile_rewrite(self, enable): + if self._context_handle is not 
None: + pywrap_tfe.TFE_ContextSetJitCompileRewrite(self._handle, enable) + self._jit_compile_rewrite = enable + + @property + def device_policy(self): + # Only get the policy from the context if it has already been initialized + if self._context_handle is not None: + return pywrap_tfe.TFE_ContextGetDevicePlacementPolicy(self._handle) + + return self._device_policy + + @device_policy.setter + def device_policy(self, policy): + if policy is None: + policy = DEVICE_PLACEMENT_SILENT + + if self._device_policy != policy: + self._device_policy = policy + + # Only set the policy if the context has already been initialized + if self._context_handle is not None: + pywrap_tfe.TFE_ContextSetThreadLocalDevicePlacementPolicy( + self._handle, self._device_policy) + + @property + def use_tfrt(self): + return self._use_tfrt + + @use_tfrt.setter + def use_tfrt(self, tfrt): + """Sets whether to use TFRT.""" + if not isinstance(tfrt, bool): + raise ValueError("Expecting a boolean but got %s" % type(tfrt)) + + if self._use_tfrt != tfrt: + if self._initialized: + raise ValueError("use_tfrt should be set before being initialized.") + self._use_tfrt = tfrt + + @property + def operation_timeout_in_ms(self): + return self.config.operation_timeout_in_ms + + @operation_timeout_in_ms.setter + def operation_timeout_in_ms(self, timeout_in_ms): + if self._operation_timeout_in_ms == timeout_in_ms: + return + + if self._context_handle is not None: + raise RuntimeError( + "Operation timeout cannot be modified after initialization.") + + self._operation_timeout_in_ms = timeout_in_ms + + def enable_run_metadata(self): + """Enables tracing of op execution via RunMetadata. + + To retrieve the accumulated metadata call context.export_run_metadata() + and to stop tracing call context.disable_run_metadata(). 
+ """ + self.ensure_initialized() + pywrap_tfe.TFE_ContextEnableRunMetadata(self._handle) + + def disable_run_metadata(self): + """Disables tracing of op execution via RunMetadata.""" + if not self._context_handle: + return + pywrap_tfe.TFE_ContextDisableRunMetadata(self._context_handle) + + def enable_graph_collection(self): + """Enables graph collection of executed functions. + + To retrieve the accumulated graphs call context.export_run_metadata() + and to stop collecting graphs call context.disable_graph_collection(). + """ + self.ensure_initialized() + pywrap_tfe.TFE_ContextEnableGraphCollection(self._handle) + + def disable_graph_collection(self): + """Disables graph collection of executed functions.""" + if not self._context_handle: + return + pywrap_tfe.TFE_ContextDisableGraphCollection(self._context_handle) + + def export_run_metadata(self): + """Returns a RunMetadata proto with accumulated information. + + The returned protocol buffer contains information since the most recent call + to either enable_run_metadata or export_run_metadata. + + Returns: + A RunMetadata protocol buffer. Or None if not enabled. + """ + if not self._context_handle: + return None + with c_api_util.tf_buffer() as buffer_: + pywrap_tfe.TFE_ContextExportRunMetadata(self._context_handle, buffer_) + proto_data = pywrap_tf_session.TF_GetBuffer(buffer_) + run_metadata = config_pb2.RunMetadata() + run_metadata.ParseFromString(compat.as_bytes(proto_data)) + return run_metadata + + def set_server_def_retries(self, retries): + """Set the number of retries to use when calling SetServerDef. + + In cases where many servers run in high-preemption environments, jobs could + be preempted during startup and initial connection via SetServerDef. Retries + allow for more robust connection in these environments. + + Args: + retries: int specifying the number of connection retries before failing. 
+ Retries follow an exponential backoff waiting period with min value 1ms, + max value 10s, and exponent 1.3. + """ + self._set_server_def_retries = retries + + @property + def context_switches(self): + """Returns a stack of context switches.""" + return self._context_switches + + +class _EagerDeviceContext(object): + """Context-manager forcing placement of ops and Tensors on a device.""" + + __slots__ = ["_device_name", "_ctx", "_stack"] + + def __init__(self, ctx, device_name): + self._device_name = device_name + self._ctx = ctx + self._stack = [] + + # TODO(b/189233748): Consolidate the device string parsing logic with + # tensorflow/core/util/device_name_utils.cc. + def __enter__(self): + ctx = self._ctx + old_device_name = ctx.device_name + old_device_spec = ctx.device_spec + new_device_name = self._device_name + cache_key = (old_device_name, new_device_name) + try: + new_device_name, new_device_spec = _device_parsing_cache[cache_key] + except TypeError: + # Error while trying to compute the cache key. + raise ValueError("Expecting a string device name. Got %s(%s)" % + (type(new_device_name), new_device_name)) + except KeyError: + # Handle a cache miss. + if new_device_name is not None: + if not isinstance(new_device_name, str): + raise ValueError("Expecting a string device name. 
Got %s(%s)" % + (type(new_device_name), new_device_name)) + device_spec = pydev.DeviceSpec.from_string(new_device_name) + if old_device_name: + new_device_spec = copy.copy(old_device_spec) + else: + ctx.ensure_initialized() + new_device_spec = pydev.DeviceSpec.from_string( + ctx._context_devices[0]) # pylint: disable=protected-access + new_device_spec = new_device_spec.make_merged_spec(device_spec) + else: + new_device_spec = pydev.DeviceSpec.from_string("") + new_device_name = new_device_spec.to_string() + _device_parsing_cache[cache_key] = (new_device_name, new_device_spec) + + ctx._set_device(new_device_name, new_device_spec) # pylint: disable=protected-access + self._stack.append((old_device_name, old_device_spec, new_device_spec)) + + def __exit__(self, *ex_info): + ctx = self._ctx + old_device_name, old_device_spec, new_device_spec = self._stack[-1] + if ctx.device_spec is not new_device_spec: + raise RuntimeError("Exiting device scope without proper scope nesting") + del self._stack[-1] + ctx._set_device(old_device_name, old_device_spec) # pylint: disable=protected-access + + +# Do not change directly. +_context = None +_context_lock = threading.Lock() + + +def _set_context_locked(ctx): + global _context + pywrap_tfe.TFE_Py_SetEagerContext(ctx) + ctx.mark_as_global_context() + _context = ctx + + +def _set_context(ctx): + with _context_lock: + _set_context_locked(ctx) + + +def _create_context(): + with _context_lock: + if _context is None: + ctx = Context() + _set_context_locked(ctx) + + +def _reset_context(): + """Clears and re-initializes the singleton context. + + Should only be used for testing. + """ + global _context + global _device_parsing_cache + + # Garbage collect and clear scalar cache to avoid Tensor from current context + # polluting next context. 
+ gc.collect() + pywrap_tfe.TFE_ClearScalarCache() + with _context_lock: + if _context is not None: + _context._clear_caches() + _context = None + _create_context() + _device_parsing_cache = {} + + +def _reset_jit_compiler_flags(): + """Clears and re-initializes the TF JIT compiler flags. + + Should only be used for testing. + """ + pywrap_tfe.TF_ResetJitCompilerFlags() + + +def context() -> Context: + """Returns a singleton context object.""" + if _context is None: + _create_context() + return _context + + +def context_safe(): + """Returns current context (or None if one hasn't been initialized).""" + return _context + + +def ensure_initialized(): + """Initialize the context.""" + context().ensure_initialized() + + +def initialize_logical_devices(): + """Initialize the virtual devices.""" + context()._initialize_logical_devices() # pylint: disable=protected-access + + +def set_global_seed(seed): + """Sets the eager mode seed.""" + context()._set_global_seed(seed) # pylint: disable=protected-access + + +def global_seed(): + """Returns the eager mode seed.""" + return context()._seed # pylint: disable=protected-access + + +def internal_operation_seed(): + """Returns the operation seed generated based on global seed.""" + return context()._internal_operation_seed() # pylint: disable=protected-access + + +@tf_export("executing_eagerly", v1=[]) +def executing_eagerly(): + """Checks whether the current thread has eager execution enabled. + + Eager execution is enabled by default and this API returns `True` + in most of cases. However, this API might return `False` in the following use + cases. + + * Executing inside `tf.function`, unless under `tf.init_scope` or + `tf.config.run_functions_eagerly(True)` is previously called. + * Executing inside a transformation function for `tf.dataset`. + * `tf.compat.v1.disable_eager_execution()` is called. + + General case: + + >>> print(tf.executing_eagerly()) + True + + Inside `tf.function`: + + >>> @tf.function + ... 
def fn(): + ... with tf.init_scope(): + ... print(tf.executing_eagerly()) + ... print(tf.executing_eagerly()) + >>> fn() + True + False + + Inside `tf.function` after `tf.config.run_functions_eagerly(True)` is called: + + >>> tf.config.run_functions_eagerly(True) + >>> @tf.function + ... def fn(): + ... with tf.init_scope(): + ... print(tf.executing_eagerly()) + ... print(tf.executing_eagerly()) + >>> fn() + True + True + >>> tf.config.run_functions_eagerly(False) + + Inside a transformation function for `tf.dataset`: + + >>> def data_fn(x): + ... print(tf.executing_eagerly()) + ... return x + >>> dataset = tf.data.Dataset.range(100) + >>> dataset = dataset.map(data_fn) + False + + Returns: + `True` if the current thread has eager execution enabled. + """ + ctx = context_safe() + if ctx is None: + return default_execution_mode == EAGER_MODE + + return ctx.executing_eagerly() + + +@tf_export(v1=["executing_eagerly"]) +def executing_eagerly_v1(): + """Checks whether the current thread has eager execution enabled. + + Eager execution is typically enabled via + `tf.compat.v1.enable_eager_execution`, but may also be enabled within the + context of a Python function via tf.contrib.eager.py_func. + + When eager execution is enabled, returns `True` in most cases. However, + this API might return `False` in the following use cases. + + * Executing inside `tf.function`, unless under `tf.init_scope` or + `tf.config.run_functions_eagerly(True)` is previously called. + * Executing inside a transformation function for `tf.dataset`. + * `tf.compat.v1.disable_eager_execution()` is called. + + >>> tf.compat.v1.enable_eager_execution() + + General case: + + >>> print(tf.executing_eagerly()) + True + + Inside `tf.function`: + + >>> @tf.function + ... def fn(): + ... with tf.init_scope(): + ... print(tf.executing_eagerly()) + ... 
print(tf.executing_eagerly()) + >>> fn() + True + False + + Inside `tf.function` + after `tf.config.run_functions_eagerly(True)` is called: + + >>> tf.config.run_functions_eagerly(True) + >>> @tf.function + ... def fn(): + ... with tf.init_scope(): + ... print(tf.executing_eagerly()) + ... print(tf.executing_eagerly()) + >>> fn() + True + True + >>> tf.config.run_functions_eagerly(False) + + Inside a transformation function for `tf.dataset`: + + >>> def data_fn(x): + ... print(tf.executing_eagerly()) + ... return x + >>> dataset = tf.data.Dataset.range(100) + >>> dataset = dataset.map(data_fn) + False + + Returns: + `True` if the current thread has eager execution enabled. + """ + return executing_eagerly() + + +def in_eager_mode(): + """Use executing_eagerly() instead. This function will be removed.""" + return executing_eagerly() + + +def anonymous_name(): + """Returns the anonymous shared name. + + In eager mode we create anonymous resources to avoid spurious sharing issues. + The runtime generates a unique name on our behalf when the reserved + anonymous shared name is used as a shared name. + + Returns: + The anonymous shared name. + """ + + # The magic value is defined as + # `tensorflow::ResourceHandle::ANONYMOUS_NAME` in C++. + return "cd2c89b7-88b7-44c8-ad83-06c2a9158347" + + +def graph_mode(): + """Context-manager to disable eager execution for the current thread.""" + return context()._mode(GRAPH_MODE) # pylint: disable=protected-access + + +# Used by b/167638505 for keras backend API and Lambda layer. +@tf_export("__internal__.eager_context.eager_mode", v1=[]) +def eager_mode(): + """Context-manager to enable eager execution for the current thread.""" + return context()._mode(EAGER_MODE) # pylint: disable=protected-access + + +def scope_name(): + """Name of the current scope.""" + return context().scope_name + + +def device(name): + """Context-manager to force placement of operations and Tensors on a device. 
+ + Example: + ```python + with tf.device('gpu:0'): + with tf.device('cpu:0'): + shape = tf.constant([], dtype=tf.int32) + x = tf.random.truncated_normal(shape, tf.float32) + ``` + will ensure that the `shape` Tensor is on CPU but the `truncated_normal` + operation runs on GPU 0. + + Args: + name: Name of the device (see context().devices()), or None to perform + automatic placement. + + Returns: + Context manager for setting the device. + """ + ensure_initialized() + return context().device(name) + + +# Expose some properties of Context as internally public APIs (b/160348781). +@tf_export("__internal__.eager_context.get_config", v1=[]) +def get_config(): + """Get the ConfigProto of Context. + + Returns: + The ConfigProto of Context. + """ + return context().config + + +@tf_export("__internal__.eager_context.get_device_name", v1=[]) +def get_device_name(): + """Get the device name for the current thread. + + Returns: + The device name for the current thread. + """ + return context().device_name + + +@tf_export("__internal__.eager_context.set_soft_device_placement", v1=[]) +def set_soft_device_placement(enabled): + """Set if soft device placements should be allowed. + + Args: + enabled: Whether to enable soft device placement. + """ + context().soft_device_placement = enabled + + +@tf_export("__internal__.eager_context.get_executor", v1=[]) +def get_executor(): + """Get the Executor of the current thread. + + Returns: + The Executor of the current thread. + """ + return context().executor + + +@tf_export("debugging.get_log_device_placement") +def get_log_device_placement(): + """Get if device placements are logged. + + Returns: + If device placements are logged. + """ + return context().log_device_placement + + +@tf_export("debugging.set_log_device_placement") +def set_log_device_placement(enabled): + """Turns logging for device placement decisions on or off. + + Operations execute on a particular device, producing and consuming tensors on + that device. 
This may change the performance of the operation or require + TensorFlow to copy data to or from an accelerator, so knowing where operations + execute is useful for debugging performance issues. + + For more advanced profiling, use the [TensorFlow + profiler](https://www.tensorflow.org/guide/profiler). + + Device placement for operations is typically controlled by a `tf.device` + scope, but there are exceptions, for example operations on a `tf.Variable` + which follow the initial placement of the variable. Turning off soft device + placement (with `tf.config.set_soft_device_placement`) provides more explicit + control. + + >>> tf.debugging.set_log_device_placement(True) + >>> tf.ones([]) + >>> # [...] op Fill in device /job:localhost/replica:0/task:0/device:GPU:0 + >>> with tf.device("CPU"): + ... tf.ones([]) + >>> # [...] op Fill in device /job:localhost/replica:0/task:0/device:CPU:0 + >>> tf.debugging.set_log_device_placement(False) + + Turning on `tf.debugging.set_log_device_placement` also logs the placement of + ops inside `tf.function` when the function is called. + + Args: + enabled: Whether to enabled device placement logging. + """ + context().log_device_placement = enabled + + +@tf_contextlib.contextmanager +def device_policy(policy): + """Context manager for setting device placement policy for current thread.""" + ctx = context() + old_policy = ctx.device_policy + try: + ctx.device_policy = policy + yield + finally: + ctx.device_policy = old_policy + + +def set_execution_mode(mode): + """Sets execution mode for the current thread.""" + context().execution_mode = mode + + +# TODO(fishx): remove this method. 
+@tf_contextlib.contextmanager +def execution_mode(mode): + """Context manager for setting execution mode for current thread.""" + if mode is None: + yield + else: + ctx = context() + executor_new = executor.new_executor(mode == ASYNC) + executor_old = ctx.executor + try: + executor_old.wait() + ctx.executor = executor_new + yield + finally: + ctx.executor = executor_old + executor_new.wait() + + +@tf_contextlib.contextmanager +def executor_scope(e): + """Context manager for changing executor for current thread. + + Args: + e: A Executor to execute eager ops under this scope. Setting it to None will + switch back to use the default executor for the context. + + Yields: + Context manager for setting the executor for current thread. + """ + ctx = context() + executor_old = ctx.executor + try: + ctx.executor = e + yield + finally: + ctx.executor = executor_old + + +@tf_export("experimental.function_executor_type") +@tf_contextlib.contextmanager +def function_executor_type(executor_type): + """Context manager for setting the executor of eager defined functions. + + Eager defined functions are functions decorated by tf.contrib.eager.defun. + + Args: + executor_type: a string for the name of the executor to be used to execute + functions defined by tf.contrib.eager.defun. + + Yields: + Context manager for setting the executor of eager defined functions. + """ + current_options = context().function_call_options + old_options = copy.copy(current_options) + try: + current_options.executor_type = executor_type + yield + finally: + context().function_call_options = old_options + + +def is_async(): + """Returns true if current thread is in async mode.""" + return context().is_async() + + +def num_gpus(): + """Get the number of available GPU devices. + + Returns: + The number of available GPU devices. + """ + return context().num_gpus() + + +def enable_run_metadata(): + """Enables tracing of op execution via RunMetadata. 
+ + To retrieve the accumulated metadata call context.export_run_metadata() + and to stop tracing call context.disable_run_metadata(). + """ + context().enable_run_metadata() + + +def disable_run_metadata(): + """Disables tracing of op execution via RunMetadata.""" + context().disable_run_metadata() + + +def enable_graph_collection(): + """Enables graph collection of executed functions. + + To retrieve the accumulated graphs call context.export_run_metadata() + and to stop collecting graphs call context.disable_graph_collection(). + """ + context().enable_graph_collection() + + +def disable_graph_collection(): + """Disables graph collection of executed functions.""" + context().disable_graph_collection() + + +def export_run_metadata(): + """Returns a RunMetadata proto with accumulated information. + + The returned protocol buffer contains information since the most recent call + to either enable_run_metadata or export_run_metadata. + + Returns: + A RunMetadata protocol buffer. + """ + return context().export_run_metadata() + + +@contextlib.contextmanager +def collect_graphs(optimized=True): + """Collects a flat list of pre- or post-optimization graphs. + + The collected graphs include device placements, which can be useful for + testing. + + Usage: + + ``` + @def_function.function + def f(x): + return x + constant_op.constant(1.) + + with context.collect_graphs() as graphs: + with ops.device("CPU:0"): + f(constant_op.constant(1.)) + + graph, = graphs # `graph` contains a single GraphDef for inspection + ``` + + Args: + optimized: whether to collect optimized graphs or non-optimized graphs + + Yields: + A list of GraphDefs, populated when the context manager exits. 
+ """ + ctx = context() + ctx.enable_graph_collection() + try: + graphs = [] + yield graphs + metadata = ctx.export_run_metadata() + finally: + ctx.disable_graph_collection() + for graph in metadata.function_graphs: + if optimized: + graphs.append(graph.post_optimization_graph) + else: + graphs.append(graph.pre_optimization_graph) + + +def get_server_def(): + return context().get_server_def() + + +def set_server_def(server_def): + context().set_server_def(server_def) + + +def set_server_def_retries(retries): + """Set the number of retries to use when calling SetServerDef. + + In cases where many servers run in high-preemption environments, jobs could + be preempted during startup and initial connection via SetServerDef. Retries + allow for more robust connection in these environments. + + + Args: + retries: int specifying the number of connection retries before failing. + Retries follow an exponential backoff waiting period with min value 1ms, + max value 10s, and exponent 1.3. + """ + context().set_server_def_retries(retries) + + +def update_server_def(server_def): + context().update_server_def(server_def) + + +def check_alive(worker_name): + return context().check_alive(worker_name) + + +@tf_export("experimental.async_scope") +@tf_contextlib.contextmanager +def async_scope(): + """Context manager for grouping async operations. + + Ops/function calls inside the scope can return before finishing the actual + execution. When exiting the async scope, a synchronization barrier will be + automatically added to ensure the completion of all async op and function + execution, potentially raising exceptions if async execution results in + an error state. + + Users may write the following code to asynchronously invoke `train_step_fn` + and log the `loss` metric for every `num_steps` steps in a training loop. + `train_step_fn` internally consumes data using `iterator.get_next()`, and may + throw OutOfRangeError when running out of data. 
In the case: + + ``` + try: + with tf.experimental.async_scope(): + for _ in range(num_steps): + # Step function updates the metric `loss` internally + train_step_fn() + except tf.errors.OutOfRangeError: + tf.experimental.async_clear_error() + logging.info('loss = %s', loss.numpy()) + ``` + + Yields: + Context manager for grouping async operations. + """ + # TODO(haoyuzhang): replace env var once we have a config method to turn on + # and off async streaming RPC + remote_async_env_var = "TF_ENABLE_EAGER_CLIENT_STREAMING_ENQUEUE" + old_policy = os.environ.get(remote_async_env_var) + try: + os.environ[remote_async_env_var] = str(True) + yield + # Note: sync local and remote executors iff the async block does not raise + # an exception. Triggering sync after an exception may lead to derived + # runtime errors and unexpected exception types. + context().sync_executors() + finally: + if old_policy is None: + del os.environ[remote_async_env_var] + else: + os.environ[remote_async_env_var] = old_policy + + +def async_wait(): + """Sync all async operations and raise any errors during execution. + + In async execution mode, an op/function call can return before finishing the + actual execution. Calling this method creates a synchronization barrier for + all async op and function execution. It only returns when all pending nodes + are finished, potentially raising exceptions if async execution results in + an error state. It is a no-op if the context is not initialized. + """ + disable_async_executor_env_var = "TF_PS_DISABLE_ASYNC_EXECUTOR_GLOBALLY" + if os.environ.get(disable_async_executor_env_var) == str(True): + return + if context()._context_handle is not None: # pylint: disable=protected-access + context().sync_executors() + + +@tf_export("experimental.async_clear_error") +def async_clear_error(): + """Clear pending operations and error statuses in async execution. 
+ + In async execution mode, an error in op/function execution can lead to errors + in subsequent ops/functions that are scheduled but not yet executed. Calling + this method clears all pending operations and reset the async execution state. + + Example: + + ``` + while True: + try: + # Step function updates the metric `loss` internally + train_step_fn() + except tf.errors.OutOfRangeError: + tf.experimental.async_clear_error() + break + logging.info('loss = %s', loss.numpy()) + ``` + """ + context().clear_executor_errors() + + +def add_c_function(c_func): + """Add a C API TF_Function to the context.""" + context().add_c_function(c_func) + + +def get_c_function(name): + """Get a C API TF_Function from the context.""" + return context().get_c_function(name) + + +def remove_function(name): + """Remove a function from the context.""" + context().remove_function(name) + + +def get_function_def(name): + return context().get_function_def(name) + + +def is_custom_device(device_name): + """Calls TFE_IsCustomDevice. + + Enables using C extensions specifying a custom device from Python. See the + experimental eager C API in tensorflow/c/eager/c_api_experimental.h for + details. + + Args: + device_name: A string indicating the name to check whether it is a + registered custom device. + + Returns: + A boolean. + """ + return context().is_custom_device(device_name) + + +def register_custom_device(device_capsule, device_name, device_info_capsule): + """Calls TFE_RegisterCustomDevice to register a custom device with Python. + + Enables using C extensions specifying a custom device from Python. See the + experimental eager C API in tensorflow/c/eager/c_api_experimental.h for + details. + + Note that custom devices are not currently supported inside `tf.function`s. + + Args: + device_capsule: A PyCapsule with the name set to 'TFE_CustomDevice' + containing a pointer to a TFE_CustomDevice struct. The capsule retains + ownership of the memory. 
+ device_name: A string indicating the name to register the custom device + under, e.g. '/job:localhost/replica:0/task:0/device:CUSTOM:0'. It may + subsequently be passed to `with tf.device(...):`. + device_info_capsule: A PyCapsule with the name set to + 'TFE_CustomDevice_DeviceInfo' containing a pointer to a device-specific + struct with the initial state of the custom device (the void* device_info + argument to TFE_RegisterCustomDevice). This method takes ownership of the + memory and clears the capsule destructor. + """ + context().register_custom_device(device_capsule, device_name, + device_info_capsule) + + +# Not every user creates a Context via context.context() +# (for example, enable_eager_execution in python/framework/ops.py), +# but they do all import this file. Note that IS_IN_GRAPH_MODE and +# in_graph_mode are both parameterless functions. +def _tmp_in_graph_mode(): + if context_safe() is None: + # Context not yet initialized. Assume graph mode following the + # default implementation in `is_in_graph_mode`. + return True + return not executing_eagerly() + + +is_in_graph_mode.IS_IN_GRAPH_MODE = _tmp_in_graph_mode diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/core.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/core.py new file mode 100644 index 0000000000000000000000000000000000000000..9519fdc0000848561b9f9afb97c9c386b17c53fc --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/core.py @@ -0,0 +1,78 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Experimental API for TensorFlow's "Eager" mode of execution.""" + +from tensorflow.python import pywrap_tfe +from tensorflow.python.framework import errors +from tensorflow.python.platform import tf_logging as logging + +# Trace of execution and memory usage. +_active_trace = None + + +def _status_to_exception(status): + try: + error_class = errors.exception_type_from_error_code(status.code) + e = error_class(None, None, status.message, status.payloads) + logging.error_log("%s: %s" % (e.__class__.__name__, e)) + return e + except KeyError: + e = errors.UnknownError( + None, None, status.message, status.code, status.payloads + ) + logging.error_log("%s: %s" % (e.__class__.__name__, e)) + return e + + +class _NotOkStatusException(Exception): + """Exception class to handle not ok Status.""" + + def __init__(self, message, code, payloads): + super(_NotOkStatusException, self).__init__() + self.message = message + self.code = code + self.payloads = payloads + + def __str__(self): + e = _status_to_exception(self) + return "%s: %s" % (e.__class__.__name__, e) + + +pywrap_tfe.TFE_Py_RegisterExceptionClass(_NotOkStatusException) + + +class _FallbackException(Exception): + """Exception class to handle fallback from the fastpath. + + The fastpath that we refer to here is the one implemented to reduce per-op + overheads (TFE_Py_FastPathExecute_C). 
If the conditions for executing the op + on the fastpath are not met, we fallback to a safer (and more complete) + slowpath, and this Exception is raised to signal that transition. + """ + pass + + +class _SymbolicException(Exception): + """Exception class to handle use of symbolic tensors when executing eagerly. + + `keras.Input()` creates symbolic tensors (in a FuncGraph managed by the + Keras backend) while in eager execution. This exception is used to + identify this case (raised in `convert_to_tensor` cause generated functions + for ops to construct graphs instead of executing the kernel). + """ + pass + + +pywrap_tfe.TFE_Py_RegisterFallbackExceptionClass(_FallbackException) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/def_function.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/def_function.py new file mode 100644 index 0000000000000000000000000000000000000000..07d85f0f4bee86ed25e88231b8a78e1a8c9bf78f --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/def_function.py @@ -0,0 +1,28 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Supports old symbols supplied by this file while the code is refactored.""" + +# pylint:disable=unused-import,g-bad-import-order + +# Config Options +from tensorflow.python.eager.polymorphic_function.eager_function_run import run_functions_eagerly +from tensorflow.python.eager.polymorphic_function.eager_function_run import functions_run_eagerly + +# tf.function Classes +from tensorflow.python.eager.polymorphic_function.polymorphic_function import Function +from tensorflow.python.eager.polymorphic_function.polymorphic_function import function + +# Private attributes +from tensorflow.python.eager.polymorphic_function.polymorphic_function import _tf_function_counter diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/execute.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/execute.py new file mode 100644 index 0000000000000000000000000000000000000000..94236fa66fdfccf9d765e3ad2f48635fa91db970 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/execute.py @@ -0,0 +1,329 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Functions called by the generated code to execute an eager-mode op.""" + +from google.protobuf import text_format +from tensorflow.core.framework import tensor_pb2 +from tensorflow.python import pywrap_tfe +from tensorflow.python.eager import core +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import tensor_conversion_registry +from tensorflow.python.framework import tensor_shape +from tensorflow.python.types import core as core_types +from tensorflow.python.util import compat + + +def quick_execute(op_name, num_outputs, inputs, attrs, ctx, name=None): + """Execute a TensorFlow operation. + + Args: + op_name: Name of the TensorFlow operation (see REGISTER_OP in C++ code) to + execute. + num_outputs: The number of outputs of the operation to fetch. (Explicitly + provided instead of being inferred for performance reasons). + inputs: A list of inputs to the operation. Each entry should be a Tensor, or + a value which can be passed to the Tensor constructor to create one. + attrs: A tuple with alternating string attr names and attr values for this + operation. + ctx: The value of context.context(). + name: Customized name for the operation. + + Returns: + List of output Tensor objects. The list is empty if there are no outputs + + Raises: + An exception on error. 
+ """ + device_name = ctx.device_name + # pylint: disable=protected-access + try: + ctx.ensure_initialized() + tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name, + inputs, attrs, num_outputs) + except core._NotOkStatusException as e: + if name is not None: + e.message += " name: " + name + raise core._status_to_exception(e) from None + except TypeError as e: + keras_symbolic_tensors = [x for x in inputs if _is_keras_symbolic_tensor(x)] + if keras_symbolic_tensors: + raise core._SymbolicException( + "Inputs to eager execution function cannot be Keras symbolic " + "tensors, but found {}".format(keras_symbolic_tensors)) + raise e + # pylint: enable=protected-access + return tensors + + +def execute_with_cancellation(op_name, + num_outputs, + inputs, + attrs, + ctx, + cancellation_manager, + name=None): + """Execute a TensorFlow operation. + + Args: + op_name: Name of the TensorFlow operation (see REGISTER_OP in C++ code) to + execute. + num_outputs: The number of outputs of the operation to fetch. (Explicitly + provided instead of being inferred for performance reasons). + inputs: A list of inputs to the operation. Each entry should be a Tensor, or + a value which can be passed to the Tensor constructor to create one. + attrs: A tuple with alternating string attr names and attr values for this + operation. + ctx: The value of context.context(). + cancellation_manager: a `CancellationManager` object that can be used to + cancel the operation. + name: Customized name for the operation. + + Returns: + List of output Tensor objects. The list is empty if there are no outputs + + Raises: + An exception on error. 
+ """ + device_name = ctx.device_name + # pylint: disable=protected-access + try: + ctx.ensure_initialized() + tensors = pywrap_tfe.TFE_Py_ExecuteCancelable(ctx._handle, device_name, + op_name, inputs, attrs, + cancellation_manager._impl, + num_outputs) + except core._NotOkStatusException as e: + if name is not None: + e.message += " name: " + name + raise core._status_to_exception(e) from None + except TypeError as e: + keras_symbolic_tensors = [x for x in inputs if _is_keras_symbolic_tensor(x)] + if keras_symbolic_tensors: + raise core._SymbolicException( + "Inputs to eager execution function cannot be Keras symbolic " + "tensors, but found {}".format(keras_symbolic_tensors)) + raise e + # pylint: enable=protected-access + return tensors + + +def execute_with_callbacks(op_name, num_outputs, inputs, attrs, ctx, name=None): + """Monkey-patch to execute to enable execution callbacks.""" + tensors = quick_execute(op_name, num_outputs, inputs, attrs, ctx, name) + for callback in ctx.op_callbacks: + callback(op_name, tuple(inputs), attrs, tensors, name) + + return tensors + + +execute = quick_execute + + +def must_record_gradient(): + """Import backprop if you want gradients recorded.""" + return False + + +def record_gradient(unused_op_name, unused_inputs, unused_attrs, + unused_outputs): + """Import backprop if you want gradients recorded.""" + pass + + +def make_float(v, arg_name): + if not isinstance(v, compat.real_types): + raise TypeError("Expected float for argument '%s' not %s." % + (arg_name, repr(v))) + return float(v) + + +def make_int(v, arg_name): + if isinstance(v, str): + raise TypeError("Expected int for argument '%s' not %s." % + (arg_name, repr(v))) + try: + return int(v) + except (ValueError, TypeError): + raise TypeError("Expected int for argument '%s' not %s." % + (arg_name, repr(v))) + + +def make_str(v, arg_name): + if not isinstance(v, compat.bytes_or_text_types): + raise TypeError("Expected string for argument '%s' not %s." 
% + (arg_name, repr(v))) + return compat.as_bytes(v) # Convert unicode strings to bytes. + + +def make_bool(v, arg_name): + if not isinstance(v, bool): + raise TypeError("Expected bool for argument '%s' not %s." % + (arg_name, repr(v))) + return v + + +def make_type(v, arg_name): + try: + v = dtypes.as_dtype(v).base_dtype + except TypeError: + raise TypeError("Expected DataType for argument '%s' not %s." % + (arg_name, repr(v))) + i = v.as_datatype_enum + return i + + +def make_shape(v, arg_name): + """Convert v into a list.""" + # Args: + # v: A TensorShapeProto, a list of ints, or a tensor_shape.TensorShape. + # arg_name: String, for error messages. + + # Returns: + # None if the rank is unknown, otherwise a list of ints (or Nones in the + # position where the dimension is unknown). + try: + shape = tensor_shape.as_shape(v) + except TypeError as e: + raise TypeError("Error converting %s to a TensorShape: %s." % (arg_name, e)) + except ValueError as e: + raise ValueError("Error converting %s to a TensorShape: %s." % + (arg_name, e)) + if shape.ndims is None: + return None + else: + return shape.as_list() + + +def make_tensor(v, arg_name): + """Ensure v is a TensorProto.""" + if isinstance(v, tensor_pb2.TensorProto): + return v + elif isinstance(v, str): + pb = tensor_pb2.TensorProto() + text_format.Merge(v, pb) + return pb + raise TypeError( + "Don't know how to convert %s to a TensorProto for argument '%s'." % + (repr(v), arg_name)) + + +def args_to_matching_eager(l, ctx, allowed_dtypes, default_dtype=None): + """Convert sequence `l` to eager same-type Tensors.""" + del ctx # Unused + if (not l) and (default_dtype is not None): + return default_dtype, [] # List is empty; assume default dtype. + for x in l: + if not isinstance(x, core_types.Value): + break + else: # note: intentional for-else + return l[0]._datatype_enum(), l # pylint: disable=protected-access + + # Is some input already a Tensor with a dtype? 
+ dtype = None + for t in l: + if isinstance(t, core_types.Value): + dtype = t.dtype + break + + if dtype is None: + # Infer a dtype based on the first value, and use that dtype for the + # remaining values. + + ret = [] + for t in l: + tensor = None + # First see if we can get a valid dtype with the default conversion + # and see if it matches an allowed dtypes. Some ops like ConcatV2 may + # not list allowed dtypes, in which case we should skip this. + if dtype is None and allowed_dtypes: + tensor = tensor_conversion_registry.convert(t) + # If we did not match an allowed dtype, try again with the default + # dtype. This could be because we have an empty tensor and thus we + # picked the wrong type. + if tensor.dtype not in allowed_dtypes: + tensor = None + + if tensor is None: + tensor = tensor_conversion_registry.convert( + t, dtype, preferred_dtype=default_dtype + ) + + ret.append(tensor) + if dtype is None: + dtype = tensor.dtype + else: + ret = [tensor_conversion_registry.convert(t, dtype) for t in l] + + # TODO(slebedev): consider removing this as it leaks a Keras concept. + # pylint: disable=protected-access + keras_symbolic_tensors = [x for x in ret if _is_keras_symbolic_tensor(x)] + if keras_symbolic_tensors: + raise core._SymbolicException( + "Using symbolic output of a Keras layer during eager execution " + "{}".format(keras_symbolic_tensors)) + # pylint: enable=protected-access + return dtype.as_datatype_enum, ret + + +def convert_to_mixed_eager_tensors(values, ctx): + del ctx # Unused + v = [tensor_conversion_registry.convert(t) for t in values] + types = [t._datatype_enum() for t in v] # pylint: disable=protected-access + return types, v + + +def args_to_mixed_eager_tensors(lists, ctx): + """Converts a list of same-length lists of values to eager tensors.""" + del ctx # Unused + assert len(lists) > 1 + + # Generate an error if len(lists[i]) is not the same for all i. 
+ lists_ret = [[]] + for l in lists[1:]: + if len(l) != len(lists[0]): + raise ValueError( + "Expected list arguments to be the same length: %d != %d (%r vs. %r)." + % (len(lists[0]), len(l), lists[0], l)) + lists_ret.append([]) + + # Convert the first element of each list first, then the second element, etc. + types = [] + for i in range(len(lists[0])): + dtype = None + # If any list has a Tensor, use that dtype + for l in lists: + if isinstance(l[i], core_types.Value): + dtype = l[i].dtype + break + if dtype is None: + # Convert the first one and use its dtype. + lists_ret[0].append(tensor_conversion_registry.convert(lists[0][i])) + dtype = lists_ret[0][i].dtype + for j in range(1, len(lists)): + lists_ret[j].append( + tensor_conversion_registry.convert(lists[j][i], dtype=dtype) + ) + else: + # Convert everything to the found dtype. + for j in range(len(lists)): + lists_ret[j].append( + tensor_conversion_registry.convert(lists[j][i], dtype=dtype) + ) + types.append(dtype.as_datatype_enum) + return types, lists_ret + + +def _is_keras_symbolic_tensor(x): + return hasattr(x, "graph") and getattr(x.graph, "name", None) == "keras_graph" diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/executor.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/executor.py new file mode 100644 index 0000000000000000000000000000000000000000..4d01f8e9cab010ca6bf37325d06b47728baeda12 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/executor.py @@ -0,0 +1,77 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Executor for eager execution.""" + +from tensorflow.python import pywrap_tfe + + +class Executor(object): + """A class for handling eager execution. + + The default behavior for asynchronous execution is to serialize all ops on + a single thread. Having different `Executor` objects in different threads + enables executing ops asynchronously in parallel: + + ```python + def thread_function(): + executor = executor.Executor(enable_async=True): + context.set_executor(executor) + + a = threading.Thread(target=thread_function) + a.start() + b = threading.Thread(target=thread_function) + b.start() + ``` + """ + + __slots__ = ["_handle"] + + def __init__(self, handle): + self._handle = handle + + def __del__(self): + try: + self.wait() + pywrap_tfe.TFE_DeleteExecutor(self._handle) + except TypeError: + # Suppress some exceptions, mainly for the case when we're running on + # module deletion. Things that can go wrong include the pywrap module + # already being unloaded, self._handle. no longer being + # valid, and so on. Printing warnings in these cases is silly + # (exceptions raised from __del__ are printed as warnings to stderr). + pass # 'NoneType' object is not callable when the handle has been + # partially unloaded. 
+ + def is_async(self): + return pywrap_tfe.TFE_ExecutorIsAsync(self._handle) + + def handle(self): + return self._handle + + def wait(self): + """Waits for ops dispatched in this executor to finish.""" + pywrap_tfe.TFE_ExecutorWaitForAllPendingNodes(self._handle) + + def clear_error(self): + """Clears errors raised in this executor during execution.""" + pywrap_tfe.TFE_ExecutorClearError(self._handle) + + +def new_executor(enable_async, + enable_streaming_enqueue=True, + in_flight_nodes_limit=0): + handle = pywrap_tfe.TFE_NewExecutor(enable_async, enable_streaming_enqueue, + in_flight_nodes_limit) + return Executor(handle) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/forwardprop.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/forwardprop.py new file mode 100644 index 0000000000000000000000000000000000000000..1a02be97bd6bfc866cbfd7c8d0cac2f6383133f8 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/forwardprop.py @@ -0,0 +1,487 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Utilities for forward-mode automatic differentiation.""" + +import functools +import threading + +from tensorflow.core.function.polymorphism import function_cache +from tensorflow.python import pywrap_tfe +from tensorflow.python.eager import backprop +from tensorflow.python.eager import backprop_util +from tensorflow.python.eager import execute +from tensorflow.python.eager import forwardprop_util +from tensorflow.python.eager.polymorphic_function import tracing_compilation +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops import array_ops +from tensorflow.python.ops.parallel_for import control_flow_ops +from tensorflow.python.ops.unconnected_gradients import UnconnectedGradients +from tensorflow.python.platform import tf_logging as logging +from tensorflow.python.util import nest +from tensorflow.python.util.tf_export import tf_export + + +# Dictionary mapping from op names to special-cased jvp functions. Otherwise +# backward functions are transposed on the tape. +_SPECIAL_CASES = {} + + +def _identity_jvp(attr_tuple, inputs, outputs, tangents): + # Special-cased mostly for resource handles, where creating ones Tensors from + # handle data for transposing the backward function on the tape is error-prone + # (even if we get good handle data, partially defined shapes are an issue). + del attr_tuple, inputs, outputs + return [array_ops.identity(t) for t in tangents] + + +_SPECIAL_CASES["Identity"] = _identity_jvp + + +def _read_variable_jvp(attr_tuple, inputs, outputs, tangents): + # Like for Identity, this special case means we don't need to create + # variable-shaped Tensors from resource handles. 
+ del attr_tuple, inputs, outputs + return [array_ops.identity(t) for t in tangents] + + +_SPECIAL_CASES["ReadVariableOp"] = _read_variable_jvp + + +_TRACE_COUNT_CONSISTENCY_LOCK = threading.Lock() +# Map from op names to number of traces of _jvp_helper. Used to cap the number +# of traces due to shape differences while still specializing where possible. +_TRACE_COUNT = {} + + +def _jvp_helper(op_name, attr_tuple, inputs, outputs, tangents): + """Computes a Jacobian-vector product for an op. + + Note that this function would be wasteful if executed eagerly. It runs the + backward gradient function and throws away the result just to record its + operations on a GradientTape. These unused ops are pruned away when this + function is traced. + + Args: + op_name: A string, the type of operation being executed. + attr_tuple: Attributes of the operation. + inputs: A flat list of input Tensors to the operation. + outputs: A flat list of output Tensors from the operation. + tangents: A flat list of Tensors, same shape as `inputs`. + + Returns: + A flat list of tangents corresponding to `outputs`. + """ + with _TRACE_COUNT_CONSISTENCY_LOCK: + # Just make sure writes don't clobber each other's increments; reads in + # _jvp_dispatch do not lock. + _TRACE_COUNT[op_name] = _TRACE_COUNT.get(op_name, 0) + 1 + + special_case = _SPECIAL_CASES.get(op_name, None) + if special_case is not None: + return special_case(attr_tuple, inputs, outputs, tangents) + if not outputs: + # tape.gradients([], inputs) doesn't make much sense + return [] + # Generally inner GradientTapes won't function while outer accumulators are + # recording. We temporarily reset forwardprop state to allow GradientTapes to + # function here. 
+ with forwardprop_util.push_forwardprop_state(): + trainable_inputs = [] + trainable_indices = [] + nontrivial_tangents = [] + for input_index, tensor in enumerate(inputs): + if backprop_util.IsTrainable(tensor): + trainable_inputs.append(tensor) + trainable_indices.append(input_index) + nontrivial_tangents.append(tangents[input_index]) + + with backprop.GradientTape() as transpose_tape: + with backprop.GradientTape() as backfunc_tape: + backfunc_tape.watch(trainable_inputs) + execute.record_gradient(op_name, inputs, attr_tuple, outputs) + + forwardprop_aids = [] + trainable_outputs = [] + nontrivial_output_indices = [] + for output_index, output in enumerate(outputs): + if backprop_util.IsTrainable(output): + forwardprop_aids.append( + array_ops.ones_like(output, name="unused_forwardprop_aid")) + trainable_outputs.append(output) + nontrivial_output_indices.append(output_index) + + transpose_tape.watch(forwardprop_aids) + grads = backfunc_tape.gradient( + trainable_outputs, + trainable_inputs, + forwardprop_aids, + unconnected_gradients=UnconnectedGradients.ZERO) + nontrivial_output_tangents = transpose_tape.gradient( + grads, forwardprop_aids, output_gradients=nontrivial_tangents) + output_tangents = [None] * len(outputs) + for index, tangent in zip(nontrivial_output_indices, + nontrivial_output_tangents): + output_tangents[index] = tangent + return output_tangents + + +def _jvp_helper_wrapper(op_name, attr_tuple, inputs, outputs, tangents, + use_batch): + """Computes a batch of Jacobian-vector product for an op. + + Args: + op_name: A string, the type of operation being executed. + attr_tuple: Attributes of the operation. + inputs: A flat list of input Tensors to the operation. + outputs: A flat list of output Tensors from the operation. + tangents: A flat list of Tensors, compatible with shape `[None] + + input_shape`. + use_batch: A bool, True to vetorize over batch of tangents of shape `[None] + + input_shape`. 
+ + Returns: + A flat list of tangents compatible with `outputs` + or `[None] + output_shape`. + + Raises: + ValueError: if tangent shapes are not compatible with input shapes. + """ + if use_batch: + for primal, tangent in zip(inputs, tangents): + if not tangent.shape.is_compatible_with([None] + primal.shape): + raise ValueError("Tangent {} was expected to be of shape " + "{} but is instead of shape {}".format( + tangent, [None] + primal.shape, tangent.shape)) + + return control_flow_ops.vectorized_map( + functools.partial(_jvp_helper, op_name, attr_tuple, inputs, outputs), + tangents, + ) + return _jvp_helper(op_name, attr_tuple, inputs, outputs, tangents) + + +# TODO(allenl): reduce_retracing for gradients which rely on static +# shape information are underspecialized. We may want hand-written forward +# implementations, or a more satisfying story about how we re-specialize +# gradients which were traced with relaxed shapes (e.g. use conds instead of +# trace-time Python logic). +# +# Using function.defun rather than def_function.function avoids +# tf.config.run_functions_eagerly(True). `_jvp_helper` doesn't successfully run +# eagerly (infinite recursion), and even if it did it would use extra memory and +# run unnecessary computation. The function does not create variables, so the +# two symbols are otherwise equivalent. +_jvp_function_cache = function_cache.FunctionCache() +_jvp_relaxed_config = tracing_compilation.TracingOptions( + _jvp_helper_wrapper, + name="_jvp_relaxed_shapes", + reduce_retracing=True, + function_cache=_jvp_function_cache, +) + +_jvp_exact_config = tracing_compilation.TracingOptions( + _jvp_helper_wrapper, + name="_jvp_exact_shapes", + reduce_retracing=False, + function_cache=_jvp_function_cache, +) + +# The maximum number of exact-shape traces to perform for a single op before +# switching to shape relaxation. 
+_TRACE_COUNT_LIMIT = 32 + + +def _jvp_dispatch(op_name, + attr_tuple, + inputs, + outputs, + tangents, + use_batch=False): + """Determine which forwardprop function to call.""" + # Note that this _TRACE_COUNT read races with writes. That's fine, it just + # means we may trace a few more exact shapes before moving on to relaxation. + if _TRACE_COUNT.get(op_name, 0) < _TRACE_COUNT_LIMIT: + config = _jvp_exact_config + else: + config = _jvp_relaxed_config + return tracing_compilation.call_function( + (op_name, attr_tuple, inputs, outputs, tangents, use_batch), + tracing_options=config, + ) + + +pywrap_tfe.TFE_Py_RegisterJVPFunction(_jvp_dispatch) + + +@tf_export("autodiff.ForwardAccumulator", v1=[]) +class ForwardAccumulator(): + """Computes Jacobian-vector products ("JVP"s) using forward-mode autodiff. + + Compare to `tf.GradientTape` which computes vector-Jacobian products ("VJP"s) + using reverse-mode autodiff (backprop). Reverse mode is more attractive when + computing gradients of a scalar-valued function with respect to many inputs + (e.g. a neural network with many parameters and a scalar loss). Forward mode + works best on functions with many outputs and few inputs. Since it does not + hold on to intermediate activations, it is much more memory efficient than + backprop where it is applicable. + + Consider a simple linear regression: + + >>> x = tf.constant([[2.0, 3.0], [1.0, 4.0]]) + >>> targets = tf.constant([[1.], [-1.]]) + >>> dense = tf.keras.layers.Dense(1) + >>> dense.build([None, 2]) + >>> with tf.autodiff.ForwardAccumulator( + ... primals=dense.kernel, + ... tangents=tf.constant([[1.], [0.]])) as acc: + ... loss = tf.reduce_sum((dense(x) - targets) ** 2.) + >>> acc.jvp(loss) + + + The example has two variables containing parameters, `dense.kernel` (2 + parameters) and `dense.bias` (1 parameter). 
Considering the training data `x` + as a constant, this means the Jacobian matrix for the function mapping from + parameters to loss has one row and three columns. + + With forwardprop, we specify a length-three vector in advance which multiplies + the Jacobian. The `primals` constructor argument is the parameter (a + `tf.Tensor` or `tf.Variable`) we're specifying a vector for, and the + `tangents` argument is the "vector" in Jacobian-vector product. If our goal is + to compute the entire Jacobian matrix, forwardprop computes one column at a + time while backprop computes one row at a time. Since the Jacobian in the + linear regression example has only one row, backprop requires fewer + invocations: + + >>> x = tf.constant([[2.0, 3.0], [1.0, 4.0]]) + >>> targets = tf.constant([[1.], [-1.]]) + >>> dense = tf.keras.layers.Dense(1) + >>> dense.build([None, 2]) + >>> loss_fn = lambda: tf.reduce_sum((dense(x) - targets) ** 2.) + >>> kernel_fprop = [] + >>> with tf.autodiff.ForwardAccumulator( + ... dense.kernel, tf.constant([[1.], [0.]])) as acc: + ... kernel_fprop.append(acc.jvp(loss_fn())) + >>> with tf.autodiff.ForwardAccumulator( + ... dense.kernel, tf.constant([[0.], [1.]])) as acc: + ... kernel_fprop.append(acc.jvp(loss_fn())) + >>> with tf.autodiff.ForwardAccumulator(dense.bias, tf.constant([1.])) as acc: + ... bias_fprop = acc.jvp(loss_fn()) + >>> with tf.GradientTape() as tape: + ... loss = loss_fn() + >>> kernel_grad, bias_grad = tape.gradient(loss, (dense.kernel, dense.bias)) + >>> np.testing.assert_allclose( + ... kernel_grad, tf.stack(kernel_fprop)[:, tf.newaxis]) + >>> np.testing.assert_allclose(bias_grad, bias_fprop[tf.newaxis]) + + Implicit in the `tape.gradient` call is a length-one vector which + left-multiplies the Jacobian, a vector-Jacobian product. + + `ForwardAccumulator` maintains JVPs corresponding primal tensors it is + watching, derived from the original `primals` specified in the constructor. 
As + soon as a primal tensor is deleted, `ForwardAccumulator` deletes the + corresponding JVP. + + `acc.jvp(x)` retrieves `acc`'s JVP corresponding to the primal tensor `x`. It + does not perform any computation. `acc.jvp` calls can be repeated as long as + `acc` is accessible, whether the context manager is active or not. New JVPs + are only computed while the context manager is active. + + Note that `ForwardAccumulator`s are always applied in the order their context + managers were entered, so inner accumulators will not see JVP computation from + outer accumulators. Take higher-order JVPs from outer accumulators: + + >>> primal = tf.constant(1.1) + >>> with tf.autodiff.ForwardAccumulator(primal, tf.constant(1.)) as outer: + ... with tf.autodiff.ForwardAccumulator(primal, tf.constant(1.)) as inner: + ... primal_out = primal ** tf.constant(3.5) + >>> inner_jvp = inner.jvp(primal_out) + >>> inner_jvp # 3.5 * 1.1 ** 2.5 + + >>> outer.jvp(inner_jvp) # 3.5 * 2.5 * 1.1 ** 1.5 + + + Reversing the collection in the last line to instead retrieve + `inner.jvp(outer.jvp(primal_out))` will not work. + + Strict nesting also applies to combinations of `ForwardAccumulator` and + `tf.GradientTape`. More deeply nested `GradientTape` objects will ignore the + products of outer `ForwardAccumulator` objects. This allows (for example) + memory-efficient forward-over-backward computation of Hessian-vector products, + where the inner `GradientTape` would otherwise hold on to all intermediate + JVPs: + + >>> v = tf.Variable([1., 2.]) + >>> with tf.autodiff.ForwardAccumulator( + ... v, + ... # The "vector" in Hessian-vector product. + ... tf.constant([1., 0.])) as acc: + ... with tf.GradientTape() as tape: + ... y = tf.reduce_sum(v ** 3.) + ... 
backward = tape.gradient(y, v) + >>> backward # gradient from backprop + + >>> acc.jvp(backward) # forward-over-backward Hessian-vector product + + """ + + def __init__(self, primals, tangents): + """Specify tensors to watch and their Jacobian-vector products. + + Mathematically, `tangents` is a vector right-multiplying the Jacobian matrix + (a Jacobian-vector product) for the function computed while this accumulator + is active. Since JVPs are computed in forward mode as the computation + happens, this vector must be supplied in advance. + + Listing a single tensor multiple times in `primals` raises an + exception. Excluding a tensor from `primals` is equivalent to watching it + with a tangent tensor of zeros. + + Args: + primals: A tensor or nested structure of tensors to watch. + tangents: A tensor or nested structure of tensors, with the same nesting + structure as `primals`, with each element being a vector with the same + size as the corresponding primal element. + + Raises: + ValueError: If the same tensor or variable is specified multiple times in + `primals`. + """ + self._accumulator = pywrap_tfe.TFE_Py_ForwardAccumulatorNew(False) + self._recording = False + primal_ids = set() + for primal in nest.flatten(primals): + if id(primal) in primal_ids: + raise ValueError( + "Tensor {} was specified as a primal multiple times. This may " + "indicate an error. 
If it was intended, please sum the " + "corresponding tangents.") + primal_ids.add(id(primal)) + self._watch(primals, tangents) + + def __enter__(self): + self._push_accumulator() + return self + + def __exit__(self, typ, value, traceback): + if self._recording: + self._pop_accumulator() + + def _push_accumulator(self): + if self._recording: + raise ValueError("Accumulator is already recording.") + pywrap_tfe.TFE_Py_ForwardAccumulatorSetAdd(self._accumulator) + self._recording = True + + def _pop_accumulator(self): + if not self._recording: + raise ValueError("Accumulator is not recording.") + pywrap_tfe.TFE_Py_ForwardAccumulatorSetRemove(self._accumulator) + self._recording = False + + def _watch(self, primals, tangents): + """Ensures that `primals` are being traced by this accumulator. + + Mathematically, `tangents` is a vector right-multiplying the Jacobian matrix + (a Jacobian-vector product) for the function computed while this accumulator + is active. Since JVPs are computed in forward mode as the computation + happens, this vector must be supplied in advance. + + Watching a single tensor multiple times sums each of its `tangents`. Any + un-watched tensor has zeros for its tangent vector. + + Args: + primals: A Tensor or list of Tensors. + tangents: A Tensor or list of Tensors matching `primals`. + """ + + def _watch(primal, tangent): + if not primal.dtype.is_floating: + logging.log_first_n( + logging.WARN, "The dtype of the watched primal must be " + "floating (e.g. tf.float32), got %r", 5, primal.dtype) + tangent = ops.convert_to_tensor(tangent, dtype=primal.dtype) + if hasattr(primal, "handle"): + # Run convert_to_tensor to get the captured handle from whichever + # function we're running if necessary. 
+ primal = ops.convert_to_tensor(primal.handle) + pywrap_tfe.TFE_Py_ForwardAccumulatorWatch(self._accumulator, primal, + tangent) + + nest.map_structure(_watch, primals, tangents) + + def jvp(self, primals, unconnected_gradients=UnconnectedGradients.NONE): + """Fetches the Jacobian-vector product computed for `primals`. + + Note that this method performs no computation, and simply looks up a JVP + that was already computed (unlike backprop using a `tf.GradientTape`, where + the computation happens on the call to `tape.gradient`). + + Args: + primals: A watched Tensor or structure of Tensors to fetch the JVPs for. + unconnected_gradients: A value which can either hold 'none' or 'zero' and + alters the value which will be returned if no JVP was computed for + `primals`. The possible values and effects are detailed in + 'tf.UnconnectedGradients' and it defaults to 'none'. + + Returns: + Tensors with the same shapes and dtypes as `primals`, or None if no JVP + is available. + """ + unconnected_gradients = UnconnectedGradients(unconnected_gradients) + if self._accumulator is None: + raise ValueError("Called jvp() without first tracing anything.") + + def _fetch_jvp(tensor): + if hasattr(tensor, "handle"): + unwrapped_tensor = ops.convert_to_tensor(tensor.handle) + else: + unwrapped_tensor = tensor + result = pywrap_tfe.TFE_Py_ForwardAccumulatorJVP(self._accumulator, + unwrapped_tensor) + if result is None and unconnected_gradients == UnconnectedGradients.ZERO: + result = array_ops.zeros_like(tensor) + return result + + return nest.map_structure(_fetch_jvp, primals) + + @classmethod + def _batch_accumulator(cls, primals, tangents): + """Factory constructor to test accumulator on batches of tangents. + + Args: + primals: A tensor or nested structure of tensors to watch. 
+ tangents: A tensor or nested structure of tensors, with the same nesting + structure as `primals`, with each element being a vector with compatible + shape `[None] + primal.shape` of the corresponding primal element. + + Returns: + A batch accumulator object. + """ + acc = super(ForwardAccumulator, cls).__new__(cls, primals, tangents) + acc._recording = False + acc._accumulator = pywrap_tfe.TFE_Py_ForwardAccumulatorNew(True) + primal_ids = set() + for primal, tangent in zip(nest.flatten(primals), nest.flatten(tangents)): + tangent.shape.assert_is_compatible_with( + tensor_shape.TensorShape([None]) + primal.shape) + if id(primal) in primal_ids: + raise ValueError( + "Tensor {} was specified as a primal multiple times. This may " + "indicate an error. If it was intended, please sum the " + "corresponding tangents.") + primal_ids.add(id(primal)) + acc._watch(primals, tangents) + return acc diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/forwardprop_util.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/forwardprop_util.py new file mode 100644 index 0000000000000000000000000000000000000000..8ea224192acf6fe6fc8cb95f1fd299a7646f08b5 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/forwardprop_util.py @@ -0,0 +1,74 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Utilities for managing forward accumulators. + +A separate file from forwardprop.py so that functions can use these utilities. +""" + +import collections +import contextlib + +from tensorflow.python import pywrap_tfe + + +class TangentInfo( + collections.namedtuple("TangentInfo", ["indices", "tangents"])): + """Packed forward accumulator state. The return value of `pack_tangents`.""" + + def __new__(cls, indices=None, tangents=None): + if indices is None: + indices = () + if tangents is None: + tangents = [] + return super(TangentInfo, cls).__new__(cls, indices, tangents) + + +def pack_tangents(tensors): + """Packs forward accumulator state into a TangentInfo tuple. + + Args: + tensors: A flat list of Tensors to pack forward accumulator state for. + + Returns: + A tuple of (indices, tangents): + indices: A sequence of sequences of two-element tuples. Each forward + accumulator is represented as a sequence of tuples with (primal_index, + jvp_index). Both integers index into the concatenated `tensors + jvps` + array. + tangents: A flat list of Tensors. Best interpreted as a sequence to be + appended to `tensors`. + """ + return TangentInfo(*pywrap_tfe.TFE_Py_PackJVPs(tensors)) + + +@contextlib.contextmanager +def push_forwardprop_state(): + """Temporarily push or pop transient state for accumulators in the active set. + + Allows an accumulator which is currently processing an operation to + temporarily reset its state. This is useful when building forwardprop versions + of functions, where an accumulator will trigger function building and then + must process captured symbolic tensors while building it. Without pushing and + popping, accumulators ignore operations executed as a direct result of their + own jvp computations. + + Yields: + None (used for its side effect). 
+ """ + try: + pywrap_tfe.TFE_Py_ForwardAccumulatorPushState() + yield + finally: + pywrap_tfe.TFE_Py_ForwardAccumulatorPopState() diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/function.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/function.py new file mode 100644 index 0000000000000000000000000000000000000000..c4df3b680314dedcef57ff6802181cbd64ed38d5 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/function.py @@ -0,0 +1,37 @@ +# Copyright 2022 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Supports old symbols supplied by this file while the code is refactored.""" + +# pylint:disable=unused-import,g-bad-import-order + +# TODO(b/243822285): Reduce this list as much as possible. 
+# Constants +from tensorflow.python.eager.polymorphic_function.concrete_function import _BACKWARD_PREFIX +from tensorflow.python.eager.polymorphic_function.concrete_function import _FORWARD_PREFIX +from tensorflow.python.eager.polymorphic_function.concrete_function import _INFERENCE_PREFIX + +# Function Classes +from tensorflow.python.eager.polymorphic_function.concrete_function import ConcreteFunction +from tensorflow.python.eager.polymorphic_function.atomic_function import from_func_graph +from tensorflow.python.eager.polymorphic_function.atomic_function import AtomicFunction + +# Utilities +from tensorflow.python.eager.polymorphic_function.tf_method_target import TfMethodTarget +from tensorflow.python.eager.polymorphic_function.concrete_function import _inference_name + +# TODO(b/244360504): Remove in favor of graph transformation API. +# QUARANTINED - Function Callback Modification API +from tensorflow.python.eager.polymorphic_function.transform import FUNC_GRAPH_TRANSFORMS +from tensorflow.python.eager.polymorphic_function.transform import CONCRETE_FUNCTION_CALLBACKS diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/graph_only_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/graph_only_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..2d2d5c557fdeb423a8e38f3acc40a16fa30bcf3d --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/graph_only_ops.py @@ -0,0 +1,46 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Graph-only versions of a few op functions, for internal use only.""" + +# Must be separate from array_ops to avoid a cyclic dependency. + +from tensorflow.core.framework import attr_value_pb2 +from tensorflow.python.framework import op_callbacks +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_shape + + +def graph_placeholder(dtype, shape, name=None): + """Graph-only version of tf.compat.v1.placeholder(), for internal use only.""" + dtype = dtype.base_dtype + dtype_value = attr_value_pb2.AttrValue(type=dtype.as_datatype_enum) + if isinstance(shape, (list, tuple)): + shape = tensor_shape.TensorShape(shape) + shape = attr_value_pb2.AttrValue(shape=shape.as_proto()) + g = ops.get_default_graph() + attrs = {"dtype": dtype_value, "shape": shape} + op = g._create_op_internal( # pylint: disable=protected-access + "Placeholder", [], [dtype], input_types=[], + attrs=attrs, name=name) + result, = op.outputs + if op_callbacks.should_invoke_op_callbacks(): + # TODO(b/147670703): Once the special-op creation code paths + # are unified. Remove this `if` block. 
+ callback_outputs = op_callbacks.invoke_op_callbacks( + "Placeholder", tuple(), attrs, tuple(op.outputs), + op_name=name, graph=g) + if callback_outputs is not None: + result, = callback_outputs + return result diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/imperative_grad.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/imperative_grad.py new file mode 100644 index 0000000000000000000000000000000000000000..86d6e78b7fc316522ddc790318f9960485da7d7e --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/imperative_grad.py @@ -0,0 +1,73 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Code for backpropagation using the tape utilities.""" + +import collections + +from tensorflow.python import pywrap_tfe +from tensorflow.python.ops.unconnected_gradients import UnconnectedGradients +from tensorflow.python.util import compat + +VSpace = collections.namedtuple("VSpace", [ + "aggregate_fn", "num_elements_fn", "zeros_fn", "ones_fn", + "zeros_like_fn", "ones_like_fn", "graph_shape_fn" +]) + + +def imperative_grad(tape, + target, + sources, + output_gradients=None, + sources_raw=None, + unconnected_gradients=UnconnectedGradients.NONE): + """Computes gradients from the imperatively defined tape on top of the stack. 
+ + Works by filtering the tape, computing how many downstream usages are of each + tensor and entry, and repeatedly applying backward functions until we have + gradients for all sources. + + Args: + tape: the gradient tape which stores the trace. + target: either a Tensor or list of Tensors to be differentiated. + sources: list of Tensors for which we want gradients + output_gradients: if not None, a list of gradient provided for each Target, + or None if we are to use the target's computed downstream gradient. + sources_raw: if not None, a list of the source python objects from which the + sources were generated. Should have the same length as sources. Only needs + to be populated if unconnected_gradients is 'zero'. + unconnected_gradients: determines the value returned if the target and + sources are unconnected. When 'none' the value returned is None wheras when + 'zero' a zero tensor in the same shape as the sources is returned. + + Returns: + the gradient wrt each of the sources. + + Raises: + ValueError: if the arguments are invalid. + RuntimeError: if something goes wrong. + """ + try: + unconnected_gradients = UnconnectedGradients(unconnected_gradients) + except ValueError: + raise ValueError( + "Unknown value for unconnected_gradients: %r" % unconnected_gradients) + + return pywrap_tfe.TFE_Py_TapeGradient( + tape._tape, # pylint: disable=protected-access + target, + sources, + output_gradients, + sources_raw, + compat.as_str(unconnected_gradients.value)) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/lift_to_graph.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/lift_to_graph.py new file mode 100644 index 0000000000000000000000000000000000000000..7d7ac1b8e0dff20a420f699175fac5c33ab79b05 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/lift_to_graph.py @@ -0,0 +1,365 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# pylint: disable=unidiomatic-typecheck +"""Utility to lift subgraphs.""" + +import collections + +from tensorflow.python.framework import func_graph +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor as tensor_lib +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import op_selector +from tensorflow.python.ops import resource_variable_ops +from tensorflow.python.util import compat +from tensorflow.python.util import object_identity +from tensorflow.python.util.tf_export import tf_export + + +UnliftableError = op_selector.UnliftableError + + +def _as_operation(op_or_tensor): + if isinstance(op_or_tensor, tensor_lib.Tensor): + return op_or_tensor.op + return op_or_tensor + + +def _constant_inputs(op_or_tensor): + return all(_as_operation(i).type == u"Const" + and not _as_operation(i).control_inputs + for i in op_selector.graph_inputs(_as_operation(op_or_tensor))) + + +# Represents an input to `copied_op` which must be updated once +# `old_graph_tensor` has been copied. +_InputMutation = collections.namedtuple( + "_InputMutation", + ["copied_op", "input_index", "old_graph_tensor"]) + + +# Represents a control input to `copied_op` which must be added once +# `old_graph_op` has been copied. 
+_ControlMutation = collections.namedtuple( + "_ControlMutation", + ["copied_op", "old_graph_op"]) + + +def _copy_non_source(op, graph, op_map, base_graph): + """Copy an op directly to a given graph. + + Generally `op`'s inputs should already have been copied. If this is not the + case, for example with v1 while_loops, then `_copy_non_source` inserts + placeholders for the unavailable Tensors and returns a list of required + mutations. + + Args: + op: The op to be copied. + graph: The destination graph. + op_map: A dict mapping ops and tensors in the old graph to the new one. + base_graph: The graph we're copying from, for any necessary functions. + Returns: + A tuple of (required_inputs, required_control_inputs): + required_inputs: + A list of `_InputMutation` tuples containing inputs to `copied_op` which + must be updated once `old_graph_tensor` has been copied. + required_control_inputs: + A list of `_ControlMutation` tuples containing control inputs to + `copied_op` which must be added once `old_graph_op` has been copied. + """ + input_mutations = [] + control_mutations = [] + copied_inputs = [] + for input_index, original_input in enumerate(op.inputs): + copied_input = op_map.get(original_input, None) + if copied_input is None: + # An input for this op is missing due to a loop in the graph. We'll insert + # a placeholder for now and return information about the required post-hoc + # mutation. + copied_input = array_ops.placeholder( + name="unused_control_flow_input", + shape=original_input.shape, + dtype=original_input.dtype) + input_mutations.append( + # `copied_op` is filled in below, after we've created it. 
+ _InputMutation(copied_op=None, + input_index=input_index, + old_graph_tensor=original_input)) + copied_inputs.append(copied_input) + + copied_control_inputs = [] + for original_control_input in op.control_inputs: + copied_control_input = op_map.get(original_control_input, None) + if copied_control_input is None: + control_mutations.append( + _ControlMutation(copied_op=None, + old_graph_op=original_control_input)) + else: + copied_control_inputs.append(copied_control_input) + + # Don't copy over nodes with _tpu_replicate attribute. This attributed is used + # to signal that the op was built inside a tpu_replicate context; if we're + # lifting it to another graph we're similarly lifting it into another context. + with ops.control_dependencies(copied_control_inputs), ops.device(op.device): + # pylint: disable=protected-access + f = base_graph._functions.get(op.type, None) + if f is not None and compat.as_str(f.name) not in graph._functions: + f.add_to_graph(graph) + # pylint: enable=protected-access + + # Create a new op in the destination graph if it doesn't exist before. + copied_op = graph.create_op( + op_type=op.type, + inputs=copied_inputs, + dtypes=[x.dtype for x in op.outputs], + attrs={ + key: value for key, value in op.node_def.attr.items() + if not key.startswith("_class") and + not key.startswith("_tpu_replicate") + }, # b/128981532. + name=op.name) + op_map[op] = copied_op + for i, o in enumerate(op.outputs): + op_map[o] = copied_op.outputs[i] + + return ([mutation._replace(copied_op=copied_op) + for mutation in input_mutations], + [mutation._replace(copied_op=copied_op) + for mutation in control_mutations]) + + +def _copy_source(s, graph, op_map, handle_captures, inverse_captures, + base_graph): + """Create a source in a graph based on a Tensor from a different graph. 
+ + This function creates a placeholder analog of `s` in a graph with the + following behavior: + + 1) If s is a captured Tensor or Variable and handle_captures is set to True, + simply capture it in the new graph as well. + + 2) If s is a PlaceholderWithDefault whose default is a constant, preserve + said default in the new graph. + + 3) When applicable, copy resource variable metadata from `s` to the newly + created placeholder. + + Args: + s: The source of interest. + graph: The destination graph. + op_map: A dict mapping ops and tensors in the old graph to the new one. + handle_captures: A boolean indicating whether to re-capture s in the new + graph or simply create a vanilla placeholder. + inverse_captures: A dict mapping s back to the Tensor or Variable that it + captures. + base_graph: The graph being copied from. + """ + if handle_captures and s in inverse_captures: + copied_placeholder = graph.capture(inverse_captures[s], name=s.op.name) + elif s.op.type == "PlaceholderWithDefault" and _constant_inputs(s): + # Copy the default value to the graph. + default_value = s.op.inputs[0] + unavailable_inputs, unavailable_control_inputs = _copy_non_source( + op=default_value.op, graph=graph, op_map=op_map, + base_graph=base_graph) + if unavailable_inputs or unavailable_control_inputs: + raise AssertionError( + "Could not copy source node {} because it has inputs." 
+ .format(default_value)) + + with ops.device(s.op.device): + copied_placeholder = array_ops.placeholder_with_default( + input=op_map[default_value], shape=s.shape, name=s.op.name) + else: + with ops.device(s.op.device): + copied_placeholder = array_ops.placeholder( + dtype=s.dtype, shape=s.shape, name=s.op.name) + + base_handle = resource_variable_ops.get_resource_handle_data(s) + if base_handle.shape_and_type: + resource_variable_ops._set_handle_shapes_and_types( # pylint: disable=protected-access + copied_placeholder, + base_handle, + graph_mode=True) + + op_map[s] = copied_placeholder + # Add an entry for the op of the source tensor so that if there are any nodes + # depending on that op via control dependencies it can work correctly. + op_map[s.op] = copied_placeholder.op + + +@tf_export("__internal__.lift_to_graph", v1=[]) +def lift_to_graph(tensors, + graph, + sources=None, + disallowed_placeholders=None, + add_sources=False, + handle_captures=False, + base_graph=None, + op_map=None): + """Copies the tensor and all its inputs recursively to the outer graph. + + Args: + tensors: The Tensors to lift. + graph: The graph to lift to. + sources: Optional sequence of nodes to start from. If omitted the whole + subgraph which feeds into `init_tensor` is lifted. + disallowed_placeholders: An optional set of ops which may not appear in the + lifted graph. Defaults to all placeholders. + add_sources: A boolean indicating whether placeholders which are not in + sources should be allowed. + handle_captures: A boolean indicating whether to re-capture s in the new + graph or simply create a vanilla placeholder. + base_graph: The graph from which to lift ops. This will be inferred if not + specified. + op_map: A map contains all the existing nodes that have been lifted to the + destination graph, so they won't be lifted and copied again. + + Returns: + A mapping from ops in the current default graph to ops in `graph`. 
+ + Raises: + UnliftableError: If a placeholder blocks lifting. + """ + variable_init_tensors = [] + init_tensors = [] + for tensor in tensors: + if isinstance(tensor, resource_variable_ops.ResourceVariable): + variable_init_tensors.append(tensor) + else: + init_tensors.append(tensor) + base_graph = base_graph or init_tensors[0].graph + op_map = op_map or object_identity.ObjectIdentityDictionary() + + # Check that the initializer does not depend on any placeholders. + sources = object_identity.ObjectIdentitySet(sources or []) + visited_ops = set(x.op for x in sources) + op_outputs = collections.defaultdict(set) + + # First we extract the subgraph between init_tensors and sources. + for init_tensor in init_tensors: + sources.update(op_selector.map_subgraph( + init_tensor=init_tensor, + sources=sources, + disallowed_placeholders=disallowed_placeholders, + visited_ops=visited_ops, + op_outputs=op_outputs, + add_sources=add_sources)) + + # Try to topologically sort the nodes we've extracted. Now we know how many of + # their outputs are part of this subgraph. + ops_to_copy = [] + marked_ops = set([]) + ops_to_visit = [_as_operation(t) for t in init_tensors + if not op_outputs[_as_operation(t)]] + unvisited_ops = set(ops_to_visit) + while unvisited_ops: + while ops_to_visit: + op = ops_to_visit.pop() + if op in marked_ops: + continue + marked_ops.add(op) + ops_to_copy.append(op) + for inp in op_selector.graph_inputs(op): + # Don't lift the TPUReplicateMetadata nodes out of the function, because + # it has no registered kernels. + if inp.type == "TPUReplicateMetadata": + continue + unvisited_ops.add(inp) + if (all(x in marked_ops for x in op_outputs[inp]) and + inp not in sources): + ops_to_visit.append(inp) + unvisited_ops.difference_update(marked_ops) + if unvisited_ops: + # `unvisited_ops` should only have elements if the graph has a loop. In + # this case we want to keep copying and there's no topological ordering; + # we'll do ugly post-hoc mutations instead. 
+ ops_to_visit.append(next(iter(unvisited_ops))) + + # When the topological sort fails due to loops, it can result in exceptions + # later when copying a node which inputs haven't been copied yet. We can + # improve that pseudo-topological order slightly by putting the ops without + # inputs, such as constants, at the start of the topological order (i.e at + # the end of ops_to_copy). + ops_to_copy.sort(key=(lambda op: len(op_selector.graph_inputs(op)) == 0)) + + # When lifting from one FuncGraph to another, we will need to capture the + # relevant tensors as well. + captures = [] + inverse_captures = object_identity.ObjectIdentityDictionary() + internal_captures = [] + if (isinstance(base_graph, func_graph.FuncGraph) and + isinstance(graph, func_graph.FuncGraph)): + captures = base_graph.captures + for external_capture, internal_capture in captures: + inverse_captures[internal_capture] = external_capture + internal_captures = base_graph.internal_captures + + # ops_to_copy now holds a reverse topologically sorted list of ops which + # ends in the initializer. We copy those to the outermost graph and + # build the initialization op there. + with graph.as_default(): + for i in variable_init_tensors: + op_map[i] = i + source_ops = set() + # Add the sources in the same order as the original graph. 
+ for s in internal_captures: + if s in sources: + sources.remove(s) + source_ops.add(s.op) + _copy_source( + s=s, + graph=graph, + op_map=op_map, + handle_captures=handle_captures, + inverse_captures=inverse_captures, + base_graph=base_graph) + for s in sources: + source_ops.add(s.op) + _copy_source( + s=s, + graph=graph, + op_map=op_map, + handle_captures=handle_captures, + inverse_captures=inverse_captures, + base_graph=base_graph) + + input_mutations = [] + control_mutations = [] + for op in reversed(ops_to_copy): + if op in source_ops or op in op_map: + continue + new_input_mutations, new_control_mutations = _copy_non_source( + op=op, graph=graph, op_map=op_map, base_graph=base_graph) + input_mutations.extend(new_input_mutations) + control_mutations.extend(new_control_mutations) + + # Mutate the new graph to insert any loops which existed in the source + # graph due to v1 while_loops. + # + # pylint: disable=protected-access + with graph._mutation_lock(): + for mutation in input_mutations: + mutation.copied_op._update_input( + mutation.input_index, op_map[mutation.old_graph_tensor]) + for mutation in control_mutations: + # Don't lift the TPUReplicateMetadata nodes out of the function, because + # it has no registered kernels. 
+ if mutation.old_graph_op.type == "TPUReplicateMetadata": + continue + mutation.copied_op._add_control_input(op_map[mutation.old_graph_op]) + # pylint: enable=protected-access + + return op_map diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/memory_tests/__init__.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/memory_tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/memory_tests/__pycache__/__init__.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/memory_tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..43dd263a0a867ca53209e5d176ddc8f50c20d7c5 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/memory_tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/memory_tests/__pycache__/memory_test_util.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/memory_tests/__pycache__/memory_test_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2714b5702aa76b106ea675098b029edd2f79fe49 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/memory_tests/__pycache__/memory_test_util.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/memory_tests/memory_test_util.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/memory_tests/memory_test_util.py new file mode 100644 index 0000000000000000000000000000000000000000..7d584661b342b7efa4590d2a41811433d75abd72 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/memory_tests/memory_test_util.py @@ -0,0 +1,73 @@ +# Copyright 2019 
The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utils for memory tests.""" + +import collections +import gc +import time + +from tensorflow.python.eager import context + +# memory_profiler might not be available in the OSS version of TensorFlow. +try: + import memory_profiler # pylint:disable=g-import-not-at-top +except ImportError: + memory_profiler = None + + +def _instance_count_by_class(): + counter = collections.Counter() + + for obj in gc.get_objects(): + try: + counter[obj.__class__.__name__] += 1 + except Exception: # pylint:disable=broad-except + pass + + return counter + + +def assert_no_leak(f, num_iters=100000, increase_threshold_absolute_mb=25): + """Assert memory usage doesn't increase beyond given threshold for f.""" + + with context.eager_mode(): + # Warm up. + f() + + # Wait for background threads to start up and take over memory. + # FIXME: The nature of this test leaves few other options. Maybe there + # is a better way to do this. + time.sleep(4) + + gc.collect() + initial = memory_profiler.memory_usage(-1)[0] + instance_count_by_class_before = _instance_count_by_class() + + for _ in range(num_iters): + f() + + gc.collect() + increase = memory_profiler.memory_usage(-1)[0] - initial + + assert increase < increase_threshold_absolute_mb, ( + "Increase is too high. Initial memory usage: %f MB. Increase: %f MB. 
" + "Maximum allowed increase: %f MB. " + "Instance count diff before/after: %s") % ( + initial, increase, increase_threshold_absolute_mb, + _instance_count_by_class() - instance_count_by_class_before) + + +def memory_profiler_is_available(): + return memory_profiler is not None diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/monitoring.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/monitoring.py new file mode 100644 index 0000000000000000000000000000000000000000..b4d5d4e610f1b375f0d8a0a46d3ff6991406afea --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/monitoring.py @@ -0,0 +1,542 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""TensorFlow monitoring APIs.""" + +import collections +import functools +import time + +from tensorflow.core.framework import summary_pb2 +from tensorflow.python import pywrap_tfe +from tensorflow.python.client import pywrap_tf_session +from tensorflow.python.framework import c_api_util +from tensorflow.python.util import compat +from tensorflow.python.util.tf_export import tf_export + +_MetricMethod = collections.namedtuple('MetricMethod', 'create delete get_cell') +_counter_methods = [ + _MetricMethod( + create=pywrap_tfe.TFE_MonitoringNewCounter0, + delete=pywrap_tfe.TFE_MonitoringDeleteCounter0, + get_cell=pywrap_tfe.TFE_MonitoringGetCellCounter0), + _MetricMethod( + create=pywrap_tfe.TFE_MonitoringNewCounter1, + delete=pywrap_tfe.TFE_MonitoringDeleteCounter1, + get_cell=pywrap_tfe.TFE_MonitoringGetCellCounter1), + _MetricMethod( + create=pywrap_tfe.TFE_MonitoringNewCounter2, + delete=pywrap_tfe.TFE_MonitoringDeleteCounter2, + get_cell=pywrap_tfe.TFE_MonitoringGetCellCounter2), +] +_int_gauge_methods = [ + _MetricMethod( + create=pywrap_tfe.TFE_MonitoringNewIntGauge0, + delete=pywrap_tfe.TFE_MonitoringDeleteIntGauge0, + get_cell=pywrap_tfe.TFE_MonitoringGetCellIntGauge0), + _MetricMethod( + create=pywrap_tfe.TFE_MonitoringNewIntGauge1, + delete=pywrap_tfe.TFE_MonitoringDeleteIntGauge1, + get_cell=pywrap_tfe.TFE_MonitoringGetCellIntGauge1), + _MetricMethod( + create=pywrap_tfe.TFE_MonitoringNewIntGauge2, + delete=pywrap_tfe.TFE_MonitoringDeleteIntGauge2, + get_cell=pywrap_tfe.TFE_MonitoringGetCellIntGauge2), +] +_string_gauge_methods = [ + _MetricMethod( + create=pywrap_tfe.TFE_MonitoringNewStringGauge0, + delete=pywrap_tfe.TFE_MonitoringDeleteStringGauge0, + get_cell=pywrap_tfe.TFE_MonitoringGetCellStringGauge0), + _MetricMethod( + create=pywrap_tfe.TFE_MonitoringNewStringGauge1, + delete=pywrap_tfe.TFE_MonitoringDeleteStringGauge1, + 
get_cell=pywrap_tfe.TFE_MonitoringGetCellStringGauge1), + _MetricMethod( + create=pywrap_tfe.TFE_MonitoringNewStringGauge2, + delete=pywrap_tfe.TFE_MonitoringDeleteStringGauge2, + get_cell=pywrap_tfe.TFE_MonitoringGetCellStringGauge2), + _MetricMethod( + create=pywrap_tfe.TFE_MonitoringNewStringGauge3, + delete=pywrap_tfe.TFE_MonitoringDeleteStringGauge3, + get_cell=pywrap_tfe.TFE_MonitoringGetCellStringGauge3), + _MetricMethod( + create=pywrap_tfe.TFE_MonitoringNewStringGauge4, + delete=pywrap_tfe.TFE_MonitoringDeleteStringGauge4, + get_cell=pywrap_tfe.TFE_MonitoringGetCellStringGauge4), +] +_bool_gauge_methods = [ + _MetricMethod( + create=pywrap_tfe.TFE_MonitoringNewBoolGauge0, + delete=pywrap_tfe.TFE_MonitoringDeleteBoolGauge0, + get_cell=pywrap_tfe.TFE_MonitoringGetCellBoolGauge0), + _MetricMethod( + create=pywrap_tfe.TFE_MonitoringNewBoolGauge1, + delete=pywrap_tfe.TFE_MonitoringDeleteBoolGauge1, + get_cell=pywrap_tfe.TFE_MonitoringGetCellBoolGauge1), + _MetricMethod( + create=pywrap_tfe.TFE_MonitoringNewBoolGauge2, + delete=pywrap_tfe.TFE_MonitoringDeleteBoolGauge2, + get_cell=pywrap_tfe.TFE_MonitoringGetCellBoolGauge2), +] +_sampler_methods = [ + _MetricMethod( + create=pywrap_tfe.TFE_MonitoringNewSampler0, + delete=pywrap_tfe.TFE_MonitoringDeleteSampler0, + get_cell=pywrap_tfe.TFE_MonitoringGetCellSampler0), + _MetricMethod( + create=pywrap_tfe.TFE_MonitoringNewSampler1, + delete=pywrap_tfe.TFE_MonitoringDeleteSampler1, + get_cell=pywrap_tfe.TFE_MonitoringGetCellSampler1), + _MetricMethod( + create=pywrap_tfe.TFE_MonitoringNewSampler2, + delete=pywrap_tfe.TFE_MonitoringDeleteSampler2, + get_cell=pywrap_tfe.TFE_MonitoringGetCellSampler2), +] + + +class Metric(object): + """The base class of metric.""" + + __slots__ = ["_metric", "_metric_name", "_metric_methods", "_label_length"] + + def __init__(self, metric_name, metric_methods, label_length, *args): + """Creates a new metric. + + Args: + metric_name: name of the metric class. 
+ metric_methods: list of swig metric methods. + label_length: length of label args. + *args: the arguments to call create method. + """ + self._metric_name = metric_name + self._metric_methods = metric_methods + self._label_length = label_length + + if label_length >= len(self._metric_methods): + raise ValueError('Cannot create {} metric with label >= {}'.format( + self._metric_name, len(self._metric_methods))) + + self._metric = self._metric_methods[self._label_length].create(*args) + + def __del__(self): + try: + deleter = self._metric_methods[self._label_length].delete + metric = self._metric + except AttributeError: + return + + if deleter is not None: + deleter(metric) + + def get_cell(self, *labels): + """Retrieves the cell.""" + if len(labels) != self._label_length: + raise ValueError('The {} expects taking {} labels'.format( + self._metric_name, self._label_length)) + return self._metric_methods[self._label_length].get_cell( + self._metric, *labels) + + +class CounterCell(object): + """CounterCell stores each value of a Counter.""" + + __slots__ = ["_cell"] + + def __init__(self, cell): + """Creates a new CounterCell. + + Args: + cell: A c pointer of TFE_MonitoringCounterCell. + """ + self._cell = cell + + def increase_by(self, value): + """Atomically increments the value. + + Args: + value: non-negative value. + """ + pywrap_tfe.TFE_MonitoringCounterCellIncrementBy(self._cell, value) + + def value(self): + """Retrieves the current value.""" + return pywrap_tfe.TFE_MonitoringCounterCellValue(self._cell) + + +class Counter(Metric): + """A stateful class for updating a cumulative integer metric. + + This class encapsulates a set of values (or a single value for a label-less + metric). Each value is identified by a tuple of labels. The class allows the + user to increment each value. + """ + + __slots__ = [] + + def __init__(self, name, description, *labels): + """Creates a new Counter. + + Args: + name: name of the new metric. 
+ description: description of the new metric. + *labels: The label list of the new metric. + """ + super(Counter, self).__init__('Counter', _counter_methods, len(labels), + name, description, *labels) + + def get_cell(self, *labels): + """Retrieves the cell.""" + return CounterCell(super(Counter, self).get_cell(*labels)) + + +class IntGaugeCell(object): + """A single integer value stored in an `IntGauge`.""" + + __slots__ = ["_cell"] + + def __init__(self, cell): + """Creates a new IntGaugeCell. + + Args: + cell: A c pointer of TFE_MonitoringIntGaugeCell. + """ + self._cell = cell + + def set(self, value): + """Atomically set the value. + + Args: + value: integer value. + """ + pywrap_tfe.TFE_MonitoringIntGaugeCellSet(self._cell, value) + + def value(self): + """Retrieves the current value.""" + return pywrap_tfe.TFE_MonitoringIntGaugeCellValue(self._cell) + + +class IntGauge(Metric): + """A stateful class for updating a gauge-like integer metric. + + This class encapsulates a set of integer values (or a single value for a + label-less metric). Each value is identified by a tuple of labels. The class + allows the user to set each value. + """ + + __slots__ = [] + + def __init__(self, name, description, *labels): + """Creates a new IntGauge. + + Args: + name: name of the new metric. + description: description of the new metric. + *labels: The label list of the new metric. + """ + super(IntGauge, self).__init__('IntGauge', _int_gauge_methods, len(labels), + name, description, *labels) + + def get_cell(self, *labels): + """Retrieves the cell.""" + return IntGaugeCell(super(IntGauge, self).get_cell(*labels)) + + +class StringGaugeCell(object): + """A single string value stored in an `StringGauge`.""" + + __slots__ = ["_cell"] + + def __init__(self, cell): + """Creates a new StringGaugeCell. + + Args: + cell: A c pointer of TFE_MonitoringStringGaugeCell. + """ + self._cell = cell + + def set(self, value): + """Atomically set the value. + + Args: + value: string value. 
+ """ + pywrap_tfe.TFE_MonitoringStringGaugeCellSet(self._cell, value) + + def value(self): + """Retrieves the current value.""" + with c_api_util.tf_buffer() as buffer_: + pywrap_tfe.TFE_MonitoringStringGaugeCellValue(self._cell, buffer_) + value = pywrap_tf_session.TF_GetBuffer(buffer_).decode('utf-8') + return value + + +class StringGauge(Metric): + """A stateful class for updating a gauge-like string metric. + + This class encapsulates a set of string values (or a single value for a + label-less metric). Each value is identified by a tuple of labels. The class + allows the user to set each value. + """ + + __slots__ = [] + + def __init__(self, name, description, *labels): + """Creates a new StringGauge. + + Args: + name: name of the new metric. + description: description of the new metric. + *labels: The label list of the new metric. + """ + super(StringGauge, self).__init__('StringGauge', _string_gauge_methods, + len(labels), name, description, *labels) + + def get_cell(self, *labels): + """Retrieves the cell.""" + return StringGaugeCell(super(StringGauge, self).get_cell(*labels)) + + +class BoolGaugeCell(object): + """A single boolean value stored in an `BoolGauge`.""" + + __slots__ = ["_cell"] + + def __init__(self, cell): + """Creates a new BoolGaugeCell. + + Args: + cell: A c pointer of TFE_MonitoringBoolGaugeCell. + """ + self._cell = cell + + def set(self, value): + """Atomically set the value. + + Args: + value: bool value. + """ + pywrap_tfe.TFE_MonitoringBoolGaugeCellSet(self._cell, value) + + def value(self): + """Retrieves the current value.""" + return pywrap_tfe.TFE_MonitoringBoolGaugeCellValue(self._cell) + + +@tf_export("__internal__.monitoring.BoolGauge", v1=[]) +class BoolGauge(Metric): + """A stateful class for updating a gauge-like bool metric. + + This class encapsulates a set of boolean values (or a single value for a + label-less metric). Each value is identified by a tuple of labels. The class + allows the user to set each value. 
+ """ + + __slots__ = [] + + def __init__(self, name, description, *labels): + """Creates a new BoolGauge. + + Args: + name: name of the new metric. + description: description of the new metric. + *labels: The label list of the new metric. + """ + super(BoolGauge, self).__init__('BoolGauge', _bool_gauge_methods, + len(labels), name, description, *labels) + + def get_cell(self, *labels): + """Retrieves the cell.""" + return BoolGaugeCell(super(BoolGauge, self).get_cell(*labels)) + + +class SamplerCell(object): + """SamplerCell stores each value of a Sampler.""" + + __slots__ = ["_cell"] + + def __init__(self, cell): + """Creates a new SamplerCell. + + Args: + cell: A c pointer of TFE_MonitoringSamplerCell. + """ + self._cell = cell + + def add(self, value): + """Atomically add a sample. + + Args: + value: float value. + """ + pywrap_tfe.TFE_MonitoringSamplerCellAdd(self._cell, value) + + def value(self): + """Retrieves the current distribution of samples. + + Returns: + A HistogramProto describing the distribution of samples. + """ + with c_api_util.tf_buffer() as buffer_: + pywrap_tfe.TFE_MonitoringSamplerCellValue(self._cell, buffer_) + proto_data = pywrap_tf_session.TF_GetBuffer(buffer_) + histogram_proto = summary_pb2.HistogramProto() + histogram_proto.ParseFromString(compat.as_bytes(proto_data)) + return histogram_proto + + +class Buckets(object): + """Bucketing strategies for the samplers.""" + + __slots__ = ["buckets"] + + def __init__(self, buckets): + """Creates a new Buckets. + + Args: + buckets: A c pointer of TFE_MonitoringBuckets. + """ + self.buckets = buckets + + def __del__(self): + pywrap_tfe.TFE_MonitoringDeleteBuckets(self.buckets) + + +class ExponentialBuckets(Buckets): + """Exponential bucketing strategy. + + Sets up buckets of the form: + [-DBL_MAX, ..., scale * growth^i, + scale * growth_factor^(i + 1), ..., DBL_MAX]. + """ + + __slots__ = [] + + def __init__(self, scale, growth_factor, bucket_count): + """Creates a new exponential Buckets. 
+ + Args: + scale: float + growth_factor: float + bucket_count: integer + """ + super(ExponentialBuckets, self).__init__( + pywrap_tfe.TFE_MonitoringNewExponentialBuckets(scale, growth_factor, + bucket_count)) + + +class Sampler(Metric): + """A stateful class for updating a cumulative histogram metric. + + This class encapsulates a set of histograms (or a single histogram for a + label-less metric) configured with a list of increasing bucket boundaries. + Each histogram is identified by a tuple of labels. The class allows the + user to add a sample to each histogram value. + """ + + __slots__ = [] + + def __init__(self, name, buckets, description, *labels): + """Creates a new Sampler. + + Args: + name: name of the new metric. + buckets: bucketing strategy of the new metric. + description: description of the new metric. + *labels: The label list of the new metric. + """ + super(Sampler, self).__init__('Sampler', _sampler_methods, len(labels), + name, buckets.buckets, description, *labels) + + def get_cell(self, *labels): + """Retrieves the cell.""" + return SamplerCell(super(Sampler, self).get_cell(*labels)) + + +# Keeping track of current MonitoredTimer sections to prevent repetitive +# counting. +MonitoredTimerSections = [] + + +class MonitoredTimer(object): + """A context manager to measure the walltime and increment a Counter cell.""" + + __slots__ = [ + "cell", + "t", + "monitored_section_name", + "_counting", + "_avoid_repetitive_counting", + ] + + def __init__( + self, cell, monitored_section_name=None, avoid_repetitive_counting=False + ): + """Creates a new MonitoredTimer. + + Args: + cell: the cell associated with the time metric that will be inremented. + monitored_section_name: name of action being monitored here. + avoid_repetitive_counting: when set to True, if already in a monitored + timer section with the same monitored_section_name, skip counting. 
+ """ + self.cell = cell + self.monitored_section_name = monitored_section_name + self._avoid_repetitive_counting = avoid_repetitive_counting + self._counting = True + + def __enter__(self): + if ( + self._avoid_repetitive_counting + and self.monitored_section_name + and self.monitored_section_name in MonitoredTimerSections + ): + self._counting = False + return self + + self.t = time.time() + if self.monitored_section_name: + MonitoredTimerSections.append(self.monitored_section_name) + + return self + + def __exit__(self, exception_type, exception_value, traceback): + del exception_type, exception_value, traceback + if self._counting: + micro_seconds = (time.time() - self.t) * 1000000 + self.cell.increase_by(int(micro_seconds)) + if self.monitored_section_name: + MonitoredTimerSections.remove(self.monitored_section_name) + + +def monitored_timer(cell): + """A function decorator for adding MonitoredTimer support. + + Args: + cell: the cell associated with the time metric that will be inremented. + Returns: + A decorator that measure the function runtime and increment the specified + counter cell. 
+ """ + + def actual_decorator(func): + + @functools.wraps(func) + def wrapper(*args, **kwargs): + with MonitoredTimer(cell): + return func(*args, **kwargs) + + return wrapper + + return actual_decorator diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__init__.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/__init__.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f42a39243d8cba772c90fd37cdc1b4294573ab5 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/__init__.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/atomic_function.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/atomic_function.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..03866675b8ea0d7fdc6cd169417d9d3177ca5332 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/atomic_function.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/attributes.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/attributes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1d068abab5a43cc9c35afde7c04ad107870e1b05 Binary files /dev/null and 
b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/attributes.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/autograph_util.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/autograph_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91371a52f1baf81df5ea0fb5e1be0da3e9928de9 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/autograph_util.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/compiler_ir.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/compiler_ir.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b931e34608d4fc4002c6a572bd0e74fa5e0a4dae Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/compiler_ir.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/composite_tensor_utils.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/composite_tensor_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..21e84e6fad628610d8946c9dca341b29746a3103 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/composite_tensor_utils.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/concrete_function.cpython-310.pyc 
b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/concrete_function.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1f8c87a06c5dc1baf444726cab1c2aa1d966facb Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/concrete_function.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/eager_function_run.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/eager_function_run.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ed9a4119099f14b1f34e4adf122935afea8cf5d Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/eager_function_run.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/function_context.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/function_context.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..519987a3e12982196fee4eebb1b3e538ea8c0680 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/function_context.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/function_type_utils.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/function_type_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c864b21074d77be16a664446eab0f69476f194d Binary files /dev/null and 
b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/function_type_utils.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/polymorphic_function.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/polymorphic_function.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92322d866689eed0ba4c2f4d95113e5135f5ca56 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/polymorphic_function.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/saved_model_exported_concrete.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/saved_model_exported_concrete.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bbe9d2d22ed36bd70452bedfa88bc225b8de5b22 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/saved_model_exported_concrete.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/saved_model_utils.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/saved_model_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b232b6180757ccef1aec34f9b9bbf89e9fc57de Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/saved_model_utils.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/tf_method_target.cpython-310.pyc 
b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/tf_method_target.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c65a652c1c4cb1d70f37d6f7444d3d29398f1b90 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/tf_method_target.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/tracing_compilation.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/tracing_compilation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..74cacc607944c6add1809b259a448324be398bd5 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/tracing_compilation.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/transform.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/transform.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e1b520d6f482281c0458c51831100abdc2bc6982 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/__pycache__/transform.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/atomic_function.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/atomic_function.py new file mode 100644 index 0000000000000000000000000000000000000000..3493fff2541ee07cd60e37837232b5123f39600e --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/atomic_function.py @@ -0,0 +1,693 @@ +# Copyright 2023 The 
TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Implementation for AtomicFunction.""" + +import dataclasses +import traceback +import typing +from typing import Any, Dict, List, Optional, Sequence, Union + +from tensorflow.core.framework import attr_value_pb2 +from tensorflow.core.framework import function_pb2 +from tensorflow.core.framework import graph_debug_info_pb2 +from tensorflow.core.function.polymorphism import function_type as function_type_lib +from tensorflow.python.client import pywrap_tf_session +from tensorflow.python.eager import context +from tensorflow.python.eager import record +from tensorflow.python.eager.polymorphic_function import attributes as attributes_lib +from tensorflow.python.eager.polymorphic_function import function_type_utils +from tensorflow.python.framework import auto_control_deps_utils as acd +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import error_interpolation +from tensorflow.python.framework import errors +from tensorflow.python.framework import func_graph as func_graph_module +from tensorflow.python.framework import function_def_to_graph +from tensorflow.python.framework import ops +from tensorflow.python.ops import handle_data_util +from tensorflow.python.types import core +from tensorflow.python.util import compat +from tensorflow.python.util import function_utils 
+from tensorflow.python.util import tf_stack + + +# TODO(fmuham): Should be lowered to FunctionDef/FunctionRecord. +@dataclasses.dataclass(frozen=True) +class CallOptions: + """Specifies additional configuration for an AtomicFunction call.""" + + # Used by ACD to identify the CollectiveManager this function is scoped in. + collective_manager_ids_used: List[int] = dataclasses.field( + default_factory=list + ) + + # Used by ACD to list Ops/Tensors/Callables that must be called in advance. + control_captures: List[Any] = dataclasses.field(default_factory=list) + + # Determines what kind of partitoned call is used for this function. + is_stateful: bool = False + + +# Maps the (scope_id, name) in runtime to associated AtomicFunctions. +RUNTIME_FUNCTION_REFS = {} + + +class AtomicFunction(core.AtomicFunction): + """A Python callable for functions in the TF Runtime. + + Provides core functionality for tf.function including: + - automatic lifecycle management of runtime functions + - structured inputs (including captures) and structured outputs + - calls from both eager and graph mode + - dependency tracking of children functions + - runtime error interpolation to identify user code stack traces + - control dependencies (including automatic) + """ + + __slots__ = [ + "_name", + "_bound_context", + "_function_type", + "_children", + "_call_options", + "_cached_definition", + "_cached_graph", + "_generated_graph", + ] + + def __init__( + self, + name: Union[str, bytes], + bound_context: context.Context, + function_type: function_type_lib.FunctionType, + children: Optional[List["AtomicFunction"]] = None, + call_options: CallOptions = CallOptions(), + cached_graph: Optional[func_graph_module.FuncGraph] = None, + ): + """Construct a new AtomicFunction. + + Args: + name: str/bytes name of the runtime function in the bound context. + bound_context: interface to the runtime for the AtomicFunction. 
+ function_type: input/output contract for the AtomicFunction + children: list of AtomicFunctions that are needed to call this one. + call_options: extra configuration options for the call. + cached_graph: FuncGraph that this AtomicFunction was generated from (if + known). Otherwise it will lazily construct a new corresponding FuncGraph + if ever needed. + """ + self._name = compat.as_bytes(name) + self._bound_context = bound_context + self._function_type = function_type + self._children = children if children else [] + self._call_options = call_options + self._cached_definition = None + + self._cached_graph = cached_graph + self._generated_graph = None + + ref_key = (self._bound_context.function_scope_id, self.name) + if ref_key not in RUNTIME_FUNCTION_REFS: + RUNTIME_FUNCTION_REFS[ref_key] = 1 + else: + RUNTIME_FUNCTION_REFS[ref_key] += 1 + + @property + def name(self) -> bytes: + """Name represented in UTF-8 encoded bytes.""" + return self._name + + @property + def function_type(self) -> function_type_lib.FunctionType: + """Represents the input/output contract of this function.""" + return self._function_type + + @property + def children(self) -> List["AtomicFunction"]: + """AtomicFunctions needed as dependencies for this one.""" + return self._children + + @property + def definition(self) -> function_pb2.FunctionDef: + """Current FunctionDef in the Runtime.""" + return self._bound_context.get_function_def(self.name) + + @property + def attributes(self) -> Any: + """Returns FunctionDef attributes in the Runtime.""" + attrs = self.definition.attr + # Remove construction context since it is specific to runtime and this fn. 
+ attrs.pop(attributes_lib.EAGER_RUNTIME_CONSTRUCTION_CONTEXT, None) + return attrs + + @property + def graph_debug_info(self) -> graph_debug_info_pb2.GraphDebugInfo: + """A GraphDebugInfo proto mapping nodes to corresponding stack traces.""" + return self._bound_context.get_graph_debug_info(self.name) + + @property + def call_options(self) -> CallOptions: + """Call options declared for this AtomicFunction.""" + return self._call_options + + @property + def graph_call_attrs(self) -> Dict[str, Any]: + """Returns a dictionary of attributes needed to add a call in graph.""" + attrs = { + "is_stateful": self.call_options.is_stateful, + "tout": [ + o.dtype.as_datatype_enum for o in self.function_type.flat_outputs + ], + "xla_compile_attr": self.cached_definition.attr.get( + attributes_lib.XLA_COMPILE, None + ), + } + attrs.update(self._bound_context.function_call_options.as_attrs()) + return attrs + + @property + def _c_func(self) -> Any: + """Returns a scoped pybind object containing FunctionRecord in runtime.""" + return self._bound_context.get_c_function(self.name) + + # TODO(fmuham): Move caching to dependent code and remove method. + @property + def cached_definition(self) -> function_pb2.FunctionDef: + """Cached FunctionDef (not guaranteed to be fresh).""" + if self._cached_definition is None: + self._cached_definition = self.definition + + return self._cached_definition + + @property + def graph(self) -> func_graph_module.FuncGraph: + """Returns a FuncGraph corresponding to the AtomicFunction.""" + if self._cached_graph: + return self._cached_graph + + # Lazily generate the graph if one is not specified. 
+ if not self._generated_graph: + self._generated_graph = to_func_graph(self) + + return self._generated_graph + + def call_with_captures( + self, args: Sequence[Any], kwargs: Dict[str, Any], captures: Sequence[Any] + ) -> Any: + """Calls with args, kwargs, captures and returns structured output.""" + bound_parameters = self.function_type.bind(*args, **kwargs) + tensor_inputs = self.function_type.unpack_inputs(bound_parameters) + capture_inputs = self.function_type.unpack_captures(captures) + return self.call_preflattened(tensor_inputs + capture_inputs) + + def call_preflattened(self, args: Sequence[core.Tensor]) -> Any: + """Calls with flattened tensor inputs and returns the structured output.""" + flat_outputs = self.call_flat(*args) + return self.function_type.pack_output(flat_outputs) + + def call_flat(self, *args: core.Tensor) -> Sequence[core.Tensor]: + """Calls with flat tensor inputs and returns flat tensor outputs. + + Args: + *args: arguments to call this function with. + + Returns: + The outputs of the function call. + + Raises: + ValueError: if the number of arguments is incorrect. + FunctionAlreadyGarbageCollectedError: if the function is no longer + available to be called because it has been garbage collected. + """ + expected_len = len(self.cached_definition.signature.input_arg) + if len(args) != expected_len: + raise ValueError( + f"Signature specifies {expected_len} arguments, got: {len(args)}." + f" Expected inputs: {self.cached_definition.signature.input_arg}." + f" Received inputs: {args}." + f" Function Type: {self.function_type!r}" + ) + + with InterpolateRuntimeError(self): + with ops.control_dependencies(self._call_options.control_captures): + # The caller must use record_operation to record this operation in the + # eager case, so we enforce the same requirement for the non-eager + # case by explicitly pausing recording. 
We don't have a gradient + # registered for PartitionedCall, so recording this operation confuses + # forwardprop code (GradientTape manages to ignore it). + with record.stop_recording(): + if self._bound_context.executing_eagerly(): + outputs = self._bound_context.call_function( + self.name, + list(args), + len(self.function_type.flat_outputs), + ) + else: + outputs = make_call_op_in_graph( + self, + list(args), + self._bound_context.function_call_options.as_attrs(), + ) + + for i, output_type in enumerate(self.function_type.flat_outputs): + handle_data = output_type.dtype._handle_data # pylint: disable=protected-access + if handle_data: + handle_data_util.set_handle_data( + outputs[i], handle_data.shape_inference + ) + + # TODO(fmuham): Use FunctionType cast here for all cases. + if not self._bound_context.executing_eagerly(): + for i, output_type in enumerate(self.function_type.flat_outputs): + outputs[i].set_shape(output_type.shape) + + return outputs + + def __call__(self, *args, **kwargs) -> Any: + if self.function_type.captures: + raise ValueError( + "The FunctionType defines captured inputs. Use call_with_captures" + " instead." + ) + + return self.call_with_captures(args, kwargs, []) + + def __del__(self): + if self._generated_graph: + func_graph_module.dismantle_func_graph(self._generated_graph) + + key = (self._bound_context.function_scope_id, self.name) + RUNTIME_FUNCTION_REFS[key] -= 1 + if RUNTIME_FUNCTION_REFS[key] < 0: + raise RuntimeError( + f"AtomicFunction Refcounting for {self.name} is invalid." + ) + + if RUNTIME_FUNCTION_REFS[key] == 0: + try: + self._bound_context.remove_function(self.name) + RUNTIME_FUNCTION_REFS.pop(key) + except TypeError: + # Suppress some exceptions, mainly for the case when we're running on + # module deletion. Things that can go wrong include the context module + # already being unloaded, self._handle._handle_data no longer being + # valid, and so on. 
Printing warnings in these cases is silly + # (exceptions raised from __del__ are printed as warnings to stderr). + pass # 'NoneType' object is not callable when the handle has been + # partially unloaded. + except AttributeError: + pass # 'NoneType' object has no attribute 'eager_mode' when context has + # been unloaded. Will catch other module unloads as well. + + def __str__(self): + return f" {compat.as_str(self.name)}{self.function_type}" + + def __repr__(self): + return ( + f"AtomicFunction(name={self.name},\n" + f"bound_context={self._bound_context},\n" + f"function_type={self.function_type!r},\n" + f"children={self._children!s},\n" + f"call_options={self._call_options},\n" + f"cached_graph={self._cached_graph})" + ) + + +def _set_read_only_resource_inputs_attr( + op: ops.Operation, func_graph: func_graph_module.FuncGraph +): + """Sets the list of resource inputs which are read-only. + + This is used by AutomaticControlDependencies. + + Args: + op: PartitionedCall Operation. + func_graph: FuncGraph. + """ + read_only_indices = acd.get_read_only_resource_input_indices_graph(func_graph) + ops.set_int_list_attr( + op, acd.READ_ONLY_RESOURCE_INPUTS_ATTR, read_only_indices + ) + + +def partitioned_call_op( + name: str, + args: Sequence[core.Tensor], + is_stateful: bool, + tout: Sequence[Any], + config: Any = None, + executor_type: Optional[str] = None, + xla_compile_attr: Any = None, +) -> ops.Operation: + """Generates a function call op respecting device annotations. + + Args: + name: Name of the function to call. + args: The arguments of the function, including captured inputs. + is_stateful: If the function is stateful. + tout: a list containing the output dtypes enums + config: (Optional) A `tensorflow::ConfigProto` proto, serialized. If `None`, + all optimizations are disabled. Currently only handled for eager defined + functions. + executor_type: (Optional) A string for the name of the executor to be used + in the function call. 
If not set, or set to an empty string, the default + tensorflow executor will be used. + xla_compile_attr: (Optional) value of the XLA compilation attribute. + + Returns: + Returns the operation. + """ + if config is None: + config = function_utils.get_disabled_rewriter_config() + + if executor_type is None: + executor_type = "" + + # The generated binding returns an empty list for functions that don't + # return any Tensors, hence the need to use `create_op` directly. + args = [ops.convert_to_tensor(x) for x in args] + tin_attr = attr_value_pb2.AttrValue( + list=attr_value_pb2.AttrValue.ListValue( + type=[x.dtype.as_datatype_enum for x in args] + ) + ) + tout_attr = attr_value_pb2.AttrValue( + list=attr_value_pb2.AttrValue.ListValue(type=tout) + ) + func_attr = attr_value_pb2.AttrValue( + func=attr_value_pb2.NameAttrList(name=name) + ) + executor_type_attr = attr_value_pb2.AttrValue( + s=compat.as_bytes(executor_type) + ) + + # When running in graph mode, the graph and function graphs are optimized + # (i.e. run through grappler) per the session options, so we can disable any + # eager-specific rewriting. + config_proto = attr_value_pb2.AttrValue(s=config) + + op_name = "StatefulPartitionedCall" if is_stateful else "PartitionedCall" + + # Propagate the attribute indicating the need to compile from function to the + # call itself. 
+ op_attrs = { + "Tin": tin_attr, + "Tout": tout_attr, + "f": func_attr, + "config_proto": config_proto, + "executor_type": executor_type_attr, + } + if xla_compile_attr is not None: + op_attrs[attributes_lib.XLA_COMPILE] = xla_compile_attr + + op = ops.get_default_graph().create_op( + op_name, args, tout, name=op_name, attrs=op_attrs + ) + return op + + +def make_call_op_in_graph( + atomic: AtomicFunction, + tensor_inputs: Sequence[core.Tensor], + context_call_attrs: Dict[str, Any], +): + """Adds an AtomicFunction to graph.""" + graph = ops.get_default_graph() + graph._add_function_recursive(atomic) # pylint: disable=protected-access + + op = partitioned_call_op( + name=atomic.name, + args=tensor_inputs, + is_stateful=atomic.call_options.is_stateful, + tout=[ + o.dtype.as_datatype_enum for o in atomic.function_type.flat_outputs + ], + config=context_call_attrs["config_proto"], + executor_type=context_call_attrs["executor_type"], + xla_compile_attr=atomic.cached_definition.attr.get( + attributes_lib.XLA_COMPILE, None + ), + ) + _set_read_only_resource_inputs_attr(op, atomic.graph) + + ops.set_int_list_attr( + op, + acd.COLLECTIVE_MANAGER_IDS, + atomic._call_options.collective_manager_ids_used, # pylint: disable=protected-access + ) + + return op.outputs + + +def from_function_def( + function_def: function_pb2.FunctionDef, + function_type: function_type_lib.FunctionType, +) -> AtomicFunction: + """Create a new AtomicFunction from FunctionDef + FunctionType.""" + bound_context = context.context() + if bound_context.has_function(compat.as_bytes(function_def.signature.name)): + raise ValueError("Function already registered in context.") + + bound_context.add_function_def(function_def) + return AtomicFunction( + function_def.signature.name, bound_context, function_type + ) + + +def from_func_graph( + name: Union[str, bytes], + graph: func_graph_module.FuncGraph, + attrs: Dict[str, attr_value_pb2.AttrValue], + function_type: Optional[function_type_lib.FunctionType] = 
None, + overwrite: bool = False, +) -> AtomicFunction: + """Initializes an AtomicFunction from FuncGraph. + + Args: + name: str, the name for the created function. + graph: Graph, the graph containing the operations in the function + attrs: dict mapping names of attributes to their AttrValue values + function_type: known FunctionType to use, otherwise one is derived. + overwrite: overwrites function definition in the current context if needed + + Returns: + An AtomicFunction instance. + """ + if attrs and attributes_lib.IMPLEMENTS in attrs: + # The alternative is to silently drop "implements" tag + # but it seems likely it would lead to hard to catch bugs. + # Another alternative is to make func_body to preserve the order + # of arguments if variables are present. Yet another option + # is to automatically replace variables as arguments to functions + # to v.read_value() whenever "implements" tag is present + # Anytime we annotate existing function we probably want to wrap + # it with safe read_value for backward compatibility. + has_resource_vars = any( + inp.dtype == dtypes.resource for inp in graph.inputs + ) + + captured_inputs = graph.external_captures + graph.deferred_external_captures + assert not any( + (has_resource_vars, captured_inputs) + ), ( + 'Function {name} has "{attr}={value}" attribute and thus can not ' + "depend on any tensors outside of its signature or modify variables. 
" + "\n\nNote: variables are always captured and cause function " + "re-tracing for every variable called.\n" + " inputs: {inputs}\n captures: {captured}\n\n" + "To pass a variable to such function use " + "use variable.read_value().".format( + name=graph.name, + attr=attributes_lib.IMPLEMENTS, + value=attrs[attributes_lib.IMPLEMENTS], + inputs=graph.inputs, + captured=captured_inputs, + ) + ) + + input_ops = set(arg.op for arg in graph.inputs) + operations = [op for op in graph.get_operations() if op not in input_ops] + + graph_output_names = graph._output_names # pylint: disable=protected-access + if graph_output_names is not None and all( + ops.tensor_id(t) in graph_output_names for t in graph.outputs + ): + output_names = [ + compat.as_bytes(graph_output_names[ops.tensor_id(t)]) + for t in graph.outputs + ] + if len(set(output_names)) != len(output_names): + # There are duplicate names for some reason, probably an invalid + # signature. Revert to auto-naming. + output_names = [] + else: + output_names = [] + with graph._c_graph.get() as c_graph: # pylint: disable=protected-access + fn = pywrap_tf_session.TF_GraphToFunction_wrapper( + c_graph, + compat.as_str(name), + False, + [o._c_op for o in operations], # pylint: disable=protected-access + [t._as_tf_output() for t in graph.inputs], # pylint: disable=protected-access + [t._as_tf_output() for t in graph.outputs], # pylint: disable=protected-access + output_names, + [o._c_op for o in graph.control_outputs], # pylint: disable=protected-access + [], # control_output_names + None, + compat.as_str(""), + ) + + attrs = attributes_lib.parse_func_attrs(attrs or {}) + for attr_name, attr_value in attrs.items(): + serialized = attr_value.SerializeToString() + pywrap_tf_session.TF_FunctionSetAttrValueProto( + fn, compat.as_str(attr_name), serialized + ) + + name = compat.as_bytes(name) + bound_context = context.context() + + if overwrite and bound_context.has_function(name): + bound_context.remove_function(name) + + 
bound_context.add_c_function(fn) + pywrap_tf_session.TF_DeleteFunction(fn) + + call_options = CallOptions( + collective_manager_ids_used=getattr( + graph, "collective_manager_ids_used", [] + ), + control_captures=graph.function_captures.control, + is_stateful=any(op._is_stateful for op in operations), # pylint: disable=protected-access + ) + + if not function_type: + function_type = function_type_utils.derive_from_graph(graph) + + return AtomicFunction( + name, + bound_context, + function_type, + list(graph._functions.values()), # pylint: disable=protected-access, + call_options, + cached_graph=graph, + ) + + +def to_func_graph(atomic: AtomicFunction) -> func_graph_module.FuncGraph: + """Generate a FuncGraph from an AtomicFunction.""" + # pylint: disable=protected-access + input_signature, output_signature = function_type_lib.to_structured_signature( + atomic.function_type + ) + + with ops.Graph().as_default(): + # Insert dependencies in the default graph so the new graph can pull them. 
+ for f in atomic.children: + ops.get_default_graph()._add_function(f) + + result = function_def_to_graph.function_def_to_graph( + atomic.definition, + structured_input_signature=input_signature, + structured_outputs=output_signature, + propagate_device_spec=True, + include_library_functions=False, + ) + for f in atomic.children: + result._add_function(f) + + # Set input shapes and handle data + for i, input_type in enumerate(atomic.function_type.flat_inputs): + handle_data = input_type.dtype._handle_data + if handle_data: + handle_data_util.set_handle_data( + result.inputs[i], handle_data.shape_inference + ) + result.inputs[i].set_shape(input_type.shape) + + # Set output shapes and handle data + for i, output_type in enumerate(atomic.function_type.flat_outputs): + handle_data = output_type.dtype._handle_data + if handle_data: + handle_data_util.set_handle_data( + result.outputs[i], handle_data.shape_inference + ) + result.outputs[i].set_shape(output_type.shape) + + result.collective_manager_ids_used = ( + atomic.call_options.collective_manager_ids_used, + ) + + # pylint: enable=protected-access + return result + + +class InterpolateRuntimeError(object): + """Context Manager that interpolates exceptions received by AtomicFunction.""" + + DENY_LIST_PHRASES = ["") + + error_message.append(message.strip()) + return "\n".join(error_message) + + def __enter__(self): + pass + + def __exit__(self, typ, exc, tb): + if not exc or not isinstance(exc, errors.OpError): + return False + exc = typing.cast(errors.OpError, exc) + message = compat.as_text(exc.message) + parsed_message, func_tags, node_tags = error_interpolation.parse_message( + message + ) + deepest_func = None + for func_tag in func_tags: + if func_tag.name == compat.as_str(self._func.name): + deepest_func = self._func + elif deepest_func: + next_func = None + for child_func in deepest_func.children: + if func_tag.name == compat.as_str(child_func.name): + next_func = child_func + break + if next_func is not None 
and isinstance(next_func, AtomicFunction): + deepest_func = next_func + if deepest_func: + exc._message = self.interpolate( + parsed_message, + [t.name for t in node_tags], + deepest_func.graph_debug_info, + ) + return False diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/attributes.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/attributes.py new file mode 100644 index 0000000000000000000000000000000000000000..2e9be8a94c9c98c3fc926ad66e915c60b6566f5f --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/attributes.py @@ -0,0 +1,182 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""This file lists FunctionDef attributes and corresponding allowlists.""" + +from tensorflow.core.framework import attr_value_pb2 +from tensorflow.python.util import compat + +# IMPORTANT: The usage of all the attributes below should be considered tech +# debt and new additions to this list are discouraged. +# +# Historically, attributes have been used as means to pipe extra information +# down to runtime that is not related to the actual function definition itself. +# +# This information is better layered independently and future work is encouraged +# to pursue that direction instead. 
+ +API_IMPLEMENTS = "api_implements" +API_PREFERRED_DEVICE = "api_preferred_device" +BACKWARD_FUNCTION = "backward_function_name" +DISABLE_ACD = "_disable_acd" +DISABLE_CALL_SHAPE_INFERENCE = "_disable_call_shape_inference" +DISABLE_SUMMARIES_AT_RUNTIME = "disable_summaries_at_runtime" +EAGER_RUNTIME_CONSTRUCTION_CONTEXT = "_construction_context" +FORWARD_FUNCTION = "forward_function_name" +GO_BACKWARDS = "go_backwards" +IMPLEMENTS = "_implements" +INPUT_SHAPES = "_input_shapes" +INTS_ON_DEVICE = "experimental_ints_on_device" +NO_INLINE = "_noinline" +ORIGINAL_FUNCTION_NAME = "_original_func_name" +OUTPUTS_ON_OP_DEVICE = "_OutputsOnOpDevice" +QUANTIZED_COMPOSITE_FUNCTION = "tf_quant.composite_function" +QUANTIZED_OPS = "tf_quant.quantized_ops" +RUNTIME_CONSTANT_OPTIMIZATION = "runtime_constant_optimization" +SHARED_RENDEZVOUS = "shared_rendezvous" +TF_DATA_FUNCTION = "_tf_data_function" +TFTRT_ALLOW_BUILD_AT_RUNTIME = "_tftrt_allow_build_at_runtime" +TFTRT_CONVERT_FUNCTION = "_tftrt_convert_function" +TFTRT_IS_DYN_OP = "_tftrt_is_dyn_op" +TFTRT_LOGGER = "_tftrt_trt_logger_name" +TFTRT_MAX_BATCH_SIZE = "_tftrt_max_batch_size" +TFTRT_MAX_CACHED_ENGINES = "_tftrt_max_cached_engines" +TFTRT_MAX_WORKSPACE_SIZE = "_tftrt_max_workspace_size_bytes" +TFTRT_MIN_SEGMENT_SIZE = "_tftrt_minimum_segment_size" +TFTRT_PRECISION_MODE = "_tftrt_precision_mode" +TFTRT_PROFILE_STRATEGY = "_tftrt_profile_strategy" +TFTRT_USE_CALIBRATION = "_tftrt_use_calibration" +TFTRT_USE_IMPLICIT_BATCH = "_tftrt_use_implicit_batch" +TIME_MAJOR = "time_major" +XLA_COMPILE = "_XlaMustCompile" +XLA_COMPILE_OPTIONAL = "_XlaCompile" +XLA_SCOPE = "_XlaScope" +XLA_SEPERATE_COMPILED_GRADIENTS = "_XlaSeparateCompiledGradients" + +POLYMORPHIC_FUNCTION_ALLOWLIST = frozenset({ + API_IMPLEMENTS, + API_PREFERRED_DEVICE, + DISABLE_ACD, + DISABLE_SUMMARIES_AT_RUNTIME, + GO_BACKWARDS, + IMPLEMENTS, + INTS_ON_DEVICE, + NO_INLINE, + RUNTIME_CONSTANT_OPTIMIZATION, + TF_DATA_FUNCTION, + TIME_MAJOR, + 
OUTPUTS_ON_OP_DEVICE, +}) + +TRACING_COMPILATION_ALLOWLIST = frozenset().union( + POLYMORPHIC_FUNCTION_ALLOWLIST, + { + SHARED_RENDEZVOUS, + XLA_COMPILE, + }, +) + +MONOMORPHIC_FUNCTION_ALLOWLIST = frozenset().union( + TRACING_COMPILATION_ALLOWLIST, + { + BACKWARD_FUNCTION, + DISABLE_CALL_SHAPE_INFERENCE, + EAGER_RUNTIME_CONSTRUCTION_CONTEXT, + FORWARD_FUNCTION, + INPUT_SHAPES, + ORIGINAL_FUNCTION_NAME, + QUANTIZED_COMPOSITE_FUNCTION, + QUANTIZED_OPS, + TFTRT_ALLOW_BUILD_AT_RUNTIME, + TFTRT_CONVERT_FUNCTION, + TFTRT_IS_DYN_OP, + TFTRT_LOGGER, + TFTRT_MAX_BATCH_SIZE, + TFTRT_MAX_CACHED_ENGINES, + TFTRT_MAX_WORKSPACE_SIZE, + TFTRT_MIN_SEGMENT_SIZE, + TFTRT_PRECISION_MODE, + TFTRT_PROFILE_STRATEGY, + TFTRT_USE_CALIBRATION, + TFTRT_USE_IMPLICIT_BATCH, + XLA_COMPILE_OPTIONAL, + XLA_SCOPE, + XLA_SEPERATE_COMPILED_GRADIENTS, + }, +) + + +def _parse_func_attr_value(key, value): + """Converts a python object to an attr_value_pb2.AttrValue object.""" + if isinstance(value, attr_value_pb2.AttrValue): + return value + # bool type check has to happen before int since bool is a subclass of int. + elif isinstance(value, bool): + return attr_value_pb2.AttrValue(b=value) + elif isinstance(value, int): + return attr_value_pb2.AttrValue(i=value) + elif isinstance(value, float): + return attr_value_pb2.AttrValue(f=value) + elif isinstance(value, (str, bytes)): + return attr_value_pb2.AttrValue(s=compat.as_bytes(value)) + elif isinstance(value, list): + list_value = attr_value_pb2.AttrValue.ListValue() + for v in value: + if isinstance(v, bool): + list_value.b.append(v) + elif isinstance(v, int): + list_value.i.append(v) + elif isinstance(v, float): + list_value.f.append(v) + elif isinstance(v, (str, bytes)): + list_value.s.append(compat.as_bytes(v)) + else: + raise ValueError( + f"Attributes for {key} must be bool, int, float, or string. " + f"Got {type(v)}." 
+ ) + return attr_value_pb2.AttrValue(list=list_value) + else: + raise ValueError( + f"Attribute {key} must be bool, int, float, string, list, or " + f"AttrValue. Got {type(value)}." + ) + + +def parse_func_attrs(attributes, allowlist=None): + """Convert the keyword arguments into function_def attributes. + + Currently only support primitive types: bool, int, float and string. + + Args: + attributes: the dictionary of attributes. + allowlist: set of attribute names allowed. + Returns: + A dict of attributes where the key is the name of attribute and the value + is the AttrValue proto. + Raises: + ValueError: If the kwargs contains unallowlisted name or unsupported value + types. + """ + if not allowlist: + allowlist = MONOMORPHIC_FUNCTION_ALLOWLIST + + attrs = {} + for key, value in attributes.items(): + if key not in allowlist: + raise ValueError( + f"Allowlist does not support `{key}` as an attribute.") + attrs[key] = _parse_func_attr_value(key, value) + return attrs diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/autograph_util.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/autograph_util.py new file mode 100644 index 0000000000000000000000000000000000000000..958dea68e694e1dae728effcb4d4521b0d17078e --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/autograph_util.py @@ -0,0 +1,59 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# pylint: disable=unidiomatic-typecheck +"""Autograph utility functions for polymorphic_function.""" + +from tensorflow.python.autograph.core import converter +from tensorflow.python.autograph.impl import api +from tensorflow.python.util import tf_decorator + + +def py_func_from_autograph( + python_func, + autograph_options=None, +): + """Compile a python function using autograph, for use with FuncGraph. + + Args: + python_func: the Python function to compile. + autograph_options: additional knobs to control when `autograph=True`. + See https://www.tensorflow.org/guide/autograph for more information. + Returns: + python_func, converted using autograph. + """ + _, original_func = tf_decorator.unwrap(python_func) + + def autograph_handler(*args, **kwargs): + """Calls a converted version of original_func.""" + try: + return api.converted_call( + original_func, + args, + kwargs, + options=converter.ConversionOptions( + recursive=True, + optional_features=autograph_options, + user_requested=True, + )) + except Exception as e: # pylint:disable=broad-except + if hasattr(e, "ag_error_metadata"): + raise e.ag_error_metadata.to_exception(e) + else: + raise + + # Wrapping around a decorator allows checks like tf_inspect.getargspec + # to be accurate. 
+ converted_func = tf_decorator.make_decorator(original_func, autograph_handler) + return tf_decorator.rewrap(python_func, original_func, converted_func) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/compiler_ir.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/compiler_ir.py new file mode 100644 index 0000000000000000000000000000000000000000..5bb959fb9e9697e67e5a9c4fdd0772d5aa953332 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/compiler_ir.py @@ -0,0 +1,128 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Implmentation for defining get_compiler_ir.""" +from typing import List, Optional +import warnings + +from tensorflow.core.function import trace_type +from tensorflow.python.eager import context +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import tensor_spec +from tensorflow.python.ops import random_ops +from tensorflow.python.util import nest + + +def maybe_get_device_name(device_name): + # TODO(cheshire): This is a hack to get the current "preferred" device, + # there is no current API to get it otherwise. 
+ if device_name is None: + device_name = random_ops.random_normal([]).device + return device_name + + +# TODO(fmuham): Use trace_type._flatten here instead when available +def make_handledata_tensor_specs(resource_vars): + """Convert tf.Variable list to its corresponding TensorSpec list.""" + if not all(x.dtype is dtypes.resource for x in resource_vars): + raise RuntimeError("Resource_vars must be tf.resource list.") + inner_context = trace_type.InternalTracingContext() + trace_type_inputs = trace_type.from_value( + tuple(resource_vars), inner_context + ).components + + def to_resource_spec(traced_input): + try: + handle_data = traced_input.dtype._handle_data.shape_inference # pylint: disable=protected-access + shape_and_type = handle_data.shape_and_type[0] + spec = tensor_spec.TensorSpec( + shape=shape_and_type.shape, dtype=shape_and_type.dtype + ) + return spec + except Exception as e: + raise ValueError( + "Fail to convert tf.Variable list to TensorSpec list. The error" + " is: %s" % e + ) from e + + return [to_resource_spec(trace_type) for trace_type in trace_type_inputs] + + +def from_concrete_function( + concrete_fn, + specialized_flat_specs: Optional[List[tensor_spec.TensorSpec]] = None, +): + """Generate the Compiler Ir from tf concrete function with TensorSpec. + + Args: + concrete_fn: returned by using get_concrete_function. + specialized_flat_specs: specialized flat tf.TensorSpecs for function args. + + Returns: + Function callable that generate the HLO text. + + Raises: + ValueError: if concrete_fn is not "compilable" without concrete + inputs. 
+ """ + context.ensure_initialized() + fn_name = concrete_fn.name + filtered_flat_specs = specialized_flat_specs or list( + nest.flatten(concrete_fn.structured_input_signature) + ) + + if not all(s.shape.is_fully_defined() for s in filtered_flat_specs): + raise ValueError( + f"Only support static input shape but got inputs = {concrete_fn.inputs}" + ) + + def compiler_ir_generator(stage="hlo", device_name=None, platform_name=None): + """Gets the compiler IR bytes. + + Args: + stage: The exported stage for the given function. + device_name: The name of the device with the form as + "/job:localhost/replica:0/task:0/device:CPU:0", "/device:TPU:0" etc. + When this is used, actual device is needed for getting the compiler IR. + platform_name: The name of the platform, e.g. "TPU". See the comment in + `get_compiler_ir` in `context.py`. + + Returns: + The compiler IR bytes. + """ + if device_name is not None: + if platform_name is not None: + raise ValueError( + "device_name and platform_name cannot be provided at the same time." + ) + warnings.warn("device_name is being deprecated. 
Use platform_name.") + device_name = maybe_get_device_name(device_name) + res_bytes = context.context().get_compiler_ir( + device_name=device_name, + platform_name=platform_name, + function_name=fn_name, + flat_args=filtered_flat_specs, + captured_inputs=concrete_fn.captured_inputs, + stage=stage, + ) + if stage in ( + "hlo_serialized", + "optimized_hlo_serialized", + "optimized_hlo_proto_serialized", + ): + return res_bytes + else: + return res_bytes.decode("utf-8") + + return compiler_ir_generator diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/composite_tensor_utils.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/composite_tensor_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..b7f0523e9da3431174d6faa0b6c12e3c7217daf1 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/composite_tensor_utils.py @@ -0,0 +1,39 @@ +# Copyright 2022 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Utility to manipulate CompositeTensors in tf.function.""" + +from tensorflow.python.framework import composite_tensor +from tensorflow.python.util import _pywrap_utils +from tensorflow.python.util import nest + + +# TODO(b/240337581, b/240337099): Remove this function when we de-alias +# dt_resource tensors or tf.nest support is_leaf. +def flatten_with_variables(inputs): + """Flattens `inputs` but don't expand `ResourceVariable`s.""" + # We assume that any CompositeTensors have already converted their components + # from numpy arrays to Tensors, so we don't need to expand composites here for + # the numpy array conversion. Instead, we do so because the flattened inputs + # are eventually passed to ConcreteFunction()._call_flat, which requires + # expanded composites. + flat_inputs = [] + for value in nest.flatten(inputs): + if (isinstance(value, composite_tensor.CompositeTensor) and + not _pywrap_utils.IsResourceVariable(value)): + components = value._type_spec._to_components(value) # pylint: disable=protected-access + flat_inputs.extend(flatten_with_variables(components)) + else: + flat_inputs.append(value) + return flat_inputs diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/concrete_function.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/concrete_function.py new file mode 100644 index 0000000000000000000000000000000000000000..a68acdd94d40e07c452908842edaea2df09700c1 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/concrete_function.py @@ -0,0 +1,1771 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# pylint: disable=unidiomatic-typecheck +"""Implementation for ConcreteFunction.""" + +import collections + +from tensorflow.core.framework import attr_value_pb2 +from tensorflow.core.function.polymorphism import function_type as function_type_lib +from tensorflow.python import pywrap_tfe +from tensorflow.python.eager import backprop_util +from tensorflow.python.eager import context +from tensorflow.python.eager import forwardprop_util +from tensorflow.python.eager import record +from tensorflow.python.eager.graph_only_ops import graph_placeholder +from tensorflow.python.eager.polymorphic_function import atomic_function +from tensorflow.python.eager.polymorphic_function import attributes as attributes_lib +from tensorflow.python.eager.polymorphic_function import function_type_utils +from tensorflow.python.eager.polymorphic_function import saved_model_exported_concrete +from tensorflow.python.framework import composite_tensor +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import errors +from tensorflow.python.framework import func_graph as func_graph_module +from tensorflow.python.framework import indexed_slices +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor as tensor_lib +from tensorflow.python.framework import tensor_shape +from tensorflow.python.framework import type_spec +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import default_gradient +from tensorflow.python.ops 
from tensorflow.python.ops import gradients_util
from tensorflow.python.ops import handle_data_util
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.profiler import trace
from tensorflow.python.trackable import base as trackable
from tensorflow.python.types import core
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util import object_identity


def _is_type_subset(a, b):
  """Returns true if `b` is a subset of type `a` (or if a is not a TypeSpec.)"""
  if not isinstance(a, type_spec.TypeSpec):
    # A non-TypeSpec `a` places no constraint on `b`.
    return True
  return a.most_specific_compatible_type(b) == a


# Name prefixes marking the autodiff role of a generated function.
_FORWARD_PREFIX = "__forward_"
_BACKWARD_PREFIX = "__backward_"
_INFERENCE_PREFIX = "__inference_"


def _forward_name(n):
  """The name of a generated forward defun named n."""
  return f"{_FORWARD_PREFIX}{n}_{ops.uid()}"


def _backward_name(n):
  """The name of a generated backward defun named n."""
  return f"{_BACKWARD_PREFIX}{n}_{ops.uid()}"


def _inference_name(n):
  """The name of a forward-but-no-gradient defun named n."""
  return f"{_INFERENCE_PREFIX}{n}_{ops.uid()}"


def _create_forward_backward_with_graph(
    attrs, forward_graph, backwards_graph: func_graph_module.FuncGraph
):
  """Creates forward and backward functions from the function graphs.

  Args:
    attrs: Function attributes shared by both generated functions.
    forward_graph: The FuncGraph holding the forward computation.
    backwards_graph: The FuncGraph holding the backward computation.

  Returns:
    A `(forward_function, backward_function)` pair: an AtomicFunction for the
    forward pass and a ConcreteFunction for the backward pass, cross-linked
    via their "forward_function_name"/"backward_function_name" attributes.
  """
  forward_name = _forward_name(forward_graph.name)
  # NB: forward and backward function need to drop the "_implements"
  # attribute, because their signature contains all the intermediate tensors
  # that they compute. Thus they don't have a stable signature which can
  # be directly optimized downstream.
  # See for more details:
  # https://github.com/tensorflow/community/blob/master/rfcs/20190610-standardizing-composite_ops.md#appendix-future-support-for-optimizing-gradient-functions
  shared_attrs = dict(attrs)
  shared_attrs.pop(attributes_lib.IMPLEMENTS, None)

  # Build the backward function first so the forward function can reference
  # its name in an attribute.
  backward_attrs = attributes_lib.parse_func_attrs(
      {attributes_lib.FORWARD_FUNCTION: forward_name})
  backward_attrs.update(shared_attrs)
  # TODO(fmuham): Include inputs as well.
  backward_signature = function_type_lib.from_structured_signature(
      ((), {}),
      backwards_graph.structured_outputs,
      backwards_graph.function_captures.capture_types,
  )
  backward_fn = ConcreteFunction.from_func_graph(
      backwards_graph, backward_signature, attrs=backward_attrs
  )

  forward_attrs = attributes_lib.parse_func_attrs(
      {attributes_lib.BACKWARD_FUNCTION: backward_fn.name}
  )
  forward_attrs.update(shared_attrs)
  forward_fn = atomic_function.from_func_graph(
      forward_name, forward_graph, forward_attrs
  )
  return forward_fn, backward_fn
+ # See for more details: + # https://github.com/tensorflow/community/blob/master/rfcs/20190610-standardizing-composite_ops.md#appendix-future-support-for-optimizing-gradient-functions + common_attributes.pop(attributes_lib.IMPLEMENTS, None) + backward_function_attr = attributes_lib.parse_func_attrs( + {attributes_lib.FORWARD_FUNCTION: forward_function_name}) + backward_function_attr.update(common_attributes) + # TODO(fmuham): Include inputs as well. + function_type = function_type_lib.from_structured_signature( + ((), {}), + backwards_graph.structured_outputs, + backwards_graph.function_captures.capture_types, + ) + backward_function = ConcreteFunction.from_func_graph( + backwards_graph, function_type, attrs=backward_function_attr + ) + forward_function_attr = attributes_lib.parse_func_attrs( + {attributes_lib.BACKWARD_FUNCTION: backward_function.name} + ) + forward_function_attr.update(common_attributes) + forward_function = atomic_function.from_func_graph( + forward_function_name, forward_graph, forward_function_attr + ) + return forward_function, backward_function + + +class _DelayedRewriteGradientFunctions(object): + """Caches forward/backward functions with a delayed forward rewrite.""" + + def __init__( + self, atomic_fn: atomic_function.AtomicFunction, func_graph_deleter + ): + """Construct an inference function and initialize caches.""" + # A map from the number of forward function outputs with accepted gradients + # to forward and backward functions, used to cache non-tape backward + # function generation. + self._cached_function_pairs = {} + self._func_graph = atomic_fn.graph + self._inference_function = atomic_fn + self._attrs = atomic_fn.attributes + self._gradient_name = None + # Note that the FuncGraph is mutated later, so we need to inspect it now to + # figure out the user-specified outputs of the inference function. 
  def forward_backward(self, num_doutputs=None):
    """A possibly-cached pair of forward and backward functions.

    Args:
      num_doutputs: Number of forward outputs gradients are accepted for;
        defaults to the inference function's user-visible output count.

    Returns:
      A `(forward_function, backward_function)` pair.
    """
    if num_doutputs is None:
      num_doutputs = self._num_inference_outputs
    # Reuse a previously constructed pair when possible; construction mutates
    # the forward graph (adds side outputs), so caching also keeps the graph
    # stable for repeated requests.
    forward_backward = self._cached_function_pairs.get(num_doutputs)
    if forward_backward is not None:
      return forward_backward
    forward, backward = self._construct_forward_backward(num_doutputs)
    self._cached_function_pairs[num_doutputs] = (forward, backward)
    return forward, backward

  def _construct_forward_backward(self, num_doutputs):
    """Constructs a pair of forward and backward functions.

    Args:
      num_doutputs: The constructed backprop function will take output gradients
        for the first `num_doutputs` outputs of the forward function. Defaults
        to the number of outputs for the inference function, but when
        higher-order gradients are computed this will increase to include side
        outputs.

    Returns:
      A pair of (forward_function, backward_function):
        forward_function: A re-generated inference function (an
          AtomicFunction) to account for new side outputs, if any extra
          were required when building the backward pass.
        backward_function: A ConcreteFunction that Takes `num_doutputs`
          arguments and returns gradients with respect to inputs of the forward
          function.
    """
    # Only trainable outputs receive gradient placeholders.
    trainable_outputs = [
        output for output in self._func_graph.outputs[:num_doutputs]
        if backprop_util.IsTrainable(output)]

    signature = []
    for t in trainable_outputs:
      signature.append(
          tensor_lib.TensorSpec(*default_gradient.shape_and_dtype(t)))

    def _backprop_function(*grad_ys):
      # Clear any device scope so gradient ops are placed by default rules.
      with ops.device(None):
        return gradients_util._GradientsHelper(  # pylint: disable=protected-access
            trainable_outputs,
            self._func_graph.inputs,
            grad_ys=grad_ys,
            src_graph=self._func_graph)

    with self._func_graph.as_default():
      backwards_graph = func_graph_module.FuncGraph(
          _backward_name(self._func_graph.name))
      # Trace the backprop computation into `backwards_graph`.
      func_graph_module.func_graph_from_py_func(
          name=backwards_graph.name,
          python_func=_backprop_function,
          args=[], kwargs={},
          signature=signature,
          func_graph=backwards_graph)
      backwards_graph_captures = backwards_graph.external_captures
      # Tensors the backward graph captured from the forward graph must become
      # (side) outputs of the forward function so the backward function can
      # consume them at call time.
      captures_from_forward = [
          c for c in backwards_graph_captures if
          not isinstance(c, ops.EagerTensor) and c.graph is self._func_graph]

      existing_outputs = object_identity.ObjectIdentitySet(
          self._func_graph.outputs)
      for capture in captures_from_forward:
        if capture not in existing_outputs:
          existing_outputs.add(capture)
          self._func_graph.outputs.append(capture)

    forward_function, backward_function = _create_forward_backward_with_graph(
        self._attrs, self._func_graph, backwards_graph)
    return forward_function, backward_function
+ """ + trainable_outputs = [ + output for output in self._func_graph.outputs[:num_doutputs] + if backprop_util.IsTrainable(output)] + + signature = [] + for t in trainable_outputs: + signature.append( + tensor_lib.TensorSpec(*default_gradient.shape_and_dtype(t))) + + def _backprop_function(*grad_ys): + with ops.device(None): + return gradients_util._GradientsHelper( # pylint: disable=protected-access + trainable_outputs, + self._func_graph.inputs, + grad_ys=grad_ys, + src_graph=self._func_graph) + + with self._func_graph.as_default(): + backwards_graph = func_graph_module.FuncGraph( + _backward_name(self._func_graph.name)) + func_graph_module.func_graph_from_py_func( + name=backwards_graph.name, + python_func=_backprop_function, + args=[], kwargs={}, + signature=signature, + func_graph=backwards_graph) + backwards_graph_captures = backwards_graph.external_captures + captures_from_forward = [ + c for c in backwards_graph_captures if + not isinstance(c, ops.EagerTensor) and c.graph is self._func_graph] + + existing_outputs = object_identity.ObjectIdentitySet( + self._func_graph.outputs) + for capture in captures_from_forward: + if capture not in existing_outputs: + existing_outputs.add(capture) + self._func_graph.outputs.append(capture) + + forward_function, backward_function = _create_forward_backward_with_graph( + self._attrs, self._func_graph, backwards_graph) + return forward_function, backward_function + + def _rewrite_forward_and_call_backward(self, op: ops.Operation, *doutputs): + """Add outputs to the forward call and feed them to the grad function.""" + forward_function, backwards_function = self.forward_backward(len(doutputs)) + if not backwards_function.outputs: + return backwards_function.structured_outputs + + op.graph._add_function_recursive(forward_function) # pylint: disable=protected-access + + # pylint: disable=protected-access + # Rewrite an inference call op to be a forward call op + op._set_func_attr("f", forward_function.name) + 
op._set_type_list_attr( + "Tout", + [ + o.dtype.as_datatype_enum + for o in forward_function.function_type.flat_outputs + ], + ) + truncated_outputs = forward_function.function_type.flat_outputs[ + len(op.outputs) : + ] + op._add_outputs( + [o.dtype.as_datatype_enum for o in truncated_outputs], + [o.shape for o in truncated_outputs], + ) + for i in range(len(op.outputs)): + output_type = forward_function.function_type.flat_outputs[i] + handle_data = output_type.dtype._handle_data + if handle_data: + handle_data_util.set_handle_data( + op.outputs[i], handle_data.shape_inference + ) + # pylint: enable=protected-access + + capture_mapping = dict( + zip((ops.tensor_id(t) for t in self._func_graph.outputs), op.outputs)) + remapped_captures = [ + capture_mapping.get(ops.tensor_id(capture), capture) + for capture in backwards_function.captured_inputs + ] + + # Replace Nones with zeros since we're calling a graph function which + # expects numeric inputs. + cleaned_doutputs = [] + for doutput, placeholder in zip(doutputs, self._func_graph.outputs): + if backprop_util.IsTrainable(placeholder): + if isinstance(doutput, indexed_slices.IndexedSlices): + # Gradient passed to a backward ConcreteFunction must be tf.Tensor, + # so we convert tf.IndexedSlices to tf.Tensor. + cleaned_doutputs.append(ops.convert_to_tensor(doutput)) + elif doutput is not None: + cleaned_doutputs.append(doutput) + else: + cleaned_doutputs.append(default_gradient.zeros_like(placeholder)) + + # Compute the gradients using the side outputs + return backwards_function._call_flat( # pylint: disable=protected-access + cleaned_doutputs, remapped_captures) + + def get_gradient_function(self): + """Returns gradient function. + + The gradient rewrites an inference call op to a forward call op, but does + not modify a pre-existing forward call op. It then computes the gradient + from the output's gradients and the side outputs of the forward op. 
+ """ + return self._rewrite_forward_and_call_backward + + def forward(self, inference_args=None, input_tangents=None): + """A forward function with only user-specified outputs. + + The call operation for the returned inference function can be rewritten into + a forward function. This only happens if the backward function (from the + `backward` method) ends up being used to compute gradients. + + This approach avoids constructing unnecessary graphs, but it only works if + we are calling this function when not executing eagerly. + + Args: + inference_args: A flat list of Tensors, arguments to the inference + function. Unused, but taken for compatibility with + _TapeGradientFunctions. + input_tangents: A flat list of Tensors, jvps associated with + `inference_args`. Unused; if required, tape functions must be used + instead. + + Returns: + An atomic_function.AtomicFunction. + """ + del inference_args # unused + if input_tangents: + # This class does not support special-cased forwardprop. The arguments are + # here for compatibility with _TapeGradientFunctions. + raise errors.InternalError("unexpectedly got forwardprop information in " + "a class that does not support forwardprop.") + return self._inference_function + + def _backward(self, outputs): + """Fetch a backward function for `outputs` from the forward function.""" + def _backward_function(*args): + call_op = outputs[0].op + return self._rewrite_forward_and_call_backward(call_op, *args) + return _backward_function, outputs + + def record(self, flat_outputs, inference_args, input_tangents): + """Record the function call operation. + + _DelayedRewriteGradientFunctions supports only first-order backprop tape + gradients (and then only when graph building). It does not work with + higher-order tape gradients or forward autodiff, but does work with + higher-order symbolic gradients (tf.gradients). + + Args: + flat_outputs: The result of running `forward`. 
+ inference_args: A flat list of Tensors with inference inputs to the + operation. + input_tangents: A flat list of Tensors with input tangents consumed by the + operation. + """ + backward_function, to_record = self._backward(flat_outputs) + record.record_operation( + self._inference_function.cached_definition.signature.name, + to_record, + inference_args + input_tangents, + backward_function, + ) + + +# Contains information about a forward function wrapped to compute jvps. +_ForwardWrapper = collections.namedtuple( + "_ForwardWrapper", ( + # The wrapper Graph. + "graph", + # A flat list of non-tangent Tensor outputs from the wrapped forward + # function. + "outputs", + # Indices for output tangents, same format as + # forwardprop_util.pack_tangents. + "output_indices", + # A flat list of tangents for `outputs`. + "output_tangents")) + + +class _TapeGradientFunctions(object): + """Caches forward and backward functions compatible with eager gradients. + + In contrast to the delayed-rewrite approach in + `_DelayedRewriteGradientFunctions` which only works with delayed execution, + the forward function generated by this class has a fixed set of outputs which + may be preserved by a tape in order to compute gradients later. + + This class is abstract; its child classes differ in how many side outputs of + the forward function their backward function accepts gradients for, which + determines whether higher-order tape gradients are possible. 
+ """ + + def __init__( + self, + func_graph: func_graph_module.FuncGraph, + attrs, + func_graph_deleter, + forwardprop_input_indices, + delayed_rewrite_functions, + need_gradients_for_jvps, + ): + self._func_graph = func_graph + self._forward_graph = None + self._attrs = attrs + self._forward = None + self._backward = None + self._num_outputs = len(func_graph.outputs) + self._func_graph_deleter = func_graph_deleter + self._forwardprop_input_indices = forwardprop_input_indices + self._forwardprop_output_indices = None + self._num_forwardprop_outputs = 0 + self._num_inference_outputs = len(func_graph.outputs) + self._num_trainable_inference_outputs = len( + [t for t in func_graph.outputs if backprop_util.IsTrainable(t)]) + self._delayed_rewrite_functions = delayed_rewrite_functions + self._need_gradients_for_jvps = need_gradients_for_jvps + + def _build_functions_for_outputs( + self, outputs, inference_args, input_tangents): + """Forward+backward functions where the backward function sees `outputs`.""" + # First figure out which of `outputs` are trainable. We'll accept gradients + # for each of these in the backward function. 
  def _build_functions_for_outputs(
      self, outputs, inference_args, input_tangents):
    """Forward+backward functions where the backward function sees `outputs`.

    Args:
      outputs: The forward-graph outputs the backward function accepts
        gradients for.
      inference_args: A flat list of Tensors, inference inputs.
      input_tangents: A flat list of Tensors, jvps for `inference_args`; empty
        when no forwardprop is in progress.

    Returns:
      A tuple of (forward function, forward graph, backward function,
      forwardprop output indices, number of forwardprop outputs).
    """
    # First figure out which of `outputs` are trainable. We'll accept gradients
    # for each of these in the backward function.
    trainable_outputs = []
    trainable_indices = []
    for index, output in enumerate(outputs):
      if backprop_util.IsTrainable(output):
        trainable_outputs.append(output)
        trainable_indices.append(index)

    backwards_graph = func_graph_module.FuncGraph(
        _backward_name(self._func_graph.name))
    with backwards_graph.as_default():
      # One gradient placeholder per trainable forward output; these become
      # the backward function's leading inputs.
      gradients_wrt_outputs = []
      for output in trainable_outputs:
        gradient_shape, gradient_dtype = default_gradient.shape_and_dtype(
            output)
        gradient_placeholder = graph_placeholder(gradient_dtype, gradient_shape)
        handle_data_util.copy_handle_data(output, gradient_placeholder)
        gradients_wrt_outputs.append(gradient_placeholder)
      with ops.device(None):
        gradients_wrt_inputs = gradients_util._GradientsHelper(  # pylint: disable=protected-access
            trainable_outputs,
            self._func_graph.inputs,
            grad_ys=gradients_wrt_outputs,
            src_graph=self._func_graph)

      if input_tangents:
        # Convert IndexedSlices to dense tensors (as we do elsewhere for
        # function gradients). Our C++ bindings don't know how to handle them
        # currently.
        gradients_wrt_inputs = nest.map_structure(
            lambda x: ops.convert_to_tensor(x) if x is not None else None,
            gradients_wrt_inputs)
      # Forward-graph tensors captured by the backward graph must become side
      # outputs of the forward function.
      captures_from_forward = [
          c for c in backwards_graph.external_captures
          if not isinstance(c, ops.EagerTensor) and c.graph is self._func_graph
      ]
      existing_outputs = object_identity.ObjectIdentitySet(
          self._func_graph.outputs)
      for capture in captures_from_forward:
        if capture not in existing_outputs:
          existing_outputs.add(capture)
          self._func_graph.outputs.append(capture)

    # The ordering of `backwards_graph.inputs` is important: inputs of
    # `backward_function` correspond to outputs (including
    # side outputs) of `self._tape_forward_function`.
    backwards_graph.inputs = (
        gradients_wrt_outputs + backwards_graph.internal_captures)
    backwards_graph.outputs.extend(
        grad
        for grad in nest.flatten(gradients_wrt_inputs, expand_composites=True)
        if grad is not None)
    backwards_graph.structured_outputs = gradients_wrt_inputs

    forward_function, backward_function = _create_forward_backward_with_graph(
        self._attrs, self._func_graph, backwards_graph)

    if not input_tangents:
      # There is no need to special-case forwardprop, so we can return the
      # forward+backward pair we've created without further wrapping.
      return (forward_function, self._func_graph, backward_function,
              # No forwardprop outputs.
              None, 0)
    forward_wrapper = self._wrap_forward_function_with_jvps(
        forward_function, backward_function, inference_args, input_tangents)
    (wrapped_backwards_graph,
     forward_wrapper) = self._wrap_backward_function_with_jvp_backprop(
         backward_function, gradients_wrt_outputs, forward_wrapper)
    # Now that we've added new captures, we need to make sure forward outputs
    # are in the same order the backward function expects them to be in:
    # [inference outputs] + [jvps] + [side outputs] + [captures].
    forward_wrapper = self._shuffle_forward_outputs(forward_wrapper)
    (wrapped_forward_function,
     wrapped_backward_function) = _create_forward_backward_with_graph(
         self._attrs, forward_wrapper.graph, wrapped_backwards_graph)
    if (len(inference_args) + len(input_tangents)
        != len(forward_wrapper.graph.inputs)):
      raise errors.InternalError(
          f"The forward graph had {len(forward_wrapper.graph.inputs)} inputs, "
          f"but we expected {len(inference_args) + len(input_tangents)} "
          f"({len(inference_args)} inference inputs and "
          f"{len(input_tangents)} input tangents).")
    return (wrapped_forward_function, forward_wrapper.graph,
            wrapped_backward_function, forward_wrapper.output_indices,
            len(forward_wrapper.output_tangents))
  def _wrap_forward_function_with_jvps(
      self, forward_function, backward_function,
      inference_args, input_tangents):
    """Adds inline JVP computation to a forward function.

    Args:
      forward_function: The AtomicFunction to wrap.
      backward_function: Backward function for `forward_function`, used to
        record the wrapped call for backprop.
      inference_args: A flat list of Tensors, inference inputs.
      input_tangents: A flat list of Tensors, jvps for `inference_args`.

    Returns:
      A `_ForwardWrapper` holding the wrapper graph, primal outputs, tangent
      indices, and output tangents.
    """
    forward_wrapper_graph = func_graph_module.FuncGraph(
        _forward_name(self._func_graph.name))
    with forward_wrapper_graph.as_default():
      # Tell forward accumulators to free up space for new JVP computations,
      # since one may be in the process of computing a JVP (if that
      # computation triggered this function building).
      #
      # We'll make symbolic versions of input JVPs, run the forward function
      # under forward accumulators to get symbolic output JVPs, then set those
      # as outputs of the new wrapped forward function.
      with forwardprop_util.push_forwardprop_state():
        forward_captures = {
            ops.tensor_id(internal): external
            for external, internal in self._func_graph.captures}
        for input_index, real_input in enumerate(self._func_graph.inputs):
          # This loop is more or less equivalent to running tf.identity on each
          # of self._func_graph.inputs. However, doing that also captures jvps
          # for resource handles, which confuses the jvp capturing code below
          # (since primal inputs are interwoven with jvp inputs).
          input_placeholder = array_ops.placeholder(
              dtype=real_input.dtype,
              shape=real_input.shape)
          capture = forward_captures.get(ops.tensor_id(real_input))
          if capture is not None:
            forward_wrapper_graph.add_capture(capture, input_placeholder)
            if capture.dtype == dtypes.resource:
              handle_data_util.copy_handle_data(capture, input_placeholder)
          else:
            forward_wrapper_graph.inputs.append(input_placeholder)
        # Record identity-like operations so the tape watches the wrapper's
        # inputs.
        for inp, arg in zip(forward_wrapper_graph.inputs, inference_args):
          record.record_operation(
              "captured_value", [inp], [arg],
              backward_function=lambda x: [x],
              forward_function=lambda x: [x])
        num_inference_inputs = len(inference_args)
        for tape_indices in self._forwardprop_input_indices:
          for input_index, jvp_index in tape_indices:
            input_placeholder = forward_wrapper_graph.inputs[input_index]
            if len(forward_wrapper_graph.inputs) != jvp_index:
              raise errors.InternalError(
                  f"Expected {jvp_index} forward graph inputs, "
                  f"got {len(forward_wrapper_graph.inputs)}.")
            gradient_shape, gradient_dtype = default_gradient.shape_and_dtype(
                input_placeholder)
            jvp_placeholder = graph_placeholder(gradient_dtype, gradient_shape)
            external_jvp = input_tangents[jvp_index - num_inference_inputs]
            forward_wrapper_graph.add_capture(external_jvp, jvp_placeholder)
            tensor_shape.TensorShape(
                external_jvp.shape).assert_is_compatible_with(
                    jvp_placeholder.shape)
            record.record_operation(
                "captured_value",
                [jvp_placeholder],
                [external_jvp],
                backward_function=lambda x: [x],
                forward_function=lambda x: [x])
      forward_inputs = forward_wrapper_graph.inputs[:num_inference_inputs]
      gradient_function = (
          self._delayed_rewrite_functions._rewrite_forward_and_call_backward)  # pylint: disable=protected-access
      with ops.get_default_graph()._override_gradient_function(  # pylint: disable=protected-access
          {"PartitionedCall": gradient_function,
           "StatefulPartitionedCall": gradient_function}):
        forward_outputs = forward_function.call_flat(*forward_inputs)
        if isinstance(forward_outputs, ops.Operation):
          # _wrapped_backward_function expects a list, but if the function has
          # no outputs its call() returns an Operation. We need to undo that
          # so we don't cause problems later.
          forward_outputs = []
      py_backward, _ = self._wrap_backward_function(
          self._func_graph, backward_function, forward_outputs)
    # We will never request backward tape gradients for this operation
    # directly since we're wrapping the call; forwardprop will call the
    # backward function (and nested forward accumulators may build
    # higher-order gradients), but any watching GradientTapes should ignore
    # it.
    #
    # TODO(allenl): It might be better to explicitly stop backward recording
    # so we don't use the second-order tape cases unnecessarily.
    record.record_operation_forwardprop_only(
        forward_function.cached_definition.signature.name,
        forward_outputs, forward_inputs, py_backward, None)
    output_indices, output_tangents = (
        pywrap_tfe.TFE_Py_PackJVPs(forward_outputs))
    # Tangents computed inside the wrapper must be captured so they become
    # graph outputs.
    output_tangents = [forward_wrapper_graph.capture(t)
                       for t in output_tangents]
    return _ForwardWrapper(
        graph=forward_wrapper_graph, outputs=forward_outputs,
        output_indices=output_indices, output_tangents=output_tangents)
  def _wrap_backward_function_with_jvp_backprop(
      self, backward_function, gradients_wrt_outputs, forward_wrapper):
    """Wraps `backward_function` to include gradients for JVPs.

    Args:
      backward_function: The backward ConcreteFunction to wrap.
      gradients_wrt_outputs: Gradient placeholders for the trainable forward
        outputs.
      forward_wrapper: The `_ForwardWrapper` produced by
        `_wrap_forward_function_with_jvps`.

    Returns:
      A (wrapped backwards graph, updated forward wrapper) pair.
    """
    wrapped_backwards_graph = func_graph_module.FuncGraph(
        _backward_name(self._func_graph.name))
    with wrapped_backwards_graph.as_default():
      py_backward, recorded_outputs = self._wrap_backward_function(
          self._func_graph, backward_function, forward_wrapper.outputs)
      # Build placeholders for output gradients; non-trainable outputs get
      # None so `py_backward` sees one argument per recorded output.
      trainable_index = 0
      forward_doutputs = []
      doutput_args = []
      for output in recorded_outputs:
        if backprop_util.IsTrainable(output):
          doutput = gradients_wrt_outputs[trainable_index]
          doutput_placeholder = graph_placeholder(doutput.dtype, doutput.shape)
          doutput_args.append(doutput_placeholder)
          forward_doutputs.append(doutput_placeholder)
          trainable_index += 1
        else:
          doutput_args.append(None)

      dinputs = py_backward(*doutput_args)
      existing_outputs = object_identity.ObjectIdentitySet(
          forward_wrapper.outputs + forward_wrapper.output_tangents)
      num_processed_output_tangents = 0
      gradients_wrt_output_tangents = []
      tangent_doutputs = []
      output_tangents = forward_wrapper.output_tangents
      output_indices = forward_wrapper.output_indices
      if self._need_gradients_for_jvps:
        # TODO(allenl): Consider using a throwaway graph to avoid extra
        # gradient evaluations; gradients for jvps may have common subgraphs.
        # Loop until no new output tangents appear: computing gradients can
        # itself add captures (and hence tangents).
        while num_processed_output_tangents != len(output_tangents):
          for output in output_tangents[num_processed_output_tangents:]:
            gradient_shape, gradient_dtype = default_gradient.shape_and_dtype(
                output)
            placeholder = graph_placeholder(gradient_dtype, gradient_shape)
            gradients_wrt_output_tangents.append(placeholder)
            tangent_doutputs.append(placeholder)
          num_processed_output_tangents = len(output_tangents)
          with ops.device(None):
            gradients_wrt_inputs = gradients_util._GradientsHelper(  # pylint: disable=protected-access
                output_tangents,
                forward_wrapper.graph.inputs,
                grad_ys=gradients_wrt_output_tangents,
                src_graph=forward_wrapper.graph)
          # Merge tangent gradients into the existing input gradients.
          dinputs = [
              backprop_util.AggregateIndexedSlicesGradients((existing, new))
              for existing, new in zip(dinputs, gradients_wrt_inputs)
              if existing is not None or new is not None]
          dinputs.extend(gradients_wrt_inputs[len(dinputs):])
          captures_from_forward = [
              c for c in wrapped_backwards_graph.external_captures
              if (not isinstance(c, ops.EagerTensor)
                  and c.graph is forward_wrapper.graph)]
          for capture in captures_from_forward:
            if capture not in existing_outputs:
              existing_outputs.add(capture)
              forward_wrapper.outputs.append(capture)
          output_indices, output_tangents = (
              forwardprop_util.pack_tangents(forward_wrapper.outputs))
          output_tangents = [forward_wrapper.graph.capture(t)
                             for t in output_tangents]
          for t in output_tangents:
            existing_outputs.add(t)
    # Input order: [inference gradients] + [tangent gradients] + [side-output
    # gradients] + [captures].
    wrapped_backwards_graph.inputs = (
        forward_doutputs[:self._num_trainable_inference_outputs]
        + tangent_doutputs
        + forward_doutputs[self._num_trainable_inference_outputs:]
        + wrapped_backwards_graph.internal_captures)
    wrapped_backwards_graph.structured_outputs = dinputs
    wrapped_backwards_graph.outputs = [t for t in dinputs if t is not None]
    return (wrapped_backwards_graph,
            forward_wrapper._replace(output_indices=output_indices,
                                     output_tangents=output_tangents))

  def _shuffle_forward_outputs(self, forward_wrapper):
    """Reorders function outputs so captures are last."""
    def _index_map(original):
      # Inference outputs keep their position.
      if original < self._num_inference_outputs:
        return original
      # Captured tangents move to just after the side outputs.
      if original >= len(forward_wrapper.outputs):
        return (original - len(forward_wrapper.outputs)
                + self._num_inference_outputs)
      # Side outputs shift right to make room for the tangents.
      return original + len(forward_wrapper.output_tangents)
    output_indices = nest.map_structure(
        _index_map, forward_wrapper.output_indices)
    forward_wrapper.graph.outputs = (
        forward_wrapper.outputs[:self._num_inference_outputs]
        + forward_wrapper.output_tangents
        + forward_wrapper.outputs[self._num_inference_outputs:])
    return forward_wrapper._replace(output_indices=output_indices)
  def forward(self, inference_args, input_tangents):
    """Construct or fetch a forward function with side-outputs.

    When graph building without a tape active, symbolic gradients rely on
    regenerating the backward function for higher-order gradients (to account
    for new side outputs of the rewritten forward function call). Thus there is
    no fixed backward function for this case. However, when a tape is active
    (eager or graph building), we generate fixed backward and forward functions
    at forward function call time.

    This difference between the tape and non-tape cases is to avoid building
    unneeded backward functions while graph building (where we may or may not
    eventually need gradients).

    Args:
      inference_args: A flat list of Tensors, arguments to the inference
        function.
      input_tangents: A flat list of Tensors, jvps associated with
        `inference_args`.

    Returns:
      A forward atomic_function.AtomicFunction.
    """
    if self._forward is None:
      # Build once and memoize; subclasses supply
      # `_forward_and_backward_functions`.
      (
          self._forward,
          self._forward_graph,
          self._backward,
          self._forwardprop_output_indices,
          self._num_forwardprop_outputs,
      ) = self._forward_and_backward_functions(inference_args, input_tangents)
    return self._forward

  def _wrap_backward_function(
      self, forward_graph: func_graph_module.FuncGraph, backward, outputs
  ):
    """Create a backward function given `outputs` from the forward function.

    Args:
      forward_graph: The FuncGraph whose outputs correspond to `outputs`.
      backward: The backward ConcreteFunction to wrap.
      outputs: Concrete output tensors from running the forward function.

    Returns:
      A (python backward callable, recorded outputs) pair.
    """
    # Map symbolic forward-graph outputs to the concrete outputs so captured
    # tensors can be fed at call time.
    capture_mapping = dict(
        zip((ops.tensor_id(t) for t in forward_graph.outputs), outputs)
    )
    captured_inputs = backward.captured_inputs
    remapped_captures = [
        capture_mapping.get(ops.tensor_id(capture), capture)
        for capture in captured_inputs
    ]
    if any(
        t.graph is forward_graph
        for t in remapped_captures
        if not isinstance(t, ops.EagerTensor)
    ):
      incorrect_mapping = [
          t
          for t in remapped_captures
          if (
              not isinstance(t, ops.EagerTensor)
              and t.graph is not forward_graph
          )
      ]
      raise errors.InternalError(
          "Failed to map all backward graph captures to "
          "the forward graph. Incorrectly mapped: "
          f"{incorrect_mapping}."
      )
    # We may need to use zeros_like to get a zero for variant Tensors with
    # unconnected gradients. We do that in advance so we don't have to hold on
    # to the outputs themselves, which may not be needed otherwise.
    variant_zeros_like = {}
    backward_function_inputs = len(backward.inputs) - len(captured_inputs)
    recorded_outputs = []
    trainable_recorded_outputs = 0
    skip_positions = []
    if self._num_forwardprop_outputs and not self._need_gradients_for_jvps:
      # Tangent outputs are not relevant to backprop in this case; splice
      # them out of the output list.
      relevant_outputs = (
          outputs[: self._num_inference_outputs]
          + outputs[
              self._num_inference_outputs + self._num_forwardprop_outputs :
          ]
      )
    else:
      relevant_outputs = outputs
    for output_index, output in enumerate(relevant_outputs):
      if trainable_recorded_outputs < backward_function_inputs:
        recorded_outputs.append(output)
      if backprop_util.IsTrainable(output):
        trainable_recorded_outputs += 1
      else:
        skip_positions.append(output_index)
      if output.dtype == dtypes.variant:
        variant_zeros_like[output_index] = default_gradient.zeros_like(output)

    def _backward_function_wrapper(*args):
      """Process output gradients and call the backward function."""
      if not backward.outputs:
        return backward.structured_outputs

      processed_args = []
      input_index = 0
      for output_index, arg in enumerate(args):
        # Convert IndexedSlices to dense tensors. The IndexedSlices
        # optimization is only really effective when doing tf.gather(variable)
        # as the adjoint functions for most operations are unlikely to
        # preserve the sparsity in IndexedSlices.
        if isinstance(arg, indexed_slices.IndexedSlices):
          arg = ops.convert_to_tensor(arg)
        if output_index in skip_positions:
          continue
        if arg is None:
          # We're calling a (non-polymorphic) ConcreteFunction, so we need to
          # have a Tensor value for each Tensor we thought would be trainable
          # based on its dtype, even if it ended up being unconnected.
          input_placeholder = backward.inputs[
              input_index]
          if input_placeholder.dtype == dtypes.variant:
            arg = variant_zeros_like[output_index]
          else:
            arg = array_ops.zeros(
                *default_gradient.shape_and_dtype(input_placeholder))
        processed_args.append(arg)
        input_index += 1
        if input_index >= backward_function_inputs:
          break
      return backward._call_flat(  # pylint: disable=protected-access
          processed_args, remapped_captures)

    return _backward_function_wrapper, recorded_outputs
+ """ + backward_function, to_record = self._wrap_backward_function( + self._forward_graph, self._backward, flat_outputs + ) + if self._forwardprop_output_indices: + record.record_operation_backprop_only( + self._forward.cached_definition.signature.name, + to_record, + inference_args, + backward_function, + ) + record.record_operation_forwardprop_only( + self._forward.cached_definition.signature.name, + flat_outputs, + inference_args + input_tangents, + backward_function, + self._forwardprop_output_indices, + ) + else: + record.record_operation( + self._forward.cached_definition.signature.name, + to_record, + inference_args + input_tangents, + backward_function, + ) + + +class _FirstOrderTapeGradientFunctions(_TapeGradientFunctions): + """Caches tape-friendly functions for first-order gradients.""" + + def __init__( + self, + func_graph: func_graph_module.FuncGraph, + attrs, + func_graph_deleter, + forwardprop_input_indices, + delayed_rewrite_functions, + need_gradients_for_jvps, + ): + super().__init__( + func_graph, + attrs, + func_graph_deleter, + forwardprop_input_indices, + delayed_rewrite_functions, + need_gradients_for_jvps, + ) + self._func_graph_deleter = func_graph_deleter + self._forwardprop_input_indices = forwardprop_input_indices + + def _forward_and_backward_functions(self, inference_args, input_tangents): + """Shortcut for when only first-order gradients are required. + + The returned backward function does not accept gradients with respect to + side output of forward_function. This is fine as long as the user can't + possibly request second order tape gradients, as when they've used a single + non-persistent GradientTape. Since we don't need the backward function to + take gradients with respect to side outputs, we can skip some potentially + slow graph building. + + Args: + inference_args: A flat list of Tensors, arguments to the inference + function. + input_tangents: A flat list of Tensors, jvps associated with + `inference_args`. 
+ + Returns: + A tuple of (forward_function, backward_function): + forward_function: Takes the same inputs as the inference function, but + returns side outputs used by backward_function in addition to the + inference function's outputs. + backward_function: Takes side outputs from forward_function and + gradients with respect to the "real" outputs of forward_function and + returns gradients with respect to the inputs. + """ + outputs = self._func_graph.outputs[:self._num_inference_outputs] + return self._build_functions_for_outputs( + outputs, inference_args, input_tangents) + + +class _HigherOrderTapeGradientFunctions(_TapeGradientFunctions): + """Caches tape-friendly functions for higher-order gradients.""" + + # TODO(b/136189779): Cond/while under a tape may need similar logic. Consider + # generalizing if so. + def _forward_and_backward_functions(self, inference_args, input_tangents): + """Forward and backward functions suitable for higher-order gradients. + + Unlike in `_FirstOrderTapeGradientFunctions`, the backward function built by + this method accepts gradients for all of the outputs of the returned forward + function, including side outputs. + + Args: + inference_args: A flat list of Tensors, arguments to the inference + function. + input_tangents: A flat list of Tensors, jvps associated with + `inference_args`. + + Returns: + A tuple of (forward_function, backward_function): + forward_function: Takes the same inputs as the inference function, but + returns side outputs used by backward_function in addition to the + inference function's outputs. + backward_function: Takes side outputs from forward_function and + gradients with respect to all of its outputs, real and side. Returns + gradients with respect to the inputs. + """ + outputs = [] + iteration_count = 0 + # First we need to figure out how many side outputs from the forward pass + # will be required. 
We do this in a temporary graph to avoid actually + # running multiple copies of the backward pass (one per _GradientsHelper + # call). + # + # While computing gradients, the backward function captures Tensors from + # the forward function. We add these as side outputs of the original + # function. However, we then need to accept output gradients with respect + # to these side outputs for higher order gradients to work. Thus we loop + # until the number of outputs of the function stabilizes. Note that this + # is only required for tape gradients, where we need to declare in advance + # all of the forward op's outputs: symbolic gradients with tf.gradients + # instead rely on regenerating backward functions when higher-order + # gradients are requested. + while (len(outputs) < len(self._func_graph.outputs) + # It's possible for gradient generation to add new ops to the forward + # pass. If all of the new outputs are non-trainable, there's no + # reason to continue. + and any(backprop_util.IsTrainable(output) + for output in self._func_graph.outputs[len(outputs):])): + iteration_count += 1 + if iteration_count >= 20 and iteration_count % 5 == 0: + new_op_with_trainable_output = None + num_new_trainable_outputs = 0 + for output in self._func_graph.outputs[len(outputs):]: + if backprop_util.IsTrainable(output): + num_new_trainable_outputs += 1 + new_op_with_trainable_output = output.op + logging.warning( + ("Determining side outputs for the function '{}' is taking longer " + "than expected ({} iterations, typically this converges in 5 or " + "so). This could indicate that a gradient registration is adding " + "new ops to the forward pass every time gradients are generated. 
" + "{} new trainable output(s) were added this iteration, one from " + "the following op:\n {}\nThis may indicate a TensorFlow bug, or " + "an issue in a tf.custom_gradient.") + .format( + self._func_graph.name, iteration_count, + num_new_trainable_outputs, new_op_with_trainable_output)) + outputs = list(self._func_graph.outputs) + self._build_functions_for_outputs( + outputs, inference_args, input_tangents) + + (forward_function, forward_graph, + backward_function, output_indices, num_output_tangents) = ( + self._build_functions_for_outputs( + outputs, inference_args, input_tangents)) + if (len(self._func_graph.outputs) > len(outputs) + and any(backprop_util.IsTrainable(output) + for output in self._func_graph.outputs[len(outputs):])): + raise errors.InternalError( + "Unexpectedly added new outputs to the forward function when " + "building the backward function: " + f"{self._func_graph.outputs[len(outputs):]}.") + return (forward_function, forward_graph, backward_function, output_indices, + num_output_tangents) + + +class _ForwardBackwardCall(object): + """Holds the state of a function call between execution and recording.""" + + __slots__ = [ + "_functions", "_inference_args", "_input_tangents", "_tape_watching" + ] + + def __init__(self, functions, inference_args, input_tangents, tape_watching): + """Collects information about the function call. + + Args: + functions: An object which produces forward and backward functions, either + a _DelayedRewriteGradientFunctions or a _TapeGradientFunctions object. + inference_args: A flat list of Tensors, arguments to the inference + function. + input_tangents: A flat list of Tensors, jvps associated with + `inference_args`. + tape_watching: Boolean, with True indicating that recording is necessary. 
+ """ + self._functions = functions + self._inference_args = inference_args + self._input_tangents = input_tangents + self._tape_watching = tape_watching + + def forward(self): + """Builds or retrieves a forward function for this call.""" + forward_function = self._functions.forward( + self._inference_args, self._input_tangents + ) + return forward_function, self._inference_args + self._input_tangents + + def record(self, flat_outputs): + """Given outputs from the execution of `forward`, records the operation.""" + if ( + self._tape_watching + and not isinstance(flat_outputs, ops.Operation) + and flat_outputs is not None + ): + # We only record function calls which have outputs, and then only when a + # tape is watching. + self._functions.record( + flat_outputs, self._inference_args, self._input_tangents + ) + + +class ConcreteFunction(core.ConcreteFunction, trackable.Trackable): + """A `tf.types.experimental.ConcreteFunction` created from `tf.function`.""" + + def __init__( + self, atomic_fn: atomic_function.AtomicFunction, shared_func_graph=True + ): + """Initialize a `ConcreteFunction`. + + Args: + atomic_fn: Inference atomic function to form basis of forward pass. + shared_func_graph: If False, the ConcreteFunction takes ownership of + `func_graph` and will break reference cycles when it is deleted. This + makes the FuncGraph inoperable. + + Raises: + ValueError: If number of input_placeholders is not equal to the number + of function inputs. + """ + # _arg_keywords and _num_positional_args define the flat signature. They + # are assigned after construction. 
+ self._arg_keywords = None + self._num_positional_args = None + + self._func_graph = atomic_fn.graph + self._captured_inputs = ( + self._func_graph.external_captures + + self._func_graph.deferred_external_captures + ) + self._function_type = atomic_fn.function_type + + self._output_shapes = tuple( + output.shape for output in self._func_graph.outputs) + self._attrs = attributes_lib.parse_func_attrs( + atomic_fn.attributes or {} + ) + + if shared_func_graph: + self._garbage_collector = None + else: + self._garbage_collector = ConcreteFunctionGarbageCollector( + atomic_fn.graph + ) + + # Pairs of forward and backward functions used for computing gradients. + # + # These each get a reference to the FuncGraph deleter since they use the + # FuncGraph directly. + self._delayed_rewrite_functions = _DelayedRewriteGradientFunctions( + atomic_fn, self._garbage_collector) + self._first_order_tape_functions = {} + self._higher_order_tape_functions = {} + # Cache the inference function to avoid a (Python) function call when not + # building gradients. + self._inference_function = self._delayed_rewrite_functions.forward() + + @classmethod + def from_func_graph(cls, graph, function_type, attrs, shared_func_graph=True): + atomic_fn = atomic_function.from_func_graph( + _inference_name(graph.name), graph, attrs, function_type + ) + return ConcreteFunction(atomic_fn, shared_func_graph=shared_func_graph) + + @property + def function_type(self): + """Return the FunctionType associated with this ConcreteFunction.""" + return self._function_type + + @property + def inference_fn(self): + """Return the inference function associated with this ConcreteFunction.""" + return self._inference_function + + # TODO(fmuham): Remove this property. 
+ @property + def _function_spec(self): + if self.function_type is None: + return None + + return function_type_utils.FunctionSpec( + self.function_type, + { + p.default + for p in self.function_type.parameters.values() + if p.optional + }, + False, + name=self.name, + ) + + @property + def variables(self): + """Sequence of variables for this function.""" + return tuple(self._func_graph.variables) + + def set_variables(self, variables): + self._func_graph.variables = variables + + @property + def trainable_variables(self): + """Sequence of trainable variables for this function.""" + return tuple(self._func_graph.trainable_variables) + + def __call__(self, *args, **kwargs): + """Executes the wrapped function. + + ConcreteFunctions have two signatures: + + * The signature of the original function wrapped by this ConcreteFunction. + * A flat signature, where each argument accepts a single Tensor. + + The original function signature is generally preferred, but the flat input + signature is supported for backward compatibility. + + ### Original Function Signature + + When calling a ConcreteFunction with the signature of the original function, + each argument must match the type or value that was used when the + ConcreteFunction's graph was traced. In particular: + + * Tensor arguments (including CompositeTensors, such as RaggedTensor) must + have matching `TypeSpec`s. + * Non-Tensor arguments (such as booleans or ints) must have equal values. + * Nested arguments (such as lists, tuples, or dictionaries) must have the + same nesting structure; and each nested value must have a matching type + or value. + + The default value for any arguments that were traced with non-Tensor values + is the value that was used in the trace. Arguments that were traced with + tensor arguments do not have a default value (even if the original function + had a default value for that argument). 
+ + ### Flat Signature + + When calling a ConcreteFunction with the flat signature, the arguments + correspond to the flattened component tensors of the arguments that were + used to construct the ConcreteFunction. Parameter names are assigned based + on `TensorSpec.name` (when specified) or the original argument names (with + suffixes automatically added for nested arguments or composite tensors with + multiple components). + + Args: + *args: Positional arguments to the concrete function. + **kwargs: Keyword arguments to the concrete function. + + Returns: + The result of applying the TF function on the given Tensors. + + Raises: + AssertionError: If this `ConcreteFunction` was not created through + `get_concrete_function`. + TypeError: If the arguments do not match the function's signature. + """ + return self._call_impl(args, kwargs) + + def _call_impl(self, args, kwargs): + """See `__call__` for details.""" + with trace.Trace(self._func_graph.name, tf_function_call="concrete"): + # Construct the list of input tensors: check if the structured signature + # applies first; and if not, then use the flat signature. + if self.function_type is not None: + try: + return self._call_with_structured_signature(args, kwargs) + except TypeError as structured_err: + try: + return self._call_with_flat_signature(args, kwargs) + except (TypeError, ValueError) as flat_err: + raise TypeError( # pylint: disable=raise-missing-from + str(structured_err) + + "\nFallback to flat signature also failed due to: " + + str(flat_err) + ) + + return self._call_with_flat_signature(args, kwargs) + + def _call_with_flat_signature(self, args, kwargs): + """Executes the wrapped function with the flat signature. + + Args: + args: Positional arguments to the concrete function. + kwargs: Keyword arguments to the concrete function. + + Returns: + The result of applying the function on the Tensors/Variables contained in + `args` and `kwargs`. 
+ Raises: + TypeError: if `args` and `kwargs` do not match the flat signature of this + `ConcreteFunction`. + """ + if len(args) > self._num_positional_args: + raise TypeError( + f"{self._flat_signature_summary()} takes {self._num_positional_args} " + f"positional arguments, got {len(args)}.") + args = list(args) + kwargs = dict(kwargs) + kwargs = { + function_type_lib.sanitize_arg_name(k): v for k, v in kwargs.items() + } + for keyword in self._arg_keywords[len(args):]: + try: + args.append( + kwargs.pop( + function_type_lib.sanitize_arg_name(compat.as_str(keyword)))) + except KeyError: + specified_keywords = ( + list(self._arg_keywords[:len(args)]) + list(kwargs.keys())) + missing_required_args = sorted( + set(self._arg_keywords) - set(specified_keywords)) + raise TypeError(f"{self._flat_signature_summary()} missing required " + f"arguments: {', '.join(missing_required_args)}.") + if kwargs: + positional_arg_keywords = set(self._arg_keywords[:len(args)]) + for unused_key in kwargs: + if unused_key in positional_arg_keywords: + raise TypeError(f"{self._flat_signature_summary()} got two values " + f"for '{unused_key}'.") + raise TypeError(f"{self._flat_signature_summary()} got unexpected " + f"keyword arguments: {', '.join(sorted(kwargs))}.") + + for i, arg in enumerate(args): + if not isinstance( + arg, (tensor_lib.Tensor, resource_variable_ops.BaseResourceVariable)): + raise TypeError(f"{self._flat_signature_summary()}: expected argument " + f"#{i}(zero-based) to be a Tensor; " + f"got {type(arg).__name__} ({arg}).") + return self._call_flat(args, self.captured_inputs) + + def _call_with_structured_signature(self, args, kwargs): + """Executes the wrapped function with the structured signature. + + Args: + args: Positional arguments to the concrete function. + kwargs: Keyword arguments to the concrete function. + + Returns: + The result of applying the function on the Tensors/Variables contained in + `args` and `kwargs`. 
+ Raises: + TypeError: if `args` and `kwargs` do not match the structured signature + of this `ConcreteFunction`. + """ + bound_args = ( + function_type_utils.canonicalize_function_inputs( + args, kwargs, self.function_type) + ) + filtered_flat_args = self.function_type.unpack_inputs(bound_args) + return self._call_flat( + filtered_flat_args, + captured_inputs=self.captured_inputs) + + def _call_flat(self, tensor_inputs, captured_inputs): + """Executes the wrapped function. + + Args: + tensor_inputs: a list of only Tensors generated from args, kwargs. + captured_inputs: the captured inputs that are also part of the input args + to the actual execution. By default, it should be self._captured_inputs. + Returns: + The result of applying the TF function to `args`. + + Raises: + ValueError: If `args` contains anything other than Tensors or Variables. + """ + ctx = context.context() + executing_eagerly = ctx.executing_eagerly() + + # Copy saveable status of function's graph to current FuncGraph. + default_graph = ops.get_default_graph() + if default_graph.building_function and not self._func_graph.saveable: + default_graph.mark_as_unsaveable(self._func_graph.saving_errors) + + if (record.could_possibly_record() or + hasattr(default_graph, "watch_variable")): + for v in self._func_graph.variables: + resource_variable_ops.variable_accessed(v) + + # TODO(fmuham): check in eager mode too. + if not executing_eagerly: + for i, tensor_input in enumerate(tensor_inputs): + # Can not compare shapes in these cases + # TODO(b/216506654): Consider moving this check elsewhere and making it + # work for all types (e.g. by including shape for Variables). + if (tensor_input.dtype == dtypes.resource or + tensor_input.dtype == dtypes.variant): + continue + + # If we're graph building, shape inference is on. We check for input + # compatibility up front to avoid hard to debug incompatibilities + # later. 
+ graph_input_shape = tensor_shape.TensorShape( + self._func_graph.inputs[i].shape) + if not graph_input_shape.is_compatible_with(tensor_input.shape): + raise ValueError( + f"Tensor {tensor_input} is not compatible with the shape this " + f"function was traced with. Expected shape " + f"{self._func_graph.inputs[i].shape}, but got shape " + f"{tensor_input.shape}.\n\nIf you called get_concrete_function, " + f"you may need to pass a tf.TensorSpec(..., shape=...) with a " + f"less specific shape, having None on axes which can vary.") + + args = tensor_inputs + captured_inputs + possible_gradient_type = gradients_util.PossibleTapeGradientTypes(args) + if (possible_gradient_type == gradients_util.POSSIBLE_GRADIENT_TYPES_NONE + and executing_eagerly): + # No tape is watching; skip to running the function. + return self._inference_function.call_preflattened(args) + forward_backward = self._select_forward_and_backward_functions( + args, + possible_gradient_type, + executing_eagerly) + forward_function, args_with_tangents = forward_backward.forward() + if executing_eagerly: + flat_outputs = forward_function.call_flat(*args_with_tangents) + else: + with default_graph._override_gradient_function( # pylint: disable=protected-access + {"PartitionedCall": self._get_gradient_function(), + "StatefulPartitionedCall": self._get_gradient_function()}): + flat_outputs = forward_function.call_flat(*args_with_tangents) + forward_backward.record(flat_outputs) + return self.function_type.pack_output(flat_outputs) + + @property + def name(self): + """`ConcreteFunction` name.""" + return self._delayed_rewrite_functions.forward().name + + @property + def graph(self): + """Returns the graph from which this function was constructed.""" + return self._func_graph + + @property + def inputs(self): + """Returns tensors in `self.graph` corresponding to arguments.""" + return self._func_graph.inputs + + @property + def structured_input_signature(self): + """Returns structured signature for this 
concrete function. + + Returns: + A tuple `(args, kwargs)`, where: + + * `args` is a tuple that specifies the expected type or value each for + positional argument. + * `kwargs` is a dictionary that specifies the expected type or value + for each keyword-only argument. + + The type or value for each argument is specified using one of the + following: + + * A `tf.TypeSpec`, indicating that a Tensor or other TensorFlow-native + value is expected. + * A Python value, such as an integer, indicating that an equal value + is expected. + * A nested structure of `tf.TypeSpec`s and Python values, indicating + that a corresponding nested structure is expected. + """ + return self._func_graph.structured_input_signature + + @property + def outputs(self): + """Returns tensors in `self.graph` corresponding to returned tensors.""" + return self._func_graph.outputs + + @property + def structured_outputs(self): + """Returns outputs in `self.graph` as returned by the original function.""" + return self._func_graph.structured_outputs + + def set_external_captures(self, captures): + """Updates the function capture values. + + The new values must have tensor types and shapes consistent with the + original captures of the concrete function, but it is allowed to change a + value captured with a deferred one and vice-versa. + + Args: + captures: A list of tensors or closures. Tensors are value captures, and + closures are call-time (deferred captures). + """ + # TODO(wxinyi): 1. verify that the new captures' type spec is compatible + # with the original's. However, doing so requires MirroredVariable captures + # initialized. 2. replace the original/new captures/deferred + # captures in the wrapped graph. Doing such for a capture-to-deferred + # capture replacement requires more arguments than the deferred capture + # itself, e.g. default value, spec. 
+ self._captured_inputs = captures + + def replace_capture_with_deferred_capture(self, + tensor, + closure, + spec, + placeholder=None, + default_value=None): + """Replaces existing capture `tensor` with a deferred capture `closure`. + + This API replaces the capture `tensor` from the concrete function's captured + inputs list, and places the deferred capture `closure` in + its spot so the order of captured inputs is preserved. This is important + because the old `tensor` and the new `closure` will have the same internal + placeholder, which can be passed through the `placeholder` argument, or + skipped, in which case we find the placeholder from internal inputs by + indexing `tensor` in the external captured inputs list. Thus, it is + important that the new deferred capture has output spec (specified by the + `spec` argument) compatible with the internal placeholder (`placeholder`) + and the original capture (`tensor`). + + For example, + + ```python + bool_captured_tensor = tf.constant(True) + float_captured_tensor = tf.constant([3.], dtype=tf.float32) + value = tf.constant([2.], dtype=tf.float32) + + @tf.function + def fn(): + deferred_tensor = ops.get_default_graph().capture_call_time_value( + lambda: value, + tf.TensorSpec(shape=(1,), dtype=tf.float32)) + if bool_captured_tensor: + return deferred_tensor + else: + return deferred_tensor + float_captured_tensor + + concrete_fn = fn.get_concrete_function() + print(concrete_fn()) # tf.Tensor([2.], shape=(1,), dtype=float32) + + new_bool_captured_tensor = constant_op.constant(False) + def bool_closure(): + return new_bool_captured_tensor + + concrete_fn.replace_capture_with_deferred_capture( + bool_captured_tensor, + bool_closure, + spec=tensor_lib.TensorSpec(shape=(), dtype=dtypes.bool)) + + print(concrete_fn()) # tf.Tensor([5.], shape=(1,), dtype=float32) + ``` + + Args: + tensor: Tensor already captured. 
This `tensor` should be listed in + concrete_function.captured_inputs except when it's empty such as when + the concrete function is restored from SavedModel. + closure: function which takes no arguments, to be evaluated at function + call time, returning a nest of tensors compatible with `spec`. + spec: nest of TypeSpec for the value to capture. + placeholder: optional. The internal placeholder corresponding to the + captured `tensor` and the new `closure`. + default_value: optional value to use in environments that cannot safely + evaluate closure. + """ + capture_index = None + for i, capture in enumerate(self._captured_inputs): + if id(tensor) == id(capture): + capture_index = i + break + + if placeholder is None: + if capture_index is None: + raise ValueError( + f"Did not find `tensor` argument {tensor} in the ConcreteFunction's" + " captured inputs list, and did not receive a placeholder argument." + " Thus we're unable to infer the internal placeholder. ") + + placeholder = self.inputs[-len(self._captured_inputs) + capture_index] + + if not (spec.is_compatible_with(tensor) or + spec.is_compatible_with(placeholder)): + raise ValueError( + f"Attempting to substitute closure with spec {spec} that's " + f"incompatible with the original capture {tensor} or the internal " + f"placeholder {placeholder}.") + + self._func_graph.replace_capture_with_deferred_capture( + tensor=tensor, + closure=closure, + spec=spec, + placeholder=placeholder, + default_value=default_value) + + if capture_index is not None: + self._captured_inputs[capture_index] = closure + + @property + def captured_inputs(self): + """Returns external Tensors captured by this function. + + self.__call__(*args) passes `args + self.captured_inputs` to the function. 
+ """ + return nest.flatten( + [x() if callable(x) else x for x in self._captured_inputs], + expand_composites=True) + + @property + def function_def(self): + """Returns a `FunctionDef` object representing this function.""" + return self._delayed_rewrite_functions.forward().cached_definition + + @property + def output_shapes(self): + """The function's output shapes.""" + return nest.map_structure( + lambda x: getattr(x, "shape", tensor_shape.TensorShape(None)), + composite_tensor.replace_composites_with_components( + self._func_graph.structured_outputs), + expand_composites=False) + + @property + def output_dtypes(self): + # TODO(akshayka): Consider removing this. + return nest.map_structure( + lambda x: x.dtype if x is not None else None, + composite_tensor.replace_composites_with_components( + self._func_graph.structured_outputs), + expand_composites=False) + + def add_to_graph(self, g=None, overwrite=False): + """Registers the function, adds it to the graph g or default graph. + + Args: + g: If specified, registers the function with this graph. Defaults to the + current context (either the default graph or the eager context). + overwrite: A bool. If True, its forward function will overwrite + any existing function of the same signature name in the graph `g`. + """ + # If we are not executing eagerly, adds the function to default graph if no + # graph is specified. + # In case of eager execution, function definition gets added to context + # during construction itself. 
+ + if not context.executing_eagerly() and not g: + g = ops.get_default_graph() + + if g is not None: + g._add_function_recursive(self._delayed_rewrite_functions.forward()) # pylint: disable=protected-access + + def add_gradient_functions_to_graph(self, g=None): + """Add forward/backward functions to graph `g` or the current context.""" + if not context.executing_eagerly() and not g: + g = ops.get_default_graph() + g._add_function_recursive(self._delayed_rewrite_functions.forward()) # pylint: disable=protected-access + forward_function, backward_function = ( + self._delayed_rewrite_functions.forward_backward()) + g._add_function_recursive(forward_function) # pylint: disable=protected-access + backward_function.add_to_graph(g) + + def _get_gradient_function(self): + """Returns gradient function. It will be lazily created at first call.""" + return self._delayed_rewrite_functions._rewrite_forward_and_call_backward # pylint: disable=protected-access + + def _select_forward_and_backward_functions( + self, args, possible_gradient_type, executing_eagerly): + """Selects forward and backward functions based on the calling context. + + The forward function computes the "real" function outputs, `self._outputs`, + and any extra values needed by the corresponding backward function. + + Args: + args: A flat list of Tensors with all of the inputs to the forward + function (including user-specified and captured inputs). + possible_gradient_type: One of gradients_util.POSSIBLE_GRADIENT_TYPES_*. + executing_eagerly: Boolean, the value of context.executing_eagerly(). + + Returns: + An object with a `forward` method returning a tuple of (forward_function : + AtomicFunction, augmented_arguments : List), and a corresponding + `record` method which takes outputs from the forward function and records + the operation. forward_function should be called with augmented_arguments. 
+ """ + if executing_eagerly: + input_tangents = forwardprop_util.pack_tangents(args) + else: + input_tangents = forwardprop_util.TangentInfo() + need_gradients_for_jvps = record.should_record_backprop( + input_tangents.tangents) + # Allows re-use of forward and backward function pairs depending on the + # tapes and forward accumulators watching its inputs. + cache_key = (need_gradients_for_jvps, input_tangents.indices) + if (possible_gradient_type + == gradients_util.POSSIBLE_GRADIENT_TYPES_FIRST_ORDER): + if input_tangents.indices or executing_eagerly: + # There is a single non-persistent tape active, so the user can only + # request first-order gradients from a tape. We can spend less time + # graph building since we know this. + # + # We may still end up computing higher-order gradients, but that'd be + # through `tf.gradients`, which can re-write the forward pass and so + # needs no preparation here. + functions = self._first_order_tape_functions.get(cache_key, None) + if functions is None: + functions = _FirstOrderTapeGradientFunctions( + self._func_graph, self._attrs, self._garbage_collector, + forwardprop_input_indices=input_tangents.indices, + delayed_rewrite_functions=self._delayed_rewrite_functions, + need_gradients_for_jvps=need_gradients_for_jvps) + self._first_order_tape_functions[cache_key] = functions + return _ForwardBackwardCall( + functions, args, input_tangents.tangents, tape_watching=True) + else: + # We can avoid computing second-order gradients in some cases by doing a + # delayed rewrite when graph building. Since we know we'll only compute + # first-order tape gradients, the delayed rewrite is safe: we won't need + # to tell the tape about side outputs. + # + # TODO(allenl): This case is really dirty. It would be better if we + # could temporarily pop all of the current tapes to avoid + # accidentally taking second-order gradients. 
+ return _ForwardBackwardCall( + self._delayed_rewrite_functions, args, input_tangents.tangents, + tape_watching=True) + elif (possible_gradient_type + == gradients_util.POSSIBLE_GRADIENT_TYPES_HIGHER_ORDER): + # Either there's a persistent tape watching, or there are multiple nested + # tapes. Either way, the user may request higher-order gradients. We'll + # spend a bit more time and make sure higher-order gradients are correct. + functions = self._higher_order_tape_functions.get( + cache_key, None) + if functions is None: + functions = _HigherOrderTapeGradientFunctions( + self._func_graph, self._attrs, self._garbage_collector, + forwardprop_input_indices=input_tangents.indices, + delayed_rewrite_functions=self._delayed_rewrite_functions, + need_gradients_for_jvps=need_gradients_for_jvps) + self._higher_order_tape_functions[cache_key] = functions + return _ForwardBackwardCall(functions, args, input_tangents.tangents, + tape_watching=True) + # else possible_gradient_type == POSSIBLE_GRADIENT_TYPES_NONE, meaning no + # tape is recording. 
+ return _ForwardBackwardCall( + self._delayed_rewrite_functions, args, input_tangents.tangents, + tape_watching=False) + + @property + def _as_name_attr_list(self): + """Returns a `NameAttrList` representing this function.""" + ret = attr_value_pb2.NameAttrList(name=self.name) + for name, value in self._attrs.items(): + ret.attr[name].CopyFrom(value) + return ret + + def _flat_signature_summary(self): + """Returns a string summarizing this function's flat signature.""" + assert self._arg_keywords is not None + assert self._num_positional_args is not None + arg_names = self._arg_keywords + if self._num_positional_args > len(arg_names): + arg_names.extend( + "".format(i + 1) + for i in range(len(arg_names), self._num_positional_args)) + return f"{self._func_graph.name}({', '.join(arg_names)})" + + def pretty_printed_signature(self, verbose=True): + """Returns a string summarizing the signature of this concrete function.""" + assert self.function_type is not None + if verbose: + return repr(self.function_type) + else: + return str(self.function_type) + + def __repr__(self): + if self.function_type is not None: + return "".format( + self.pretty_printed_signature(verbose=False), id(self) + ) + elif not (self._num_positional_args is None or self._arg_keywords is None): + return "".format( + self._flat_signature_summary(), id(self) + ) + else: + return object.__repr__(self) + + def __str__(self): + if self.function_type is not None: + return "ConcreteFunction {}".format( + self.pretty_printed_signature(verbose=True) + ) + else: + return self.__repr__() + + def _trackable_children(self, save_type="checkpoint", **kwargs): + """Implements `Trackable`.""" + if save_type == "checkpoint": + # Checkpoint dependencies do not include functions at all. Users + # expect the checkpointed variables to be saved using the model + # architecture, e.g. `model.layers[1].kernel` or `model.variables`. 
+ return {} + + captured_trackables = {} + for n, (capture, _) in enumerate(self.graph.captures): + if (capture.dtype not in (dtypes.variant, dtypes.resource) and + not resource_variable_ops.is_resource_variable(capture)): + # Variant/resource type tensors are skipped since we have no way of + # getting the `Trackable` wrapper for these tensors. The wrappers are + # expected to be elsewhere in the saved object graph. + # TODO(b/223866972): Directly encode/decode tensor captures. + + # Resource variable captures are also skipped at this time, to maintain + # existing behavior. + # TODO(b/217979389): Return the non-constant captures as children. + + captured_trackables[f"capture_{n}"] = capture + + return captured_trackables + + def _deserialization_dependencies(self, children): + return children + + def _export_to_saved_model_graph(self, object_map, tensor_map, + **unused_kwargs): + if not self.graph.saveable: + raise ValueError( + (f"Unable to save function {self.name} for the following reason(s):\n" + + "\n".join(self.graph.saving_errors))) + self.add_to_graph() + object_map[self] = saved_model_exported_concrete.ExportedConcreteFunction( + self, tensor_map) + return [] + + +class ConcreteFunctionGarbageCollector: + """Cleans up reference cycles when a `ConcreteFunction` goes out of scope.""" + + __slots__ = ["_func_graph"] + + def __init__(self, func_graph): + self._func_graph = func_graph + + def release(self): + """Call off the FuncGraph deletion.""" + self._func_graph = None + + def __del__(self): + if func_graph_module is None or self._func_graph is None: + return + try: + func_graph_module.dismantle_func_graph(self._func_graph) + except: # pylint: disable=bare-except + pass + + +class _Marker(object): + """Markers used to pretty-print nested args in function signatures.""" + + __slots__ = ["_s"] + + def __init__(self, s): + self._s = s + + def __repr__(self): + return str(self._s) + + +def _contains_type_spec(value): + return any(isinstance(x, 
type_spec.TypeSpec) for x in nest.flatten(value)) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/eager_function_run.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/eager_function_run.py new file mode 100644 index 0000000000000000000000000000000000000000..19a4a5867d776e5f75807ee1b2d8e381b0be20fc --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/eager_function_run.py @@ -0,0 +1,111 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# pylint: disable=unidiomatic-typecheck +"""Eager semantics for polymorphic function.""" + +from tensorflow.python.util import deprecation +from tensorflow.python.util.tf_export import tf_export + + +RUN_FUNCTIONS_EAGERLY = False + + +@tf_export("config.functions_run_eagerly") +def functions_run_eagerly(): + """Returns the value of the `run_functions_eagerly` setting.""" + return RUN_FUNCTIONS_EAGERLY + + +@tf_export("config.run_functions_eagerly") +def run_functions_eagerly(run_eagerly): + """Enables / disables eager execution of `tf.function`s. + + Calling `tf.config.run_functions_eagerly(True)` will make all + invocations of `tf.function` run eagerly instead of running as a traced graph + function. This can be useful for debugging. 
As the code now runs line-by-line, + you can add arbitrary `print` messages or pdb breakpoints to monitor the + inputs/outputs of each Tensorflow operation. However, you should avoid using + this for actual production because it significantly slows down execution. + + >>> def my_func(a): + ... print(f'a: {a}') + ... return a + a + >>> a_fn = tf.function(my_func) + + >>> # A side effect the first time the function is traced + >>> # In tracing time, `a` is printed with shape and dtype only + >>> a_fn(tf.constant(1)) + a: Tensor("a:0", shape=(), dtype=int32) + + + >>> # `print` is a python side effect, it won't execute as the traced function + >>> # is called + >>> a_fn(tf.constant(2)) + + + >>> # Now, switch to eager running + >>> tf.config.run_functions_eagerly(True) + >>> # The code now runs eagerly and the actual value of `a` is printed + >>> a_fn(tf.constant(2)) + a: 2 + + + >>> # Turn this back off + >>> tf.config.run_functions_eagerly(False) + + Note: This flag has no effect on functions passed into tf.data transformations + as arguments. tf.data functions are never executed eagerly and are always + executed as a compiled Tensorflow Graph. + + Args: + run_eagerly: Boolean. Whether to run functions eagerly. + """ + global RUN_FUNCTIONS_EAGERLY + RUN_FUNCTIONS_EAGERLY = bool(run_eagerly) + + +@deprecation.deprecated( + None, "Use `tf.config.run_functions_eagerly` instead of the experimental " + "version.") +@tf_export("config.experimental_run_functions_eagerly") +def experimental_run_functions_eagerly(run_eagerly): + """Enables / disables eager execution of `tf.function`s. + + Calling `tf.config.experimental_run_functions_eagerly(True)` will make all + invocations of `tf.function` run eagerly instead of running as a traced graph + function. + + See `tf.config.run_functions_eagerly` for an example. + + Note: This flag has no effect on functions passed into tf.data transformations + as arguments. 
tf.data functions are never executed eagerly and are always + executed as a compiled Tensorflow Graph. + + Args: + run_eagerly: Boolean. Whether to run functions eagerly. + + Returns: + None + """ + return run_functions_eagerly(run_eagerly) + + +@deprecation.deprecated( + None, + "Use tf.config.functions_run_eagerly instead of the experimental version.") +@tf_export("config.experimental_functions_run_eagerly") +def experimental_functions_run_eagerly(): + """Returns the value of the `experimental_run_functions_eagerly` setting.""" + return functions_run_eagerly() diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/function_context.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/function_context.py new file mode 100644 index 0000000000000000000000000000000000000000..76278dcc7bc5081ce7d429582a5c314981ccb3c1 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/function_context.py @@ -0,0 +1,127 @@ +# Copyright 2022 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Context information for a tf.function.""" + +from typing import NamedTuple, Any + +from tensorflow.core.function.polymorphism import function_cache +from tensorflow.python.eager import context +from tensorflow.python.framework import device as pydev +from tensorflow.python.framework import func_graph as func_graph_module +from tensorflow.python.framework import ops +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.saved_model import save_context + + +# EagerContext is used by tf.function to identify cases where tracing +# needs to occur due to a change in conditions other than the arguments. +class EagerContext(NamedTuple): + parent_graph: Any + device_functions: Any + colocation_stack: Any + in_cross_replica_context: Any + variable_policy: Any + xla_context_id: Any + + +def make_function_context(scope_type=None) -> function_cache.FunctionContext: + """Generates a FunctionContext based on current contextual info.""" + ctx = context.context() + + # Don't need to open an init_scope if the tf.function call is in eager mode + # already. + executing_eagerly = ctx.executing_eagerly() + parent_graph = None + xla_context_id = 0 + if not executing_eagerly: + # We want to force function retracing for each different + # XLAControlFlowContext, so add `xla_context_id` to the context. + xla_context = _enclosing_xla_context() + if xla_context is not None and xla_context.RequiresUniqueFunctionRetracing( + ): + xla_context_id = id(xla_context) + + with ops.init_scope(): + # The graph, or whether we're executing eagerly, should be a part of the + # cache key so we don't improperly capture tensors such as variables. 
+ executing_eagerly = ctx.executing_eagerly() + parent_graph = None if executing_eagerly else ops.get_default_graph() + + # pylint: disable=protected-access + default_graph = ops.get_default_graph() + # TODO(b/117617952): The current distribution strategy will affect graph + # building (e.g. accessing different variables from different devices) and + # so requires retracing for each device. + strategy_stack = default_graph._distribution_strategy_stack + uses_distribution_strategy = ( + strategy_stack and + strategy_stack[-1].strategy.extended._retrace_functions_for_each_device) + if executing_eagerly: + colocation_stack = () + if uses_distribution_strategy: + device_functions = (pydev.merge_device(ctx.device_name),) + else: + device_functions = () + else: + colocation_stack = tuple(default_graph._colocation_stack.peek_objs()) + if (uses_distribution_strategy or + func_graph_module.device_stack_has_callable( + default_graph._device_function_stack)): + # Putting the device in the cache key ensures that call-site device + # annotations are respected. 
+ device_functions = tuple(default_graph._device_functions_outer_to_inner) + else: + device_functions = () + + in_cross_replica_context = False + try: + in_cross_replica_context = (strategy_stack[-1].replica_context is None) # pylint: disable=protected-access + except (AttributeError, IndexError): + pass + + if save_context.in_save_context(): + variable_policy = ( + save_context.get_save_options().experimental_variable_policy) + else: + variable_policy = None + + return function_cache.FunctionContext( + EagerContext( + parent_graph, + device_functions, + colocation_stack, + in_cross_replica_context, + variable_policy, + xla_context_id, + ), + scope_type, + ) + + +def _enclosing_xla_context(): + """Returns the XLAControlFlowContext, which exists inside a tpu.rewrite().""" + graph = ops.get_default_graph() + while graph is not None: + # pylint: disable=protected-access + context_ = graph._get_control_flow_context() + # pylint: enable=protected-access + while context_ is not None: + if isinstance(context_, control_flow_ops.XLAControlFlowContext): + return context_ + context_ = context_.outer_context + # This may be a FuncGraph due to defuns or v2 control flow. We need to + # find the original graph with the XLAControlFlowContext. + graph = getattr(graph, "outer_graph", None) + return None diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/function_type_utils.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/function_type_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..78528679a9be7111ada9ab6f119c53af0f938e7f --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/function_type_utils.py @@ -0,0 +1,548 @@ +# Copyright 2022 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for using FunctionType with tf.function."""

import functools
import inspect
from typing import Any, Dict, Tuple

import six

from tensorflow.core.function import trace_type
from tensorflow.core.function.polymorphism import function_type as function_type_lib
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.util import nest


def to_fullargspec(
    function_type: function_type_lib.FunctionType,
    default_values: Dict[str, Any],
) -> inspect.FullArgSpec:
  """Generates backwards compatible FullArgSpec from FunctionType."""
  args = []
  varargs = None
  varkw = None
  defaults = []
  kwonlyargs = []
  kwonlydefaults = {}

  positional_kinds = (
      inspect.Parameter.POSITIONAL_ONLY,
      inspect.Parameter.POSITIONAL_OR_KEYWORD,
  )
  for param in function_type.parameters.values():
    if param.kind in positional_kinds:
      args.append(param.name)
      if param.default is not inspect.Parameter.empty:
        defaults.append(default_values[param.name])
    elif param.kind is inspect.Parameter.KEYWORD_ONLY:
      kwonlyargs.append(param.name)
      if param.default is not inspect.Parameter.empty:
        kwonlydefaults[param.name] = default_values[param.name]
    elif param.kind is inspect.Parameter.VAR_POSITIONAL:
      varargs = param.name
    elif param.kind is inspect.Parameter.VAR_KEYWORD:
      varkw = param.name

  return inspect.FullArgSpec(
      args,
      varargs,
      varkw,
      tuple(defaults) if defaults else None,
      kwonlyargs,
      kwonlydefaults if kwonlydefaults else None,
      annotations={},
  )


def _to_default_values(fullargspec):
  """Returns default values from the function's inspected fullargspec."""
  if fullargspec.defaults is not None:
    # Defaults align with the *last* len(defaults) positional args.
    defaulted_args = fullargspec.args[-len(fullargspec.defaults):]
    defaults = dict(zip(defaulted_args, fullargspec.defaults))
  else:
    defaults = {}

  if fullargspec.kwonlydefaults is not None:
    defaults.update(fullargspec.kwonlydefaults)

  return {
      function_type_lib.sanitize_arg_name(name): value
      for name, value in defaults.items()
  }


def to_function_type(fullargspec):
  """Generates FunctionType and default values from fullargspec."""
  default_values = _to_default_values(fullargspec)
  param_cls = function_type_lib.Parameter
  parameters = []

  for arg in fullargspec.args:
    sanitized = function_type_lib.sanitize_arg_name(arg)
    parameters.append(
        param_cls(sanitized, param_cls.POSITIONAL_OR_KEYWORD,
                  sanitized in default_values, None))

  if fullargspec.varargs is not None:
    parameters.append(
        param_cls(fullargspec.varargs, param_cls.VAR_POSITIONAL, False, None))

  for kwarg in fullargspec.kwonlyargs:
    parameters.append(
        param_cls(
            function_type_lib.sanitize_arg_name(kwarg),
            param_cls.KEYWORD_ONLY, kwarg in default_values, None))

  if fullargspec.varkw is not None:
    parameters.append(
        param_cls(fullargspec.varkw, param_cls.VAR_KEYWORD, False, None))

  return function_type_lib.FunctionType(parameters), default_values


def to_input_signature(function_type):
  """Extracts an input_signature from function_type instance."""
  constrained_parameters = list(function_type.parameters.keys())

  # self does not have a constraint in input_signature
  if "self" in constrained_parameters:
    constrained_parameters.pop(0)

  # There are no parameters to constrain.
  if not constrained_parameters:
    return tuple()

  constraints = []
  is_auto_constrained = False

  for parameter_name in constrained_parameters:
    parameter = function_type.parameters[parameter_name]
    if not parameter.type_constraint:
      continue
    # Generate legacy constraint representation.
    constraint = parameter.type_constraint.placeholder_value(
        trace_type.InternalPlaceholderContext(unnest_only=True)
    )
    flat_constraint = nest.flatten([constraint], expand_composites=True)
    if all(isinstance(arg, tensor.TensorSpec) for arg in flat_constraint):
      constraints.append(constraint)
    else:
      # input_signature only supports contiguous TensorSpec composites
      is_auto_constrained = True
      break

  # All constraints were generated by FunctionType
  if is_auto_constrained and not constraints:
    return tuple()

  # If the list is empty then there was no input_signature specified.
  return tuple(constraints) if constraints else None


def to_arg_names(function_type):
  """Generates a list of arg names from a FunctionType."""
  positional_kinds = {
      function_type_lib.Parameter.POSITIONAL_ONLY,
      function_type_lib.Parameter.POSITIONAL_OR_KEYWORD,
  }
  return [
      p.name
      for p in function_type.parameters.values()
      if p.kind in positional_kinds
  ]


# TODO(b/214462107): Minimize API surface for FunctionSpec.
class FunctionSpec(object):
  """Specification of how to bind arguments to a function.

  Deprecated. Please use FunctionType instead.
  """

  @classmethod
  def from_function_and_signature(
      cls, python_function, input_signature, is_pure=False, jit_compile=None
  ):
    """Creates a FunctionSpec instance given a python function and signature.

    Args:
      python_function: a function to inspect
      input_signature: a signature of the function (None, if variable)
      is_pure: if True all input arguments (including variables and constants)
        will be converted to tensors and no variable changes allowed.
      jit_compile: see `tf.function`

    Returns:
      instance of FunctionSpec
    """
    function_type, default_values = make_function_type(
        python_function, input_signature)

    # Get the function's name. Remove functools.partial wrappers if necessary.
    unwrapped = python_function
    while isinstance(unwrapped, functools.partial):
      unwrapped = unwrapped.func
    name = getattr(unwrapped, "__name__", "f")

    return FunctionSpec(
        function_type,
        default_values,
        is_pure=is_pure,
        jit_compile=jit_compile,
        name=name,
    )

  @classmethod
  def from_fullargspec_and_signature(
      cls,
      fullargspec,
      input_signature,
      is_pure=False,
      name=None,
      jit_compile=None,
  ):
    """Construct FunctionSpec from legacy FullArgSpec format."""
    function_type, default_values = to_function_type(fullargspec)
    if input_signature:
      input_signature = tuple(input_signature)
      _validate_signature(input_signature)
      function_type = function_type_lib.add_type_constraints(
          function_type, input_signature, default_values
      )

    return FunctionSpec(
        function_type, default_values, is_pure, name, jit_compile
    )

  def __init__(
      self,
      function_type,
      default_values,
      is_pure=False,
      name=None,
      jit_compile=None,
  ):
    """Constructs a FunctionSpec describing a python function.

    Args:
      function_type: A FunctionType describing the python function signature.
      default_values: Dictionary mapping parameter names to default values.
      is_pure: if True all input arguments (including variables and constants)
        will be converted to tensors and no variable changes allowed.
      name: Name of the function
      jit_compile: see `tf.function`.
    """
    self._function_type = function_type
    self._default_values = default_values
    self._fullargspec = to_fullargspec(function_type, default_values)
    self._is_pure = is_pure
    self._jit_compile = jit_compile

    # TODO(edloper): Include name when serializing for SavedModel?
    self._name = name or "f"
    self._input_signature = to_input_signature(function_type)

  @property
  def default_values(self):
    """Returns dict mapping parameter names to default values."""
    return self._default_values

  @property
  def function_type(self):
    """Returns a FunctionType representing the Python function signature."""
    return self._function_type

  @property
  def fullargspec(self):
    return self._fullargspec

  # TODO(fmuham): Replace usages with FunctionType and remove.
  @property
  def input_signature(self):
    return self._input_signature

  # TODO(fmuham): Replace usages with FunctionType and remove.
  @property
  def flat_input_signature(self):
    return tuple(nest.flatten(self.input_signature, expand_composites=True))

  @property
  def is_pure(self):
    return self._is_pure

  @property
  def jit_compile(self):
    return self._jit_compile

  # TODO(fmuham): Replace usages and remove.
  @property
  def arg_names(self):
    return to_arg_names(self.function_type)

  def signature_summary(self, default_values=False):
    """Returns a string summarizing this function's signature.

    Args:
      default_values: If true, then include default values in the signature.

    Returns:
      A `string`.
    """
    summary = f"{self._function_type!r}"
    if default_values:
      summary += "\nDefaults:"
      if self.default_values:
        for name, value in self.default_values.items():
          summary += f"\n  {name}: {value!r}"
      else:
        summary += "\n  None"
    return summary


def make_function_type(python_function, input_signature):
  """Generates a FunctionType for python_function."""
  _validate_signature(input_signature)

  function_type = function_type_lib.FunctionType.from_callable(python_function)
  default_values = function_type_lib.FunctionType.get_default_values(
      python_function)

  if input_signature is not None:
    function_type = function_type_lib.add_type_constraints(
        function_type, tuple(input_signature), default_values)

  return function_type, default_values


def make_canonicalized_monomorphic_type(
    args: Any,
    kwargs: Any,
    capture_types: Any,
    polymorphic_type,
) -> Tuple[function_type_lib.FunctionType, trace_type.InternalTracingContext]:
  """Generates function type given the function arguments."""
  sanitized_kwargs = {
      function_type_lib.sanitize_arg_name(name): value
      for name, value in kwargs.items()
  }
  return function_type_lib.canonicalize_to_monomorphic(
      args, sanitized_kwargs, {}, capture_types, polymorphic_type)


def canonicalize_function_inputs(
    args, kwargs, function_type, default_values=None, is_pure=False
):
  """Canonicalizes `args` and `kwargs`.

  Canonicalize the inputs to the Python function using FunctionType.
  In particular, we parse the varargs and kwargs that the
  original function was called with into a tuple corresponding to the
  Python function's positional (named) arguments and a dictionary
  corresponding to its kwargs. Missing default arguments are added.

  If the FunctionType has an type constraints, then they are used to convert
  arguments to tensors; otherwise, any inputs containing numpy arrays are
  converted to tensors.

  Args:
    args: The varargs this object was called with.
    kwargs: The keyword args this function was called with.
    function_type: FunctionType to canonicalize against.
    default_values: Default values to use.
    is_pure: Force variable inputs to Tensors.

  Returns:
    A canonicalized ordering of the inputs, as well as full and filtered
    (Tensors and Variables only) versions of their concatenated flattened
    representations, represented by a tuple in the form (args, kwargs,
    flat_args, filtered_flat_args). Here: `args` is a full list of bound
    arguments, and `kwargs` contains only true keyword arguments, as opposed
    to named arguments called in a keyword-like fashion.

  Raises:
    ValueError: If a keyword in `kwargs` cannot be matched with a positional
      argument when an input signature is specified, or when the inputs
      do not conform to the input signature.
  """
  if not default_values:
    default_values = {}
  if is_pure:
    args, kwargs = _convert_variables_to_tensors(args, kwargs)
  return bind_function_inputs(args, kwargs, function_type, default_values)


def bind_function_inputs(args, kwargs, function_type, default_values):
  """Bind `args` and `kwargs` into a canonicalized signature args, kwargs."""
  sanitized_kwargs = {
      function_type_lib.sanitize_arg_name(k): v for k, v in kwargs.items()
  }
  if len(kwargs) != len(sanitized_kwargs):
    # Two distinct keys mapped to the same sanitized name.
    raise ValueError(
        "Name collision after sanitization. Please rename "
        "tf.function input parameters. Original: "
        f"{sorted(kwargs.keys())}, Sanitized: "
        f"{sorted(sanitized_kwargs.keys())}"
    )

  try:
    bound_arguments = function_type.bind_with_defaults(
        args, sanitized_kwargs, default_values
    )
  except Exception as e:
    raise TypeError(
        f"Binding inputs to tf.function failed due to `{e}`. "
        f"Received args: {args} and kwargs: {sanitized_kwargs} for signature:"
        f" {function_type}."
    ) from e
  return bound_arguments


def _validate_signature(signature):
  """Checks the input_signature to be valid."""
  if signature is None:
    return

  if not isinstance(signature, (tuple, list)):
    raise TypeError(
        "input_signature must be either a tuple or a list, got "
        f"{type(signature)}."
    )

  # TODO(xjun): Allow VariableSpec once we figure out API for de-aliasing.
  variable_specs = _get_variable_specs(signature)
  if variable_specs:
    raise TypeError(
        f"input_signature doesn't support VariableSpec, got {variable_specs}"
    )

  flat_signature = nest.flatten(signature, expand_composites=True)
  bad_args = [
      arg for arg in flat_signature if not isinstance(arg, tensor.TensorSpec)
  ]
  if bad_args:
    raise TypeError(
        "input_signature must be a possibly nested sequence of "
        f"TensorSpec objects, got invalid args {bad_args} with "
        f"types {list(six.moves.map(type, bad_args))}."
    )


def _to_tensor_or_tensor_spec(x):
  if isinstance(x, (tensor.Tensor, tensor.TensorSpec)):
    return x
  return ops.convert_to_tensor(x)


def _convert_variables_to_tensors(args, kwargs):
  converted_args = tuple(_to_tensor_or_tensor_spec(x) for x in args)
  converted_kwargs = {
      kw: _to_tensor_or_tensor_spec(x) for kw, x in kwargs.items()
  }
  return converted_args, converted_kwargs


def _get_variable_specs(args):
  """Returns `VariableSpecs` from `args`."""
  variable_specs = []
  for arg in nest.flatten(args):
    if not isinstance(arg, type_spec.TypeSpec):
      continue
    if isinstance(arg, resource_variable_ops.VariableSpec):
      variable_specs.append(arg)
    elif not isinstance(arg, tensor.TensorSpec):
      # arg is a CompositeTensor spec.
      variable_specs.extend(_get_variable_specs(arg._component_specs))  # pylint: disable=protected-access
  return variable_specs


def derive_from_graph(func_graph):
  """Derives a FunctionType from FuncGraph."""
  # TODO(fmuham): Include structure info from structured_inputs
  input_signature = (
      tuple(trace_type.from_value(i) for i in func_graph.inputs),
      {},
  )

  # TODO(fmuham): Include output structure info from structured_outputs
  output_signature = tuple(
      trace_type.from_value(o) for o in func_graph.outputs)

  return function_type_lib.from_structured_signature(
      input_signature,
      output_signature,
      func_graph.function_captures.capture_types,
  )


# TODO(fmuham): Replace usages with TraceType and remove.
def is_same_structure(structure1, structure2, check_values=False):
  """Check two structures for equality, optionally of types and of values."""
  try:
    nest.assert_same_structure(structure1, structure2, expand_composites=True)
  except (ValueError, TypeError):
    return False
  if check_values:
    flattened1 = nest.flatten(structure1, expand_composites=True)
    flattened2 = nest.flatten(structure2, expand_composites=True)
    # First check the types to avoid AttributeErrors.
+ if any(type(f1) is not type(f2) for f1, f2 in zip(flattened1, flattened2)): + return False + return flattened1 == flattened2 + return True diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/polymorphic_function.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/polymorphic_function.py new file mode 100644 index 0000000000000000000000000000000000000000..e504410c8d5b153d849c86ea2d979d4bbad715ba --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/polymorphic_function.py @@ -0,0 +1,1740 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# pylint: disable=unidiomatic-typecheck +"""API for defining graph functions with some additional eager semantics. + +tf.function utilizes varying configurations of tracing compilation to allow +initializing `tf.Variable`s with subgraphs of the function. For example: + +```python +class M(tf.Module): + def __init__(self): + self.v_opinit = None + self.v_arginit = None + + @tf.function + def __call__(self, x): + # Variables are only created on the first call to the function. This is a + # common pattern in layer libraries. 
+ if self.v_opinit is None: + # self.v_opinit will outlive the function call, but `tf.ones` is traced as + # part of the function body before the `tf.Variable` object is + # created. This subgraph is easy to lift out of the function. + self.v_opinit = tf.Variable(tf.ones([])) + + # If arguments feed into variable initialization, it can be very tricky to + # disentangle from the rest of the function. We don't attempt it. + self.v_arginit = tf.Variable(tf.ones(tf.shape(x)) * tf.constant(2.)) + return self.v_opinit + self.v_arginit + x +``` + +These patterns using tracing compilation directly throw an error asking +the user to put the variable's initializer in a lambda. With tf.function they +work with eager semantics either by lifting the subgraph out of the function and +using it to initialize the variable, or by initializing variables on the first +call to the function (if they weren't already initialized by something else, +e.g. a checkpoint API). The latter requires tf.conds, and is not well supported +by TF-XLA, so we only do it when necessary. + +Since these patterns are relatively common in layer libraries, we expose the +wrapper in this file as `tf.function`. The defun concept in quarantine.py is a +legacy internal API. + +In order to support these variable initialization patterns, tf.function defines +a variable subtype (UnliftedInitializerVariable) which collects the input +subgraph. This type of variable replaces the regular variable type on the first +tf.function trace. To exclude initializers from the function body (the `tf.ones` +ops above and associated assignment operations), tf.function traces a second +time if it sees variables on the first call. 
+""" + +import dataclasses +import functools +import os +import threading +import types as types_lib +import warnings +import weakref + +from google.protobuf import text_format as _text_format +from google.protobuf.message import DecodeError +from tensorflow.core.framework import attr_value_pb2 +from tensorflow.core.function import trace_type +from tensorflow.core.function.capture import capture_container +from tensorflow.core.function.polymorphism import function_cache +from tensorflow.python.distribute.parallel_device import parallel_device +from tensorflow.python.eager import context +from tensorflow.python.eager import lift_to_graph +from tensorflow.python.eager import monitoring +from tensorflow.python.eager.polymorphic_function import attributes as attributes_lib +from tensorflow.python.eager.polymorphic_function import autograph_util +from tensorflow.python.eager.polymorphic_function import compiler_ir +from tensorflow.python.eager.polymorphic_function import eager_function_run +from tensorflow.python.eager.polymorphic_function import function_type_utils +from tensorflow.python.eager.polymorphic_function import tf_method_target +from tensorflow.python.eager.polymorphic_function import tracing_compilation +from tensorflow.python.framework import composite_tensor +from tensorflow.python.framework import errors +from tensorflow.python.framework import func_graph as func_graph_module +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_spec +from tensorflow.python.ops import array_ops_stack +from tensorflow.python.ops import cond +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import control_flow_util +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import resource_variable_ops +from tensorflow.python.platform import tf_logging as logging +from tensorflow.python.profiler import trace +from tensorflow.python.trackable import base as trackable +from 
tensorflow.python.types import core +from tensorflow.python.util import deprecation +from tensorflow.python.util import nest +from tensorflow.python.util import object_identity +from tensorflow.python.util import tf_decorator +from tensorflow.python.util import traceback_utils +from tensorflow.python.util.tf_export import tf_export + +FREQUENT_TRACING_WARNING_MAX_CALL_HISTORY = 10 +FREQUENT_TRACING_WARNING_THRESHOLD = 5 +FREQUENT_TRACING_WARNING_MAX_WARNING_PER_DETECTOR = 2 + +_tf_function_counter = monitoring.Counter( + "/tensorflow/core/tf_function_counter", + "Counter for the number of tf.functions created when Eager execution is " + "enabled.", + # jit_compile is "0" or "1". + "jit_compile") + + +class _FrequentTracingDetector(object): + """Class keeping track of how many recent calls triggered tracing.""" + + __slots__ = ["_calls_per_tracings", "_call_count", "_total_warning_count"] + + def __init__(self): + self._calls_per_tracings = [] + self._total_warning_count = 0 + self._call_count = 0 + + def called_with_tracing(self, function_name, omit_warning): + """Updates the list of most recent calls' tracing information. + + Warns the user when recent calls caused retracing too often. + + Args: + function_name: the python function being traced. + omit_warning: If 'True', this call will not warn the user even if + retracing happens too often. + """ + self._call_count += 1 + self._calls_per_tracings.append(1) + + while self._calls_per_tracings: + if (self._call_count - self._calls_per_tracings[0] > + FREQUENT_TRACING_WARNING_MAX_CALL_HISTORY): + self._call_count -= self._calls_per_tracings.pop(0) + else: + break + + if (omit_warning or self._total_warning_count >= + FREQUENT_TRACING_WARNING_MAX_WARNING_PER_DETECTOR): + return + if len(self._calls_per_tracings) >= FREQUENT_TRACING_WARNING_THRESHOLD: + self._total_warning_count += 1 + logging.warning( + "{} out of the last {} calls to {} triggered tf.function " + "retracing. 
Tracing is expensive and the excessive number of " + "tracings could be due to (1) creating @tf.function repeatedly in " + "a loop, (2) passing tensors with different shapes, (3) passing " + "Python objects instead of tensors. For (1), please define your " + "@tf.function outside of the loop. For (2), @tf.function has " + "reduce_retracing=True option that can avoid unnecessary " + "retracing. For (3), please refer to " + "https://www.tensorflow.org/guide/function#controlling_retracing" + " and https://www.tensorflow.org/api_docs/python/tf/function for " + " more details.".format( + len(self._calls_per_tracings), self._call_count, function_name)) + + def called_without_tracing(self): + # We don't count tracing when users load a concrete function directly or + # call get_concrete_function, so the first call can be not a tracing call. + if not self._calls_per_tracings: + self._calls_per_tracings = [0] + self._calls_per_tracings[-1] += 1 + self._call_count += 1 + + +class _FrequentTracingDetectorManager(object): + """Class for the management of all _FrequentTracingDetector objects.""" + + __slots__ = ["_detectors", "_lock"] + + def __init__(self): + self._detectors = weakref.WeakKeyDictionary() # GUARDED_BY(self._lock) + self._lock = threading.Lock() + + def _get_detector(self, key): + if key not in self._detectors: + self._detectors[key] = _FrequentTracingDetector() + return self._detectors[key] + + def called_without_tracing(self, key): + with self._lock: + detector = self._get_detector(key) + detector.called_without_tracing() + + def called_with_tracing(self, key, function_name, omit_warning): + with self._lock: + detector = self._get_detector(key) + detector.called_with_tracing(function_name, omit_warning) + + +_frequent_tracing_detector_manager = _FrequentTracingDetectorManager() + + +class UnliftedInitializerVariable(resource_variable_ops.UninitializedVariable): + """Variable which does not lift its initializer out of function context. 
+ + Instances of this variable, when created, build a graph which runs their + initializer inside a tf.cond(is_initialized) block. + + This can only be created during tracing compilation called from + (eventually) eager mode. That is, non-function-building graphs are not + supported. + """ + + def __init__( + self, + initial_value=None, + trainable=None, + caching_device=None, + name=None, + dtype=None, + constraint=None, + add_initializers_to=None, + synchronization=None, + aggregation=None, + shape=None, + **unused_kwargs, + ): + """Creates a variable. + + Args: + initial_value: A `Tensor`, or Python object convertible to a `Tensor`, + which is the initial value for the Variable. The initial value must have + a shape specified unless `validate_shape` is set to False. Can also be a + callable with no argument that returns the initial value when called. + (Note that initializer functions from init_ops.py must first be bound to + a shape before being used here.) + trainable: If `True`, GradientTapes automatically watch uses of this + Variable. + caching_device: Optional device string or function describing where the + Variable should be cached for reading. Defaults to the Variable's + device. If not `None`, caches on another device. Typical use is to + cache on the device where the Ops using the Variable reside, to + deduplicate copying through `Switch` and other conditional statements. + name: Optional name for the variable. Defaults to `'Variable'` and gets + uniquified automatically. + dtype: If set, initial_value will be converted to the given type. If None, + either the datatype will be kept (if initial_value is a Tensor) or + float32 will be used (if it is a Python object convertible to a Tensor). + constraint: An optional projection function to be applied to the variable + after being updated by an `Optimizer` (e.g. used to implement norm + constraints or value constraints for layer weights). 
The function must + take as input the unprojected Tensor representing the value of the + variable and return the Tensor for the projected value (which must have + the same shape). Constraints are not safe to use when doing asynchronous + distributed training. + add_initializers_to: if not None and not in legacy graph mode, the + initializer tensor will be added to this map in addition to adding the + assignment to the function. + synchronization: Indicates when a distributed variable will be aggregated. + Accepted values are constants defined in the class + `tf.VariableSynchronization`. By default the synchronization is set to + `AUTO` and the current `DistributionStrategy` chooses when to + synchronize. + aggregation: Indicates how a distributed variable will be aggregated. + Accepted values are constants defined in the class + `tf.VariableAggregation`. + shape: (optional) The shape of this variable. If None, the shape of + `initial_value` will be used. When setting this argument to + `tf.TensorShape(None)` (representing an unspecified shape), the variable + can be assigned with values of different shapes. + + Raises: + ValueError: If the initial value is not specified, or does not have a + shape and `validate_shape` is `True`. + RuntimeError: If called outside of a function definition. + """ + with ops.init_scope(): + self._in_graph_mode = not context.executing_eagerly() + if not ops.inside_function(): + # If we've been init_scope()d out of the function definition nothing to do + # here; we can't really do the capturing or conditional logic. + resource_variable_ops.ResourceVariable.__init__( + self, initial_value=initial_value, trainable=trainable, + caching_device=caching_device, name=name, dtype=dtype, + constraint=constraint) + return + if initial_value is None: + raise ValueError("`initial_value` must be a Tensor or a Python " + "object convertible to a Tensor. 
Got None.") + init_from_fn = callable(initial_value) + + if constraint is not None and not callable(constraint): + raise ValueError(f"`constraint` with type {type(constraint)} must be a " + "callable.") + + with ops.name_scope(name, "Variable", [] + if init_from_fn else [initial_value]) as scope_name: + with ops.name_scope("Initializer"): + if init_from_fn: + initial_value = initial_value() + if isinstance(initial_value, trackable.CheckpointInitialValue): + self._maybe_initialize_trackable() + self._update_uid = initial_value.checkpoint_position.restore_uid + initial_value = initial_value.wrapped_value + + initial_value = ops.convert_to_tensor(initial_value, + name="initial_value", dtype=dtype) + assert initial_value is not None + + # Don't use `shape or initial_value.shape` since TensorShape has + # overridden `__bool__`. + if shape is None: + shape = initial_value.shape + + # Use the constructor for UninitializedVariable to start. Outside the name + # scope so we don't double up the prefix. 
+ super().__init__( + trainable=trainable, + caching_device=caching_device, + name=name, + shape=shape, + dtype=initial_value.dtype, + constraint=constraint, + synchronization=synchronization, + aggregation=aggregation, + extra_handle_data=initial_value, + **unused_kwargs) + + with ops.name_scope(scope_name): + if self._in_graph_mode: + with ops.init_scope(): + outer_graph = ops.get_default_graph() + func_graph = ops.get_default_graph() + function_placeholders = ( + func_graph.inputs + func_graph.internal_captures) + placeholder_ops = set( + [tensor.op for tensor in function_placeholders]) + lifted_initializer = lift_to_graph.lift_to_graph( + [initial_value], outer_graph, + disallowed_placeholders=placeholder_ops)[initial_value] + with ops.init_scope(): + self._initial_value = lifted_initializer + with ops.name_scope("IsInitialized"): + self._is_initialized_op = ( + resource_variable_ops.var_is_initialized_op(self._handle)) + if initial_value is not None: + with ops.name_scope("Assign") as n, ops.colocate_with(self._handle): + self._initializer_op = resource_variable_ops.assign_variable_op( + self._handle, lifted_initializer, name=n) + elif context.executing_eagerly(): + # In this case, both current scope and init scope are eager. + # Assign_variable_op will be executed immediately. So we don't need to + # add it to "add_initializers_to" to lift it out. + with ops.name_scope("Assign") as n, ops.colocate_with(self._handle): + resource_variable_ops.assign_variable_op( + self._handle, initial_value, name=n) + else: + # Init scope is eager but current scope is graph. We will lift out this + # variable by addint it into "add_initializers_to". + if add_initializers_to is not None: + add_initializers_to.append((self, initial_value)) + + def assign_fn(): + with ops.name_scope("Assign") as n, ops.colocate_with(self._handle): + resource_variable_ops.assign_variable_op( + self._handle, + initial_value, + name=n) + # Returning values to keep tf.cond happy. 
+ return ops.convert_to_tensor(1) + def not_assign_fn(): + return ops.convert_to_tensor(0) + # Note: this cond is always guaranteed to run because we're inside + # tracing compilation which will insert automatic control dependencies. + # It will only execute assign_fn if lifting failed. + graph = ops.get_default_graph() + + # Capture the handle ahead of time in order to avoid querying the shape + # of the handle which helps async execution performance + graph.capture(self._handle, shape=()) + cond.cond( + resource_variable_ops.var_is_initialized_op(self._handle), + not_assign_fn, assign_fn) + + +JIT_COMPILE_FUNCTIONS = ( + os.getenv("TF_FUNCTION_JIT_COMPILE_DEFAULT", "false").lower() + in ("true", "1")) + + +def _evaluate_var_is_initialized(variables): + """Compute booleans indicating whether each variable is initialized.""" + with ops.init_scope(): + var_is_initialized = [] + for v in variables: + var_is_initialized.append( + resource_variable_ops.var_is_initialized_op(v.handle)) + try: + # Stack all the var_is_initialized values into one tensor and interpret + # the numpy value. This will reduce the number of RPCs between client and + # worker in the remote case. + return array_ops_stack.stack(var_is_initialized).numpy() + except errors.UnimplementedError: + # Some devices do not support implicit copy-off to host. Fall back to + # variable-by-variable processing. + for index, v in enumerate(variables): + try: + numpy_value = var_is_initialized[index].numpy() + except errors.UnimplementedError: + # This is a variable on a parallel device; we'll extract its value on + # each replica and assert that they're identical. 
+ components = parallel_device.unpack(var_is_initialized[index]) + with ops.device(None): + components = array_ops_stack.stack(components) + all_initialized = math_ops.reduce_all(components).numpy() + any_initialized = math_ops.reduce_any(components).numpy() + if all_initialized != any_initialized: + raise NotImplementedError( + f"Some but not all components of a parallel variable {v!r} " + "were initialized between their creation in a tf.function and " + "the function's trace having completed. This is not " + "supported; consider initializing either all or none of the " + "components, or moving initialization out of the function.") + numpy_value = all_initialized + var_is_initialized[index] = numpy_value + return var_is_initialized + + +class OptionalXlaContext: + """Wrapper for XLA context optionally applied under a context manager.""" + + def __init__(self, is_compiled): + wrap = is_compiled and not control_flow_util.GraphOrParentsInXlaContext( \ + ops.get_default_graph()) + self.xla_context = control_flow_ops.XLAControlFlowContext() \ + if wrap else None + + def __enter__(self): + if self.xla_context: + self.xla_context.Enter() + + def __exit__(self, t, value, traceback): + if self.xla_context: + self.xla_context.Exit() + + +@tf_export("__internal__.function.Function", v1=[]) +class Function(core.PolymorphicFunction, trackable.Trackable): + """A `tf.types.experimental.PolymorphicFunction` created by `tf.function`. + + Currently, individual methods/attributes under this class are not guaranteed + by the TF API contract, and are subject to future changes. + + (Previously also known as `tf.types.experimental.GenericFunction`) + """ + + def __init__(self, + python_function, + name, + input_signature=None, + autograph=True, + jit_compile=None, + reduce_retracing=False, + experimental_implements=None, + experimental_autograph_options=None, + experimental_attributes=None,): + """Initializes a `Function`. + + Args: + python_function: the function to be wrapped. 
+ name: the name given to it. + input_signature: See the documentation for `tf.function`. + autograph: See the documentation for `tf.function`. + jit_compile: See the documentation for `tf.function`. + reduce_retracing: See the documentation for `tf.function`. + experimental_implements: See the documentation for `tf.function`. + experimental_autograph_options: See the documentation for `tf.function`. + experimental_attributes: See the documentation for `tf.function`. + + Raises: + ValueError: if `input_signature` is not None and the `python_function`'s + argspec has keyword arguments. + """ + self._lock = threading.RLock() + self._python_function = python_function + self._function_type, self._default_values = ( + function_type_utils.make_function_type(python_function, input_signature) + ) + self._function_cache = function_cache.FunctionCache() + self._function_captures = capture_container.FunctionCaptures() + + self._attributes = {} + if experimental_implements is not None: + self._attributes = self._create_implements_attribute( + experimental_implements + ) + + if experimental_attributes is not None: + self._attributes.update(experimental_attributes) + + for attribute in self._attributes: + if attribute not in attributes_lib.POLYMORPHIC_FUNCTION_ALLOWLIST: + raise ValueError( + f"`{attribute} is not supported by tf.function as an attribute." + ) + + self._is_pure = ( + self._attributes and attributes_lib.IMPLEMENTS in self._attributes + ) + # If `True`, the function uses the rendezvous of the parent. This is only + # needed to support code where raw send/recv operations are inserted and + # when functions are run in graph mode where they may not be inlined. 
+ self._shared_rendezvous = None + self._autograph = autograph + self._experimental_autograph_options = experimental_autograph_options + self._reduce_retracing = reduce_retracing + self._jit_compile = jit_compile + self._created_variables = None # GUARDED_BY(self._lock) + self._variable_creation_config = None # GUARDED_BY(self._lock) + self._no_variable_creation_config = None # GUARDED_BY(self._lock) + self._descriptor_cache = weakref.WeakKeyDictionary() + self._name = name + self._key_for_call_stats = self._get_key_for_call_stats() + self._omit_frequent_tracing_warning = False + ops._tf_function_api_gauge.get_cell().set(True) # pylint: disable=protected-access + + @property + def name(self): + return self._name + + def __getstate__(self): + """Custom pickling, to omit unpickleable objects.""" + result = self.__dict__.copy() + del result["_lock"] + del result["_descriptor_cache"] + del result["_key_for_call_stats"] + return result + + def __setstate__(self, state): + """Restore from pickled state.""" + self.__dict__ = state + self._lock = threading.RLock() + self._descriptor_cache = weakref.WeakKeyDictionary() + self._key_for_call_stats = self._get_key_for_call_stats() + + def _get_key_for_call_stats(self): + """Returns key instance to track call stats and retracings. + + The key instance a best-effort to preserve global consistency. + """ + target_function = self._python_function + # `__wrapped__` is a conventional Python attribute that a higher-order + # function keeps its original function's instance. We also directly use + # this attribute for dealing with a class method. See + # `bound_method_wrapper` in `function.py`. If we don't use `__wrapped__`, + # all class methods will return the same `bound_method_wrapper` instance + # from this function. 
+ while hasattr(target_function, "__wrapped__"): + target_function = target_function.__wrapped__ + + if hasattr(target_function, "__func__"): + target_function = target_function.__func__ + + if hasattr(target_function, "__code__"): + return target_function.__code__ + + return self._python_function + + def _generate_scoped_tracing_options(self, scope, scope_type): + """Creates TracingOptions for variable creator scopes.""" + + weak_wrapped_fn = None + compile_with_xla = self._jit_compile + + def wrapped_fn(*args, **kwds): + """Wraps `self._python_function` in a variable creator scope.""" + # We register a variable creator with reduced priority. If an outer + # variable creator is just modifying keyword arguments to the variable + # constructor, this will work harmoniously. Since the `scope` registered + # here actually creates the variable, it taking priority would otherwise + # ignore the outer creator. + # + # If an outer variable creator calls the variable constructor manually, + # for example creating a MirroredVariable, then they won't call our + # creator. This means we won't be able to trace the initialization graph, + # and so variable initializers can't depend on function arguments. This is + # better than the alternative, tracing the initialization graph but giving + # the user a variable type they didn't want. + default_graph = ops.get_default_graph() + with default_graph._variable_creator_scope(scope, priority=50): # pylint: disable=protected-access + # __wrapped__ allows AutoGraph to swap in a converted function. We give + # the function a weak reference to itself to avoid a reference cycle. 
+ with OptionalXlaContext(compile_with_xla): + out = weak_wrapped_fn().__wrapped__(*args, **kwds) + return out + + weak_wrapped_fn = weakref.ref(wrapped_fn) + + return self._generate_tracing_options(tf_decorator.make_decorator( + self._python_function, + wrapped_fn), scope_type) + + def _create_implements_attribute(self, implements_arg): + """Creates the attribute value corresponding to attribute_lib.IMPLEMENTS.""" + attributes = {} + if isinstance(implements_arg, str): + # First check if the attribute_lib.IMPLEMENTS is specified as a + # NameAttrList. This is used when apart from the function name being + # implemented, a list of attributes is also being specified. + # The attributes are specified as key-value pairs in the NameAttrList + # of the corresponding AttrValue. The function name will be in the + # 'name' field of the NameAttrList. Else, it is just a string + # corresponding to the function name. + try: + attr_value = attr_value_pb2.AttrValue() + nameattrlist = attr_value_pb2.NameAttrList() + _text_format.Merge(implements_arg, nameattrlist) + attr_value.func.CopyFrom(nameattrlist) + attributes[attributes_lib.IMPLEMENTS] = attr_value + except (_text_format.ParseError, DecodeError): + attributes[attributes_lib.IMPLEMENTS] = implements_arg + return attributes + + def _generate_tracing_options(self, fn, scope_type): + """Return a TracingOptions catered to the input function.""" + attributes = self._attributes.copy() + + share = self._shared_rendezvous + if share is not None: + attributes[attributes_lib.SHARED_RENDEZVOUS] = share + + if self._jit_compile is not None: + attributes[attributes_lib.XLA_COMPILE] = bool(self._jit_compile) + if self._jit_compile: + attributes[attributes_lib.NO_INLINE] = True + + if self._autograph: + fn = autograph_util.py_func_from_autograph( + fn, self._experimental_autograph_options) + + return tracing_compilation.TracingOptions( + fn, + self._name, + polymorphic_type=self._function_type, + default_values=self._default_values, + 
scope_type=scope_type, + attributes=attributes, + autograph=self._autograph, + reduce_retracing=self._reduce_retracing, + autograph_options=self._experimental_autograph_options, + function_cache=self._function_cache, + function_captures=self._function_captures, + lock=self._lock, + ) + + def _initialize(self, args, kwds, add_initializers_to=None): + """Initializes, on the first call. + + Creates two `Function`s, one that will allow creation of variables + and one that won't. + + Additionally runs a trace for the `Function` that allows creation + of variables. + + Args: + args: Arguments to the underlying python callable. + kwds: Keyword arguments to the python callable. + add_initializers_to: Where to collect variable initializers, if not None. + """ + created_variables = [] + + def variable_capturing_scope(next_creator, **kwds): + """Creates UnliftedInitializerVariables and saves references to them.""" + enable_variable_lifting = kwds.get("experimental_enable_variable_lifting") + if enable_variable_lifting is None: + enable_variable_lifting = True + if not enable_variable_lifting: + return next_creator(**kwds) + v = UnliftedInitializerVariable( + add_initializers_to=add_initializers_to, **kwds + ) + created_variables.append(weakref.ref(v)) + return v + + self._created_variables = created_variables + self._variable_creation_config = self._generate_scoped_tracing_options( + variable_capturing_scope, + tracing_compilation.ScopeType.VARIABLE_CREATION, + ) + # Force the definition of the function for these arguments + self._concrete_variable_creation_fn = tracing_compilation.trace_function( + args, kwds, self._variable_creation_config + ) + + def invalid_creator_scope(*unused_args, **unused_kwds): + """Disables variable creation.""" + raise ValueError( + "tf.function only supports singleton tf.Variables created on the " + "first call. Make sure the tf.Variable is only created once or " + "created outside tf.function. 
See " + "https://www.tensorflow.org/guide/function#creating_tfvariables " + "for more information.") + + self._no_variable_creation_config = self._generate_scoped_tracing_options( + invalid_creator_scope, + tracing_compilation.ScopeType.NO_VARIABLE_CREATION, + ) + + def _clone(self, python_function): + """Clone the function with different python function.""" + f = Function( + python_function=(self._python_function + if python_function is None else python_function), + name=self._name, + input_signature=self.input_signature, + autograph=self._autograph, + jit_compile=self._jit_compile, + reduce_retracing=self._reduce_retracing, + experimental_attributes=self._attributes, + experimental_autograph_options=self._experimental_autograph_options) + + if self._shared_rendezvous: + f._shared_rendezvous = self._shared_rendezvous # pylint: disable=protected-access + + return f + + def _decorate(self, decorator): + """Allows the captured Python function to be decorated in place. + + This method is only safe to call when the Function has not been called by a + user. It makes sense to use this method to push a decorator into the + function rather than wrapping the function in the decorator. + + We use this in tf.Module to allow user annotated `tf.functions` to remain as + `Function` objects but still automatically enter the Module name_scope + when they are evaluated like all other methods. + + Args: + decorator: A callable accepting a single argument which is the function + to decorate and returning a callable result. + + Raises: + ValueError: If the function has been called a ValueError is raised. + """ + if ( + self._variable_creation_config is not None + or self._no_variable_creation_config is not None + ): + raise ValueError( + "Functions cannot be decorated after they have been traced." 
+ ) + + self._python_function = decorator(self._python_function) + self._function_type, self._default_values = ( + function_type_utils.make_function_type( + self._python_function, self.input_signature + ) + ) + + # TODO: Remove this private method after updating all its uses + # A good moment to do this could be when the experimental label is removed + def _get_tracing_count(self): + return self.experimental_get_tracing_count() + + def experimental_get_tracing_count(self): + """Returns the number of times the function has been traced. + + For more information on when a function is traced and when it is + traced multiple times see https://www.tensorflow.org/guide/function. + Example: + + >>> @tf.function + ... def double(a): + ... return a + a + >>> double(tf.constant(1)) + >>> double(tf.constant(2)) + >>> double.experimental_get_tracing_count() + 1 + >>> double(tf.constant("a")) + >>> double.experimental_get_tracing_count() + 2 + + + The first time experimental_get_tracing_count is called + it returns 1, as the function is traced the first + time it is called, and the second time the same graph is used + since we're calling it with a parameter of the same type. + + The second time experimental_get_tracing_count is called + it returns 2, as we called double with a + different argument type, and so it was traced again. + + """ + return len(self._function_cache) + + @property + def _run_functions_eagerly(self): + return eager_function_run.RUN_FUNCTIONS_EAGERLY + + @traceback_utils.filter_traceback + def __call__(self, *args, **kwds): + # Implements PolymorphicFunction.__call__. + if self._run_functions_eagerly: + with trace.Trace(self._name, tf_function_call="eager"): + return self._python_function(*args, **kwds) + + # Only count the statistics the first time, before initialization took + # place. 
+ if self._created_variables is None: + compiled = bool(self._jit_compile and + not control_flow_util.GraphOrParentsInXlaContext( + ops.get_default_graph())) + # For nested functions, increment the counter only when a function with + # jit_compile=True is called within a function with jit_compile=False. We + # count this special case to correctly record that both jit_compile=True + # and jit_compile=False is being used for parts of the outer function. + if ops.executing_eagerly_outside_functions() and ( + context.executing_eagerly() or compiled): + # Labels must be strings in Python, so we convert 'compiled' to a string + _tf_function_counter.get_cell(str(int(compiled))).increase_by(1) + + tracing_count = self.experimental_get_tracing_count() + with trace.Trace(self._name) as tm: + # TODO(cheshire): Do not duplicate the XLAControlFlowContext annotation. + compiler = "xla" if self._jit_compile else "nonXla" + + with OptionalXlaContext(self._jit_compile): + result = self._call(*args, **kwds) + + new_tracing_count = self.experimental_get_tracing_count() + without_tracing = (tracing_count == new_tracing_count) + execution_mode = "notTraced" if without_tracing else "traced" + tm.set_metadata(tf_function_call=execution_mode + "-" + compiler, + tracing_count=new_tracing_count) + + if context.executing_eagerly(): + if without_tracing: + _frequent_tracing_detector_manager.called_without_tracing( + self._key_for_call_stats) + else: + _frequent_tracing_detector_manager.called_with_tracing( + self._key_for_call_stats, self._python_function, + self._omit_frequent_tracing_warning) + + return result + + def _call(self, *args, **kwds): + """Calls the graph function.""" + self._lock.acquire() + bound_args = function_type_utils.canonicalize_function_inputs( + args, + kwds, + self._function_type, + self._default_values, + self._is_pure, + ) + args, kwds = bound_args.args, bound_args.kwargs + if self._created_variables: + # Release the lock early so that multiple threads can perform 
the call + # in parallel. + self._lock.release() + # In this case we have created variables on the first call, so we run the + # defunned version which is guaranteed to never create variables. + return tracing_compilation.call_function( + args, kwds, self._no_variable_creation_config + ) + elif self._variable_creation_config is not None: + # Release the lock early so that multiple threads can perform the call + # in parallel. + self._lock.release() + # In this case we have not created variables on the first call. So we can + # run the first trace but we should fail if variables are created. + results = tracing_compilation.call_function( + args, kwds, self._variable_creation_config + ) + if self._created_variables: + raise ValueError("Creating variables on a non-first call to a function" + " decorated with tf.function.") + return results + + try: + # This is the first call of __call__, so we have to initialize. + initializers = [] + self._initialize(args, kwds, add_initializers_to=initializers) + finally: + # At this point we know that the initialization is complete (or less + # interestingly an exception was raised) so we no longer need a lock. + self._lock.release() + + if self._created_variables: + try: + # Attempt to initialize variables eagerly and without conds by lifting + # out initialization graphs. This is the only initialization strategy + # compatible with XLA at the moment. + self._initialize_uninitialized_variables(initializers) + except lift_to_graph.UnliftableError: + pass # Fall through to cond-based initialization. + else: + # Lifting succeeded, so variables are initialized and we can run the + # no_variable_creation function. + return tracing_compilation.call_function( + args, kwds, self._no_variable_creation_config + ) + else: + bound_args = self._concrete_variable_creation_fn.function_type.bind( + *args, **kwds + ) + # If we did not create any variables the trace we have is good enough. 
+ filtered_flat_args = ( + self._concrete_variable_creation_fn.function_type.unpack_inputs( + bound_args + ) + ) + return self._concrete_variable_creation_fn._call_flat( # pylint: disable=protected-access + filtered_flat_args, + self._concrete_variable_creation_fn.captured_inputs, + ) + + def fn_with_cond(inner_args, inner_kwds): + """Conditionally runs initialization if it's needed.""" + condition = True + for v, _ in initializers: + condition = math_ops.logical_and( + condition, resource_variable_ops.var_is_initialized_op( + v.handle)) + # We want to call no_variable_creation if possible because it avoids + # recomputing potentially expensive initializers. + return cond.cond( + condition, + lambda: tracing_compilation.call_function( # pylint: disable=g-long-lambda + inner_args, inner_kwds, self._no_variable_creation_config + ), + lambda: self._concrete_variable_creation_fn( # pylint: disable=g-long-lambda + *inner_args, **inner_kwds + ), + ) + + # We've created variables and are unable to lift the initialization graphs, + # so we fall back to initializing with conds while running the function. + # TODO(b/216870587) Note that this path is not currently supported for XLA. + if self._jit_compile: + raise errors.UnimplementedError( + None, None, + "We failed to lift variable creations out of this tf.function, " + "so this tf.function cannot be run on XLA. 
A possible workaround is " + "to move variable creation outside of the XLA compiled function.") + canon_args, canon_kwds = bound_args.args, bound_args.kwargs + options = tracing_compilation.TracingOptions(fn_with_cond, "fn_with_cond") + return tracing_compilation.call_function( + (canon_args, canon_kwds), {}, options + ) + + def experimental_get_compiler_ir(self, *args, **kwargs): + # Implements PolymorphicFunction.experimental_get_compiler_ir + context.ensure_initialized() + if not self._jit_compile: + raise ValueError("Compiler IR can only be returned for functions marked " + "with 'jit_compile=True'") + + is_tensor_spec = lambda x: isinstance(x, tensor_spec.TensorSpec) + + def _check_inputs(args, kwargs): + all_inputs = list(args) + list(kwargs.values()) + # Emtpy input is okay. + if not all_inputs: + return + if any(map(is_tensor_spec, all_inputs)) and any( + map(lambda x: not is_tensor_spec(x), all_inputs) + ): + raise ValueError( + "experimental_get_compiler_ir supports either " + "(1) all inputs are TensorSpec or " + "(2) all inputs are tf.Tensor/python variables" + ) + + _check_inputs(args, kwargs) + if ( + len(args) + len(kwargs.values()) > 0 + and all(map(is_tensor_spec, args)) + and all(map(is_tensor_spec, kwargs.values())) + ): + # For the case inputs are not empty and input types are all tf.TensorSpec + concrete_fn = self.get_concrete_function(*args, **kwargs) + return compiler_ir.from_concrete_function(concrete_fn) + + concrete_fn = self.get_concrete_function(*args, **kwargs) + fn_name = concrete_fn.name + + # pylint: disable=protected-access + bound_args = function_type_utils.canonicalize_function_inputs( + args, kwargs, concrete_fn.function_type + ) + filtered_flat_args = concrete_fn.function_type.unpack_inputs(bound_args) + + def compiler_ir_generator( + stage="hlo", device_name=None, platform_name=None + ): + """Gets the compiler IR bytes. + + Args: + stage: The exported stage for the given function. 
+ device_name: The name of the device with the form as + "/job:localhost/replica:0/task:0/device:CPU:0", "/device:TPU:0" etc. + When this is used, actual device is used for getting the compiler IR. + platform_name: The name of the platform, e.g. "TPU". See the comment in + `get_compiler_ir` in `context.py`. + + Returns: + The compiler IR bytes. + """ + if device_name is not None: + if platform_name is not None: + raise ValueError( + "device_name and platform_name cannot be provided at the same" + " time." + ) + warnings.warn("device_name is being deprecated. Use platform_name.") + device_name = compiler_ir.maybe_get_device_name(device_name) + res_bytes = context.context().get_compiler_ir( + device_name=device_name, + platform_name=platform_name, + function_name=fn_name, + flat_args=list(filtered_flat_args), + captured_inputs=concrete_fn.captured_inputs, + stage=stage, + ) + if stage in ("hlo_serialized", "optimized_hlo_serialized", + "optimized_hlo_proto_serialized"): + return res_bytes + else: + return res_bytes.decode("utf-8") + + return compiler_ir_generator + + @property + def python_function(self): + """The python function wrapped in this tf.function.""" + return self._python_function + + @property + def input_signature(self): + return function_type_utils.to_input_signature(self._function_type) + + @property + def function_spec(self): + return function_type_utils.FunctionSpec( + self._function_type, + self._default_values, + False, + self._name, + self._jit_compile, + ) + + @property + def function_type(self): + return self._function_type + + def pretty_printed_concrete_signatures(self, verbose=True): + joiner = "\n\n" if verbose else "\n" + return joiner.join([ + c.pretty_printed_signature(verbose=verbose) + for c in self._list_all_concrete_functions() + ]) + + def _initialize_uninitialized_variables(self, initializers): + """Make and call a `ConcreteFunction` which initializes variables.""" + + if not initializers: + return + + var_is_initialized = 
_evaluate_var_is_initialized( + [v for v, _ in initializers]) + + def initialize_variables(): + op_map = object_identity.ObjectIdentityDictionary() + + inits = [] + for (v, init), is_initialized in zip(initializers, var_is_initialized): + with ops.init_scope(): + if is_initialized: + continue + inits.append(init) + + if inits: + op_map = lift_to_graph.lift_to_graph( + inits, ops.get_default_graph(), op_map=op_map) + for (v, init), is_initialized in zip(initializers, var_is_initialized): + with ops.init_scope(): + if is_initialized: + continue + v.assign(op_map[init], read_value=False) + + with ops.init_scope(): + # Note: using tracing compilation here avoids an infinite recursion. + # Most of the code in this function runs eagerly with init_scope, where + # autograph is not necessary. + options = tracing_compilation.TracingOptions( + initialize_variables, "initialize_variables", autograph=False + ) + return tracing_compilation.call_function(tracing_options=options) + + def get_initialization_function(self, *args, **kwargs): + """Returns a `ConcreteFunction` which initializes this function's variables. + + Requires that this function hasn't been accessed yet through either calling + it or calling get_concrete_function. Fails if we cannot build an initializer + function which does not depend on the concrete values of the inputs to this + function. + + Note that running this function will overwrite any values currently assigned + to variables, for example restores from a checkpoint. + + Args: + *args: arguments to the underlying python callable. + **kwargs: keyword arguments to the python callable. + + Returns: + A `ConcreteFunction` object which initializes the variables of this + function. + + Raises: + RuntimeError: if called after the variables have been initialized. 
+ """ + with self._lock: + if self._variable_creation_config is not None: + raise RuntimeError( + "get_initialization_function cannot be called after the function " + "has been used") + # Here we trace the function, collect the initializers, and attempt to + # extract them and run them eagerly. Fail only if we cannot do so. + initializers = [] + self._initialize(args, kwargs, add_initializers_to=initializers) + + def initialize_variables(): + for v, init in initializers: + v.assign( + lift_to_graph.lift_to_graph([init], ops.get_default_graph())[init], + read_value=False) + + # Note: using tracing compilation here avoids an infinite recursion. + options = tracing_compilation.TracingOptions( + initialize_variables, "initialize_variables" + ) + return tracing_compilation.trace_function(tracing_options=options) + + def _list_all_concrete_functions(self): + """Returns all concrete functions.""" + if self.input_signature is not None: + self.get_concrete_function() + return self._function_cache.values() + + def _list_all_concrete_functions_for_serialization(self): + """Returns all concrete functions for serialization. + + Returns: + A list of instances of `ConcreteFunction`. + """ + seen_signatures = [] + if self.input_signature is not None: + seen_signatures.append((self.input_signature, {})) + else: + concrete_functions = self._list_all_concrete_functions() + for concrete_function in concrete_functions: + signature = concrete_function.structured_input_signature + flattened = nest.flatten(signature) + if any( + isinstance(arg, func_graph_module.UnknownArgument) + for arg in flattened): + logging.info("Unsupported signature for serialization: %s.", + signature) + continue + equal_to_signature = functools.partial( + function_type_utils.is_same_structure, signature, check_values=True) + if not any(equal_to_signature(s) for s in seen_signatures): + seen_signatures.append(signature) + + # Re-create concrete functions for these signatures. 
Re-creating ensures + # that if the cache key has changed, the function will be traced again. + concrete_functions = [] + for args, kwargs in seen_signatures: + concrete_functions.append(self.get_concrete_function(*args, **kwargs)) + return concrete_functions + + def _trackable_children(self, save_type="checkpoint", **kwargs): + """For implementing `Trackable`.""" + if save_type == "checkpoint": + return {} + return {f"trace_{n}": fn for n, fn in + enumerate(self._list_all_concrete_functions_for_serialization())} + + def _deserialization_dependencies(self, children): + """Returns concrete functions which must be loaded before this object.""" + return children + + def _get_concrete_function_garbage_collected(self, *args, **kwargs): + """Returns a `ConcreteFunction` specialized to inputs and execution context. + + Unlike `get_concrete_function(...)`, the graph will be deleted when the + returned function is deleted. It's useful to avoid creating a reference + cycle when you know for sure that the graph will be no longer used without + the returned function. + + Args: + *args: inputs to specialize on. + **kwargs: inputs to specialize on. + + Returns: + A TensorFlow function which takes exactly one `tf.Tensor` per argument. + + Raises: + ValueError: if this object has not yet been called on concrete values. + """ + with self._lock: + if self._variable_creation_config is None: + initializers = [] + self._initialize(args, kwargs, add_initializers_to=initializers) + self._initialize_uninitialized_variables(initializers) + + if self._created_variables: + # In this case we have created variables on the first call, so we run the + # version which is guaranteed to never create variables. + return tracing_compilation.trace_function( + args, + kwargs, + dataclasses.replace( + self._no_variable_creation_config, bind_graph_to_function=True + ), + ) + elif self._variable_creation_config is not None: + # In this case we have not created variables on the first call. 
So we can + # run the first trace but we should fail if variables are created. + concrete = tracing_compilation.trace_function( + args, + kwargs, + dataclasses.replace( + self._variable_creation_config, bind_graph_to_function=True + ), + ) + if self._created_variables: + raise ValueError("Creating variables on a non-first call to a function" + " decorated with tf.function.") + return concrete + + def get_concrete_function(self, *args, **kwargs): + # Implements PolymorphicFunction.get_concrete_function. + concrete = self._get_concrete_function_garbage_collected(*args, **kwargs) + concrete._garbage_collector.release() # pylint: disable=protected-access + return concrete + + def __tf_tracing_type__(self, _): + return trace_type.Weakref(weakref.ref(self)) + + def __get__(self, instance, owner): + """Makes it possible to decorate instance methods.""" + del owner + # `instance` here is the instance that this `Function` was accessed through + # e.g., for + # + # class Foo: + # + # @tf.function + # def bar(self): + # ... + # + # foo = Foo() + # foo.bar() # `foo.bar` is a `Function` instance + # + # then `instance` will be `foo` (and `owner` will be `Foo`). For composite + # tensors, we can just treat `instance` as a normal parameter. But for + # other types, we create a new instance of `Function` here to allow + # different instances each to create variables once, thereby allowing + # methods to be decorated with tf.function. Keeps a cache to avoid retracing + # the function every time the descriptor is accessed. + # TODO(mdan): Identify types which can just be parameters more generically. + # + # The check for instance._type_spec=None is used because certain classes + # (including subclasses of tf.linalg.LinearOperator) are subclasses of + # CompositeTensor but do not actually implement the required APIs. + # TODO(b/199278478): Fix those classes, then remove the check for + # `instance._type_spec is not None`. 
+ if (isinstance(instance, composite_tensor.CompositeTensor) and + instance._type_spec is not None): # pylint: disable=protected-access + return types_lib.MethodType(self, instance) + if instance not in self._descriptor_cache: + if instance is None: + return self + # TODO(mdan): If the CompositeTensor path works, do the same here. + # It's unclear whether we need the tf-decorator, or could just call + # MethodType(self.clone(), instance) + self._descriptor_cache[instance] = ( + class_method_to_instance_method(self, instance)) + return self._descriptor_cache[instance] + + +@tf_export("function") +@deprecation.deprecated_args(None, + "experimental_compile is deprecated, use " + "jit_compile instead", "experimental_compile") +@deprecation.deprecated_args(None, + "experimental_relax_shapes is deprecated, use " + "reduce_retracing instead", + "experimental_relax_shapes") +@deprecation.deprecated_args(None, + "experimental_follow_type_hints is deprecated", + "experimental_follow_type_hints") +def function( + func=None, + input_signature=None, + autograph=True, + jit_compile=None, + reduce_retracing=False, + experimental_implements=None, + experimental_autograph_options=None, + experimental_attributes=None, + experimental_relax_shapes=None, + experimental_compile=None, + experimental_follow_type_hints=None # pylint: disable=unused-argument +) -> core.PolymorphicFunction: + """Compiles a function into a callable TensorFlow graph. + + `tf.function` constructs a `tf.types.experimental.PolymorphicFunction` that + executes a TensorFlow graph (`tf.Graph`) created by trace-compiling the + TensorFlow operations in `func`. More information on the topic can be found + in [Introduction to Graphs and tf.function] + (https://www.tensorflow.org/guide/intro_to_graphs). + + See [Better Performance with tf.function] + (https://www.tensorflow.org/guide/function) for tips on performance and + known limitations. + + Example usage: + + >>> @tf.function + ... def f(x, y): + ... 
return x ** 2 + y + >>> x = tf.constant([2, 3]) + >>> y = tf.constant([3, -2]) + >>> f(x, y) + + + The trace-compilation allows non-TensorFlow operations to execute, but under + special conditions. In general, only TensorFlow operations are guaranteed to + run and create fresh results whenever the `PolymorphicFunction` is called. + + ## Features + + `func` may use data-dependent Python control flow statements, including `if`, + `for`, `while` `break`, `continue` and `return`: + + >>> @tf.function + ... def f(x): + ... if tf.reduce_sum(x) > 0: + ... return x * x + ... else: + ... return -x // 2 + >>> f(tf.constant(-2)) + + + `func`'s closure may include `tf.Tensor` and `tf.Variable` objects: + + >>> @tf.function + ... def f(): + ... return x ** 2 + y + >>> x = tf.constant([-2, -3]) + >>> y = tf.Variable([3, -2]) + >>> f() + + + `func` may also use ops with side effects, such as `tf.print`, `tf.Variable` + and others: + + >>> v = tf.Variable(1) + >>> @tf.function + ... def f(x): + ... for i in tf.range(x): + ... v.assign_add(i) + >>> f(3) + >>> v + + + Important: Any Python side-effects (appending to a list, printing with + `print`, etc) will only happen once, when `func` is traced. To have + side-effects executed into your `tf.function` they need to be written + as TF ops: + + >>> l = [] + >>> @tf.function + ... def f(x): + ... for i in x: + ... l.append(i + 1) # Caution! Will only happen once when tracing + >>> f(tf.constant([1, 2, 3])) + >>> l + [] + + Instead, use TensorFlow collections like `tf.TensorArray`: + + >>> @tf.function + ... def f(x): + ... ta = tf.TensorArray(dtype=tf.int32, size=0, dynamic_size=True) + ... for i in range(len(x)): + ... ta = ta.write(i, x[i] + 1) + ... 
return ta.stack() + >>> f(tf.constant([1, 2, 3])) + + + ## `tf.function` creates polymorphic callables + + Internally, `tf.types.experimental.PolymorphicFunction` may contain multiple + `tf.types.experimental.ConcreteFunction`s, each specialized to arguments with + different data types or shapes, since TensorFlow can perform more + optimizations on graphs of specific shapes, dtypes and values of constant + arguments. `tf.function` treats any pure Python values as opaque objects (best + thought of as compile-time constants), and builds a separate `tf.Graph` for + each set of Python arguments that it encounters. + For more information, see the + [tf.function guide](https://www.tensorflow.org/guide/function#rules_of_tracing) + + Executing a `PolymorphicFunction` will select and execute the appropriate + `ConcreteFunction` based on the argument types and values. + + To obtain an individual `ConcreteFunction`, use the + `PolymorphicFunction.get_concrete_function` method. It can be called with the + same arguments as `func` and returns a + `tf.types.experimental.ConcreteFunction`. `ConcreteFunction`s are backed by a + single `tf.Graph`: + + >>> @tf.function + ... def f(x): + ... return x + 1 + >>> isinstance(f.get_concrete_function(1).graph, tf.Graph) + True + + `ConcreteFunction`s can be executed just like `PolymorphicFunction`s, but their + input is resticted to the types to which they're specialized. + + ## Retracing + + `ConcreteFunctions` are built (traced) on the fly, as the `PolymorphicFunction` is + called with new TensorFlow types or shapes, or with new Python values as + arguments. When `PolymorphicFunction` builds a new trace, it is said that `func` + is retraced. Retracing is a frequent performance concern for `tf.function` as + it can be considerably slower than executing a graph that's already been + traced. It is ideal to minimize the amount of retracing in your code. 
+ + Caution: Passing python scalars or lists as arguments to `tf.function` will + usually retrace. To avoid this, pass numeric arguments as Tensors whenever + possible: + + >>> @tf.function + ... def f(x): + ... return tf.abs(x) + >>> f1 = f.get_concrete_function(1) + >>> f2 = f.get_concrete_function(2) # Slow - compiles new graph + >>> f1 is f2 + False + >>> f1 = f.get_concrete_function(tf.constant(1)) + >>> f2 = f.get_concrete_function(tf.constant(2)) # Fast - reuses f1 + >>> f1 is f2 + True + + Python numerical arguments should only be used when they take few distinct + values, such as hyperparameters like the number of layers in a neural network. + + ## Input signatures + + For Tensor arguments, `PolymorphicFunction`creates a new `ConcreteFunction` for + every unique set of input shapes and datatypes. The example below creates two + separate `ConcreteFunction`s, each specialized to a different shape: + + >>> @tf.function + ... def f(x): + ... return x + 1 + >>> vector = tf.constant([1.0, 1.0]) + >>> matrix = tf.constant([[3.0]]) + >>> f.get_concrete_function(vector) is f.get_concrete_function(matrix) + False + + An "input signature" can be optionally provided to `tf.function` to control + this process. The input signature specifies the shape and type of each + Tensor argument to the function using a `tf.TensorSpec` object. More general + shapes can be used. This ensures only one `ConcreteFunction` is created, and + restricts the `PolymorphicFunction` to the specified shapes and types. It is + an effective way to limit retracing when Tensors have dynamic shapes. + + >>> @tf.function( + ... input_signature=[tf.TensorSpec(shape=None, dtype=tf.float32)]) + ... def f(x): + ... 
return x + 1 + >>> vector = tf.constant([1.0, 1.0]) + >>> matrix = tf.constant([[3.0]]) + >>> f.get_concrete_function(vector) is f.get_concrete_function(matrix) + True + + ## Variables may only be created once + + `tf.function` only allows creating new `tf.Variable` objects when it is called + for the first time: + + >>> class MyModule(tf.Module): + ... def __init__(self): + ... self.v = None + ... + ... @tf.function + ... def __call__(self, x): + ... if self.v is None: + ... self.v = tf.Variable(tf.ones_like(x)) + ... return self.v * x + + In general, it is recommended to create `tf.Variable`s outside of + `tf.function`. + In simple cases, persisting state across `tf.function` boundaries may be + implemented using a pure functional style in which state is represented by + `tf.Tensor`s passed as arguments and returned as return values. + + Contrast the two styles below: + + >>> state = tf.Variable(1) + >>> @tf.function + ... def f(x): + ... state.assign_add(x) + >>> f(tf.constant(2)) # Non-pure functional style + >>> state + + + >>> state = tf.constant(1) + >>> @tf.function + ... def f(state, x): + ... state += x + ... return state + >>> state = f(state, tf.constant(2)) # Pure functional style + >>> state + + + ## Python operations execute only once per trace + + `func` may contain TensorFlow operations mixed with pure Python operations. + However, when the function is executed, only the TensorFlow operations will + run. The Python operations run only once, at trace time. If TensorFlow + operations depend on results from Python operations, those results will be + frozen into the graph. + + >>> @tf.function + ... def f(a, b): + ... print('this runs at trace time; a is', a, 'and b is', b) + ... 
return b + >>> f(1, tf.constant(1)) + this runs at trace time; a is 1 and b is Tensor("...", shape=(), dtype=int32) + + + >>> f(1, tf.constant(2)) + + + >>> f(2, tf.constant(1)) + this runs at trace time; a is 2 and b is Tensor("...", shape=(), dtype=int32) + + + >>> f(2, tf.constant(2)) + + + Args: + func: The function to be compiled. If `func` is None, `tf.function` returns + a decorator that can be invoked with a single argument - `func`. In other + words, `tf.function(input_signature=...)(func)` is equivalent to + `tf.function(func, input_signature=...)`. The former can be used as + decorator. + input_signature: A possibly nested sequence of `tf.TensorSpec` objects + specifying the shapes and dtypes of the Tensors that will be supplied to + this function. If `None`, a separate function is instantiated for each + inferred input signature. If input_signature is specified, every input to + `func` must be a `Tensor`, and `func` cannot accept `**kwargs`. + autograph: Whether autograph should be applied on `func` before tracing a + graph. Data-dependent Python control flow statements require + `autograph=True`. For more information, see the + [tf.function and AutoGraph guide]( + https://www.tensorflow.org/guide/function#autograph_transformations). + jit_compile: If `True`, compiles the function using + [XLA](https://tensorflow.org/xla). XLA performs compiler optimizations, + such as fusion, and attempts to emit more efficient code. This may + drastically improve the performance. If set to `True`, + the whole function needs to be compilable by XLA, or an + `errors.InvalidArgumentError` is thrown. + If `None` (default), compiles the function with XLA when running on TPU + and goes through the regular function execution path when running on + other devices. + If `False`, executes the function without XLA compilation. Set this value + to `False` when directly running a multi-device function on TPUs (e.g. two + TPU cores, one TPU core and its host CPU). 
+ Not all functions are compilable, see a list of + [sharp corners](https://tensorflow.org/xla/known_issues). + reduce_retracing: When True, `tf.function` attempts to reduce the + amount of retracing, for example by using more generic shapes. This + can be controlled for user objects by customizing their associated + `tf.types.experimental.TraceType`. + experimental_implements: If provided, contains a name of a "known" function + this implements. For example "mycompany.my_recurrent_cell". + This is stored as an attribute in inference function, + which can then be detected when processing serialized function. + See [standardizing composite ops](https://github.com/tensorflow/community/blob/master/rfcs/20190610-standardizing-composite_ops.md) # pylint: disable=line-too-long + for details. For an example of utilizing this attribute see this + [example](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/compiler/mlir/lite/transforms/prepare_composite_functions_tf.cc) + The code above automatically detects and substitutes function that + implements "embedded_matmul" and allows TFLite to substitute its own + implementations. For instance, a tensorflow user can use this + attribute to mark that their function also implements + `embedded_matmul` (perhaps more efficiently!) + by specifying it using this parameter: + `@tf.function(experimental_implements="embedded_matmul")` + This can either be specified as just the string name of the function or + a NameAttrList corresponding to a list of key-value attributes associated + with the function name. The name of the function will be in the 'name' + field of the NameAttrList. To define a formal TF op for this function + implements, try the experimental [composite TF](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/compiler/mlir/tfr) + project. + experimental_autograph_options: Optional tuple of + `tf.autograph.experimental.Feature` values. 
+ experimental_attributes: Optional dictionary of attributes to include in the + generated FunctionDefs. + experimental_relax_shapes: Deprecated. Use `reduce_retracing` + instead. + experimental_compile: Deprecated alias to 'jit_compile'. + experimental_follow_type_hints: Deprecated. Please use input_signature or + reduce_retracing instead. + + Returns: + If `func` is not None, returns a `tf.types.experimental.PolymorphicFunction`. + If `func` is None, returns a decorator that, when invoked with a single + `func` argument, returns a `tf.types.experimental.PolymorphicFunction`. + + Raises: + `ValueError` when attempting to use `jit_compile=True`, but XLA support is + not available. + """ + if jit_compile is None and JIT_COMPILE_FUNCTIONS: + jit_compile = True + + # TODO(b/224808187): Remove after renaming usages. + if experimental_relax_shapes: + reduce_retracing = True + + def decorated(inner_function): + try: + name = inner_function.__name__ + except AttributeError: + name = "function" + return tf_decorator.make_decorator( + inner_function, + decorator_name="tf.function", + decorator_func=Function( + inner_function, + name, + input_signature=input_signature, + autograph=autograph, + experimental_autograph_options=experimental_autograph_options, + reduce_retracing=reduce_retracing, + + # TODO(b/171825496): Update once `experimental_compile` is removed + # entirely in favor of 'jit_compile'. + jit_compile=deprecation.deprecated_argument_lookup( + "jit_compile", + jit_compile, + "experimental_compile", + experimental_compile), + experimental_implements=experimental_implements, + experimental_attributes=experimental_attributes)) + + # This code path is for the `foo = tf.function(foo, ...)` use case + if func is not None: + return decorated(func) + + # This code path is for the + # + # @tf.function(...) + # def foo(...): + # ... 
+ # + # use case, which is equivalent to `foo = tf.function(...)(foo)` + return decorated + + +def class_method_to_instance_method(original_function, instance): + """Constructs a new `Function` with `self` bound.""" + weak_instance = weakref.ref(instance) + + # Note: while we could bind to a weakref proxy instead, that causes the + # bound method to be unhashable. + bound_method = types_lib.MethodType( + original_function.python_function, + tf_method_target.TfMethodTarget(weak_instance, + original_function.python_function)) + + # original_function is expected to be PolymorphicFunction + assert hasattr(original_function, "_name") + assert hasattr(original_function, "_autograph") + assert hasattr(original_function, "_function_type") + assert hasattr(original_function, "python_function") + + weak_bound_method_wrapper = None + + def bound_method_wrapper(*args, **kwargs): + """Wraps either a dummy MethodType or a converted AutoGraph function.""" + # __wrapped__ allows AutoGraph to swap in a converted function. + strong_bound_method_wrapper = weak_bound_method_wrapper() + wrapped_fn = strong_bound_method_wrapper.__wrapped__ + + if wrapped_fn is strong_bound_method_wrapper.__original_wrapped__: + # If __wrapped__ was not replaced, then call original_function. + # TODO(mdan): For better consistency, use the wrapper's call(). + wrapped_fn = original_function.python_function + return wrapped_fn(weak_instance(), *args, **kwargs) + + # If __wrapped__ was replaced, then it is always an unbound function. + # However, the replacer is still responsible for attaching self properly. + # TODO(mdan): Is it possible to do it here instead? + return wrapped_fn(*args, **kwargs) + + weak_bound_method_wrapper = weakref.ref(bound_method_wrapper) + + # pylint: disable=protected-access + # We make a dummy MethodType object to generate the correct bound method + # signature. The actual call is to a function with a weak reference to + # `instance`. 
+ instance_func = type(original_function)( + tf_decorator.make_decorator(bound_method, bound_method_wrapper), + name=original_function._name, + autograph=original_function._autograph, + input_signature=original_function.input_signature, + reduce_retracing=original_function._reduce_retracing, + jit_compile=original_function._jit_compile, + experimental_attributes=original_function._attributes) + # pylint: enable=protected-access + + # We wrap the bound method with tf_decorator so inspection works correctly + wrapped_instance_func = tf_decorator.make_decorator(bound_method, + instance_func) + return wrapped_instance_func diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/saved_model_exported_concrete.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/saved_model_exported_concrete.py new file mode 100644 index 0000000000000000000000000000000000000000..84404fd1fcd607062180c55d06d368433833b387 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/saved_model_exported_concrete.py @@ -0,0 +1,121 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# pylint: disable=unidiomatic-typecheck +"""ExportedConcreteFunction class and its associated functions. 
+ +Part of saved model utils, a shim layer for working with +functions exported/restored from saved models. +This functionality should ultimately be moved into a first-class core API. +""" + +import gc +from tensorflow.python.eager.polymorphic_function import function_type_utils +from tensorflow.python.trackable import base as trackable + + +# TODO(kathywu): Delete this class when ConcreteFunctions can be copied with new +# captures. +class ExportedConcreteFunction(trackable.Trackable): + """A callable class that uses captures from the exported SavedModel graph.""" + __slots__ = ("function", "tensor_map") + + def __init__(self, function, tensor_map): + self.function = function + self.tensor_map = tensor_map + + def __call__(self, *args, **kwargs): + bound_arguments = function_type_utils.canonicalize_function_inputs( + args, kwargs, self.function._function_type + ) + filtered_flat_args = self.function._function_type.unpack_inputs( + bound_arguments + ) + export_captures = _map_captures_to_created_tensors( + self.function.graph.captures, self.tensor_map, self.function) + return self.function._call_flat(filtered_flat_args, export_captures) + + +def _map_captures_to_created_tensors(original_captures, tensor_map, function): + """Maps eager tensors captured by a function to Graph resources for export. + + Args: + original_captures: A dictionary mapping from tensors captured by the + function to interior placeholders for those tensors (inside the function + body). + tensor_map: A dictionary mapping from resource tensors owned by the eager + context to resource tensors in the exported graph. + function: Function with the original captures. Only used when raising the + AssertionError. + + Returns: + A list of stand-in tensors which belong to the exported graph, corresponding + to the function's captures. + + Raises: + AssertionError: If the function references a resource which is not part of + `tensor_map`. 
+ """ + export_captures = [] + for exterior, interior in original_captures: + mapped_resource = tensor_map.get(exterior, None) + if mapped_resource is None: + _raise_untracked_capture_error(function.name, exterior, interior) + export_captures.append(mapped_resource) + return export_captures + + +def _raise_untracked_capture_error(function_name, capture, + internal_capture=None, + node_path=None): + """Raises AssertionError due to being unable to export a function.""" + msg = ("Tried to export a function which references an 'untracked' resource. " + "TensorFlow objects (e.g. tf.Variable) captured by functions must be " + "'tracked' by assigning them to an attribute of a tracked object or " + "assigned to an attribute of the main object directly. See the " + "information below:" + f"\n\tFunction name = {function_name}") + + if node_path is not None: + msg += f"\n\tPath to Function = {node_path}" + + msg += f"\n\tCaptured Tensor = {capture}" + msg += f"\n\t{_get_trackable_parent_error_string(capture)}" + + if internal_capture is not None: + msg += f"\n\tInternal Tensor = {internal_capture}" + raise AssertionError(msg) + + +def _get_trackable_parent_error_string(capture): + """Gets error string with the capture's parent object.""" + parent = getattr(capture, "_parent_trackable", None) + if parent is not None: + return f"Trackable referencing this tensor = {parent()}" + + # Try to figure out where the resource came from by iterating over objects + # which reference it. This is slow and doesn't help us figure out how to + # match it to other objects when loading the SavedModel as a checkpoint, + # so we can't continue saving. But we can at least tell the user what + # needs attaching. 
+ trackable_referrers = [] + for primary_referrer in gc.get_referrers(capture): + if isinstance(primary_referrer, trackable.Trackable): + trackable_referrers.append(primary_referrer) + for secondary_referrer in gc.get_referrers(primary_referrer): + if isinstance(secondary_referrer, trackable.Trackable): + trackable_referrers.append(secondary_referrer) + return ("Trackable Python objects referring to this tensor " + "(from gc.get_referrers, limited to two hops) = [\n\t\t{}]" + .format("\n\t\t".join([repr(obj) for obj in trackable_referrers]))) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/saved_model_utils.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/saved_model_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a42f572bd0c14cb898d3f603323fc044b5616418 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/saved_model_utils.py @@ -0,0 +1,82 @@ +# Copyright 2022 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# pylint: disable=unidiomatic-typecheck +"""A shim layer for working with functions exported/restored from saved models. + +This functionality should ultimately be moved into a first-class core API. 
+""" + +import numpy + +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_util +from tensorflow.python.saved_model import registration +from tensorflow.python.trackable import base as trackable + + +@registration.register_tf_serializable() +class TrackableConstant(trackable.Trackable): + """Trackable class for captured constants.""" + __slots__ = ("capture", "function", "_exported_tensor") + + def __init__(self, capture, function): + self.capture = capture + self.function = function + self._exported_tensor = None + + def _export_to_saved_model_graph(self, tensor_map, **unused_kwargs): + capture_constant_value = tensor_util.constant_value(self.capture) + if capture_constant_value is None: + raise ValueError( + f"Unable to save function {self.function.name} because it " + f"captures graph tensor {self.capture} from a parent function which " + "cannot be converted to a constant with `tf.get_static_value`.") + + if numpy.prod(self.capture.shape.as_list()) > 1 and numpy.all( + capture_constant_value == capture_constant_value.flat[0]): + # For the common case of a constant array filled with the same + # value, rebuild the constant op specifically with the shape arg, + # since otherwise the whole array is written into the node def, + # causing performance and graph proto size issues (protos cannot be + # bigger than 2GB). 
+ copied_tensor = constant_op.constant( + capture_constant_value.flat[0], + dtype=self.capture.dtype, + shape=self.capture.shape) + else: + copied_tensor = constant_op.constant(capture_constant_value) + + tensor_map[self.capture] = copied_tensor + self._exported_tensor = copied_tensor + return [self.capture] + + def _serialize_to_proto(self, object_proto=None, **kwargs): + object_proto.constant.operation = self._exported_tensor.op.name + + @classmethod + def _deserialize_from_proto(cls, object_proto, operation_attributes, + **kwargs): + tensor_proto = ( + operation_attributes[object_proto.constant.operation]["value"].tensor) + ndarray = tensor_util.MakeNdarray(tensor_proto) + if dtypes.as_dtype(tensor_proto.dtype) == dtypes.string: + with ops.device("CPU"): + # String operations should be done on the CPU. + imported_constant = constant_op.constant(ndarray) + else: + imported_constant = constant_op.constant(ndarray) + return imported_constant diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/tf_method_target.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/tf_method_target.py new file mode 100644 index 0000000000000000000000000000000000000000..23b9a127366689a64640d7d2f2315ecf23dbff2d --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/tf_method_target.py @@ -0,0 +1,51 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Module for the TFMethodTarget Class.""" + +import weakref + +from tensorflow.python.util import tf_inspect + + +# When a method is bound to objects of this type, it allows AutoGraph to +# recover a weak reference the original method's self pointer, so that it can +# execute it consistent with class_method_to_instance_method's +# bound_method_wrapper. +# TODO(b/119246461): This is not pretty. Use a descriptor instead? +class TfMethodTarget: + """Binding target for methods replaced by function and defun.""" + + __slots__ = ("weakrefself_target__", "weakrefself_func__") + + def __init__(self, target, original_python_function): + self.weakrefself_target__ = target + self.weakrefself_func__ = weakref.ref(original_python_function) + + @property + def target(self): + return self.weakrefself_target__() + + @property + def target_class(self): + true_self = self.weakrefself_target__() + if tf_inspect.isclass(true_self): + # Class method + return true_self + else: + return true_self.__class__ + + def call(self, args, kwargs): + wrapped_fn = self.weakrefself_func__() + return wrapped_fn(self.weakrefself_target__(), *args, **kwargs) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/tracing_compilation.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/tracing_compilation.py new file mode 100644 index 0000000000000000000000000000000000000000..7cf1642bfa7bb12dbbe12eaa1203cc437aa085cd --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/tracing_compilation.py @@ -0,0 +1,379 @@ +# Copyright 2022 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Compile Python functions to TF graphs using tracing.""" + +import contextlib +import dataclasses +import enum +import threading +from typing import Any, Callable, Dict, Optional, Tuple + +from tensorflow.core.function import trace_type +from tensorflow.core.function.capture import capture_container +from tensorflow.core.function.polymorphism import function_cache as function_cache_lib +from tensorflow.core.function.polymorphism import function_type as function_type_lib +from tensorflow.python.autograph.core import ag_ctx +from tensorflow.python.eager import monitoring +from tensorflow.python.eager.polymorphic_function import attributes as attributes_lib +from tensorflow.python.eager.polymorphic_function import concrete_function as concrete_function_lib +from tensorflow.python.eager.polymorphic_function import function_context +from tensorflow.python.eager.polymorphic_function import function_type_utils +from tensorflow.python.eager.polymorphic_function import transform +from tensorflow.python.framework import func_graph as func_graph_module +from tensorflow.python.framework import ops +from tensorflow.python.platform import tf_logging as logging +from tensorflow.python.profiler import trace +from tensorflow.python.util import compat + +_graph_building_time_counter = monitoring.Counter( + 
"/tensorflow/core/tf_function/graph_building_time_usecs", + "Time for tf.function to build a graph (us).", +) + + +class ScopeType(enum.Enum): + """Enumerate scopes under which functions might be traced.""" + NO_SCOPE = 1 + VARIABLE_CREATION = 2 + NO_VARIABLE_CREATION = 3 + + +@dataclasses.dataclass +class TracingOptions: + """Configuration options for tracing.""" + # Python function to trace. + python_function: Callable[[Any], Any] = lambda *args, **kwargs: None + + # Name given to the traced function. + name: str = "function" + + # Known FunctionType of the python function. + polymorphic_type: Optional[function_type_lib.FunctionType] = None + + # Known default values for the python function parameters. + default_values: Optional[Dict[str, Any]] = None + + # Identifies effecting scope under which the function is traced. + scope_type: ScopeType = ScopeType.NO_SCOPE + + # FunctionDef attributes for traced function. + attributes: Optional[Dict[str, Any]] = None + + # See https://www.tensorflow.org/guide/autograph for more information. + # If autograph is enabled. + autograph: bool = True + # Optional tuple of `tf.autograph.experimental.Feature` values. + autograph_options: Optional[Tuple[Any, ...]] = None + + # Trace generalized functions where possible to avoid future retracing. + reduce_retracing: bool = False + + # If true, graph of generated Function will be destroyed with the function. + bind_graph_to_function: bool = False + + # A FunctionCache object that holds existing traced functions. + function_cache: Optional[function_cache_lib.FunctionCache] = None + + # A FunctionCaptures object that tracks by-ref captures. 
+ function_captures: Optional[capture_container.FunctionCaptures] = None + + # If specified, guards tracing and function lookup + lock: Optional[threading.Lock] = None + + def __post_init__(self): + if self.attributes: + for attribute in self.attributes: + if attribute not in attributes_lib.TRACING_COMPILATION_ALLOWLIST: + raise ValueError( + f"Tracing compilation does not support `{attribute}` as an" + " attribute." + ) + + if not self.polymorphic_type or self.default_values is None: + self.polymorphic_type = function_type_lib.FunctionType.from_callable( + self.python_function + ) + self.default_values = function_type_lib.FunctionType.get_default_values( + self.python_function + ) + + self._input_signature = function_type_utils.to_input_signature( + self.polymorphic_type + ) + + @property + def is_pure(self): + return self.attributes and attributes_lib.IMPLEMENTS in self.attributes + + @property + def input_signature(self): + return self._input_signature + + +def call_function(args=None, kwargs=None, tracing_options=None): + """Traces a function for args and kwargs and calls it after.""" + if not tracing_options: + tracing_options = TracingOptions() + + args = args if args else () + kwargs = kwargs if kwargs else {} + function = trace_function( + args=args, kwargs=kwargs, tracing_options=tracing_options + ) + + # Bind it ourselves to skip unnecessary canonicalization of default call. + bound_args = function.function_type.bind(*args, **kwargs) + flat_inputs = function.function_type.unpack_inputs(bound_args) + return function._call_flat( # pylint: disable=protected-access + flat_inputs, captured_inputs=function.captured_inputs + ) + + +def trace_function(args=None, kwargs=None, tracing_options=None): + """Returns a `ConcreteFunction` specialized to inputs and execution context. + + Compiles a Graph corresponding to the Python function logic and uses that + to generate a differentiable ConcreteFunction. + + Args: + args: inputs to specialize on. 
Can be concrete values (e.g. 1) or + `tf.Tensor` or `tf.TensorSpec`. + kwargs: keyword inputs to specialize on. Concrete values (e.g. 1) or + `tf.Tensor` or `tf.TensorSpec`. + tracing_options: TracingOptions for the tracing process. + """ + if not tracing_options: + tracing_options = TracingOptions() + + args = args if args else () + kwargs = kwargs if kwargs else {} + + if tracing_options.input_signature and (args or kwargs): + # Check to see if a valid type can be generated from the args, kwargs + bound_args = function_type_utils.bind_function_inputs( + args, + kwargs, + tracing_options.polymorphic_type, + tracing_options.default_values, + ) + args, kwargs = bound_args.args, bound_args.kwargs + + with tracing_options.lock or contextlib.nullcontext(): + if tracing_options.input_signature and not args and not kwargs: + args = tracing_options.input_signature + kwargs = {} + + concrete_function = _maybe_define_function( + args, kwargs, tracing_options + ) + + if not tracing_options.bind_graph_to_function: + concrete_function._garbage_collector.release() # pylint: disable=protected-access + + return concrete_function + + +def _maybe_define_function(args, kwargs, tracing_options): + """Gets a function for these inputs, defining it if necessary. + + Args: + args: The varargs for the Python function. + kwargs: The keyword args for the Python function. + tracing_options: TracingOptions for the tracing process. + + Returns: + A ConcreteFunction generated based on args, kwargs and tracing_options. + + Raises: + ValueError: If inputs are incompatible with the input signature. + TypeError: If the function inputs include non-hashable objects + RuntimeError: If there's an internal bug (inconsistency) in handling + shape relaxation retracing. 
+ """ + bound_args = function_type_utils.canonicalize_function_inputs( + args, + kwargs, + tracing_options.polymorphic_type, + tracing_options.default_values, + tracing_options.is_pure, + ) + args, kwargs = bound_args.args, bound_args.kwargs + + if tracing_options.input_signature is not None: + args = ( + *tracing_options.input_signature, + *args[len(tracing_options.input_signature) :], + ) + + current_func_context = function_context.make_function_context( + tracing_options.scope_type + ) + + capture_types = ( + tracing_options.function_captures.capture_types + if tracing_options.function_captures + else {} + ) + lookup_func_type, lookup_func_context = ( + function_type_utils.make_canonicalized_monomorphic_type( + args, + kwargs, + capture_types, + tracing_options.polymorphic_type, + ) + ) + + if tracing_options.function_cache is not None: + concrete_function = tracing_options.function_cache.lookup( + lookup_func_type, current_func_context + ) + else: + concrete_function = None + + if concrete_function is not None: + return concrete_function + + # Use a timer for graph building only if not already inside a function. This + # avoids double counting graph building time for nested functions. 
+ with monitoring.MonitoredTimer( + _graph_building_time_counter.get_cell() + ) if not ops.inside_function() else contextlib.nullcontext(): + with trace.Trace("tf.function-graph_building"): + logging.vlog( + 1, + "Creating new FuncGraph for Python function %r (key: %r, %r)", + tracing_options.python_function, + current_func_context, + lookup_func_type, + ) + logging.vlog( + 2, "Python function signature [args: %s] [kwargs: %s]", args, kwargs + ) + ag_status = ( + ag_ctx.Status.ENABLED + if tracing_options.autograph + else ag_ctx.Status.DISABLED + ) + with ag_ctx.ControlStatusCtx( + status=ag_status, options=tracing_options.autograph_options + ): + func_graph = func_graph_module.FuncGraph(tracing_options.name) + if ( + tracing_options.input_signature is None + and tracing_options.reduce_retracing + and tracing_options.function_cache + ): + target_func_type = tracing_options.function_cache.generalize( + current_func_context, lookup_func_type + ) + else: + target_func_type = lookup_func_type + concrete_function = _create_concrete_function( + target_func_type, lookup_func_context, func_graph, tracing_options + ) + + if tracing_options.function_cache is not None: + tracing_options.function_cache.add( + concrete_function, current_func_context + ) + + return concrete_function + + +def _create_concrete_function( + function_type, type_context, func_graph, tracing_options +): + """Create a `ConcreteFunction` from `args`, `kwargs`, and `func_graph`.""" + placeholder_context = trace_type.InternalPlaceholderContext( + func_graph, type_context.get_placeholder_mapping() + ) + with func_graph.as_default(): + placeholder_bound_args = function_type.placeholder_arguments( + placeholder_context + ) + + disable_acd = tracing_options.attributes and tracing_options.attributes.get( + attributes_lib.DISABLE_ACD, False + ) + traced_func_graph = func_graph_module.func_graph_from_py_func( + tracing_options.name, + tracing_options.python_function, + placeholder_bound_args.args, + 
placeholder_bound_args.kwargs, + None, + func_graph=func_graph, + add_control_dependencies=not disable_acd, + arg_names=function_type_utils.to_arg_names(function_type), + create_placeholders=False, + ) + + transform.apply_func_graph_transforms(traced_func_graph) + + graph_capture_container = traced_func_graph.function_captures + + if tracing_options.function_captures: + # Maintain the list of all captures + tracing_options.function_captures.merge_by_ref_with(graph_capture_container) + + # Create a new FunctionType including captures and outputs. + output_type = trace_type.from_value( + traced_func_graph.structured_outputs, type_context + ) + traced_func_type = function_type_lib.FunctionType( + function_type.parameters.values(), + traced_func_graph.function_captures.capture_types, + return_annotation=output_type, + ) + + concrete_function = concrete_function_lib.ConcreteFunction.from_func_graph( + traced_func_graph, + traced_func_type, + tracing_options.attributes, + # Tell the ConcreteFunction to clean up its graph once it goes out of + # scope. This is not the default behavior since it gets used in some + # places (like Keras) where the FuncGraph lives longer than the + # ConcreteFunction. 
+ shared_func_graph=False, + ) + _set_arg_keywords(concrete_function) + transform.call_concrete_function_callbacks(concrete_function) + + return concrete_function + + +def _set_arg_keywords(concrete_function): + """Sets arg keywords for ConcreteFunction.""" + seen_names = set() + concrete_function._arg_keywords = [] # pylint: disable=protected-access + prefix_counts = {} + graph = concrete_function.graph + num_captures = len(graph.internal_captures + graph.deferred_internal_captures) + num_positional = len(graph.inputs) - num_captures + for arg in concrete_function.graph.inputs[:num_positional]: + try: + user_arg_name = compat.as_str(arg.op.get_attr("_user_specified_name")) + except ValueError: + user_arg_name = "tensor_arg" + proposal = user_arg_name + while proposal in seen_names: + index = prefix_counts.get(user_arg_name, 1) + proposal = "{}_{}".format(user_arg_name, index) + prefix_counts[user_arg_name] = index + 1 + seen_names.add(proposal) + concrete_function._arg_keywords.append(proposal) # pylint: disable=protected-access + # Anything can be a positional argument, in the same order as .inputs + concrete_function._num_positional_args = ( # pylint: disable=protected-access + num_positional + ) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/transform.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/transform.py new file mode 100644 index 0000000000000000000000000000000000000000..c7d1a30a2220b9824b5f9a2c110b8fb70a1c4d21 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/polymorphic_function/transform.py @@ -0,0 +1,32 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""tf.function transformations implementation.""" + +# TODO(fmuham): Move this logic to core/function when layered. +# TODO(fmuham): Deprecate and migrate these as AtomicFunction transformations. +FUNC_GRAPH_TRANSFORMS = [] +CONCRETE_FUNCTION_CALLBACKS = [] + + +def apply_func_graph_transforms(func_graph): + """Applies registered transformations to FuncGraph.""" + for transform in FUNC_GRAPH_TRANSFORMS: + transform(func_graph) + + +def call_concrete_function_callbacks(concrete_fn): + """Calls registered callbacks against new ConcreteFunctions.""" + for callback in CONCRETE_FUNCTION_CALLBACKS: + callback(concrete_fn) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/profiler.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/profiler.py new file mode 100644 index 0000000000000000000000000000000000000000..4da9206ec78822fa59efd0bb8dcbfaff024a998c --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/profiler.py @@ -0,0 +1,193 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""TensorFlow 2.0 Profiler for both Eager Mode and Graph Mode. + +The profiler has two mode: +- Programmatic Mode: start(), stop() and Profiler class. It will perform + when calling start() or create Profiler class and will stop + when calling stop() or destroying Profiler class. +- On-demand Mode: start_profiler_server(). It will perform profiling when + receive profiling request. + +NOTE: Only one active profiler session is allowed. Use of simultaneous +Programmatic Mode and On-demand Mode is undefined and will likely fail. + +NOTE: The Keras TensorBoard callback will automatically perform sampled +profiling. Before enabling customized profiling, set the callback flag +"profile_batches=[]" to disable automatic sampled profiling. +customized profiling. +""" + +import datetime +import os +import threading + +from tensorflow.python.client import _pywrap_events_writer +from tensorflow.python.eager import context +from tensorflow.python.framework import errors +from tensorflow.python.platform import gfile +from tensorflow.python.platform import tf_logging as logging +from tensorflow.python.profiler.internal import _pywrap_profiler +from tensorflow.python.util import compat +from tensorflow.python.util.deprecation import deprecated + +_profiler = None +_profiler_lock = threading.Lock() +_run_num = 0 +# This suffix should be kept in sync with kProfileEmptySuffix in +# tensorflow/core/profiler/rpc/client/capture_profile.cc. 
+_EVENT_FILE_SUFFIX = '.profile-empty' + + +class ProfilerAlreadyRunningError(Exception): + pass + + +class ProfilerNotRunningError(Exception): + pass + + +@deprecated('2020-07-01', 'use `tf.profiler.experimental.start` instead.') +def start(options=None): + """Start profiling. + + Args: + options: profiler options. + + Raises: + ProfilerAlreadyRunningError: If another profiling session is running. + """ + global _profiler + with _profiler_lock: + if _profiler is not None: + raise ProfilerAlreadyRunningError('Another profiler is running.') + if context.default_execution_mode == context.EAGER_MODE: + context.ensure_initialized() + _profiler = _pywrap_profiler.ProfilerSession() + try: + _profiler.start('', options if options is not None else {}) + except errors.AlreadyExistsError: + logging.warning('Another profiler session is running which is probably ' + 'created by profiler server. Please avoid using profiler ' + 'server and profiler APIs at the same time.') + raise ProfilerAlreadyRunningError('Another profiler is running.') + + +@deprecated('2020-07-01', 'use `tf.profiler.experimental.stop` instead.') +def stop(): + """Stop current profiling session and return its result. + + Returns: + A binary string of tensorflow.tpu.Trace. User can write the string + to file for offline analysis by tensorboard. + + Raises: + ProfilerNotRunningError: If there is no active profiling session. + """ + global _profiler + global _run_num + with _profiler_lock: + if _profiler is None: + raise ProfilerNotRunningError( + 'Cannot stop profiling. No profiler is running.') + if context.default_execution_mode == context.EAGER_MODE: + context.context().executor.wait() + result = _profiler.stop() + _profiler = None + _run_num += 1 + return result + + +@deprecated( + '2020-07-01', + '`tf.python.eager.profiler` has deprecated, use `tf.profiler` instead.' +) +def maybe_create_event_file(logdir): + """Create an empty event file if not already exists. 
+ + This event file indicates that we have a plugins/profile/ directory in the + current logdir. + + Args: + logdir: log directory. + """ + for file_name in gfile.ListDirectory(logdir): + if file_name.endswith(_EVENT_FILE_SUFFIX): + return + # TODO(b/127330388): Use summary_ops_v2.create_file_writer instead. + event_writer = _pywrap_events_writer.EventsWriter( + compat.as_bytes(os.path.join(logdir, 'events'))) + event_writer.InitWithSuffix(compat.as_bytes(_EVENT_FILE_SUFFIX)) + + +@deprecated( + '2020-07-01', + '`tf.python.eager.profiler` has deprecated, use `tf.profiler` instead.' +) +def save(logdir, result): + """Save profile result to TensorBoard logdir. + + Args: + logdir: log directory read by TensorBoard. + result: profiling result returned by stop(). + """ + plugin_dir = os.path.join( + logdir, 'plugins', 'profile', + datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')) + gfile.MakeDirs(plugin_dir) + maybe_create_event_file(logdir) + with gfile.Open(os.path.join(plugin_dir, 'local.trace'), 'wb') as f: + f.write(result) + + +@deprecated('2020-07-01', 'use `tf.profiler.experimental.server.start`.') +def start_profiler_server(port): + """Start a profiler grpc server that listens to given port. + + The profiler server will keep the program running even the training finishes. + Please shutdown the server with CTRL-C. It can be used in both eager mode and + graph mode. The service defined in + tensorflow/core/profiler/profiler_service.proto. Please use + tensorflow/contrib/tpu/profiler/capture_tpu_profile to capture tracable + file following https://cloud.google.com/tpu/docs/cloud-tpu-tools#capture_trace + + Args: + port: port profiler server listens to. + """ + if context.default_execution_mode == context.EAGER_MODE: + context.ensure_initialized() + _pywrap_profiler.start_server(port) + + +@deprecated('2020-07-01', 'use `tf.profiler.experimental.Profile` instead.') +class Profiler(object): + """Context-manager eager profiler api. 
+ + Example usage: + ```python + with Profiler("/path/to/logdir"): + # do some work + ``` + """ + + def __init__(self, logdir): + self._logdir = logdir + + def __enter__(self): + start() + + def __exit__(self, typ, value, tb): + result = stop() + save(self._logdir, result) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/profiler_client.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/profiler_client.py new file mode 100644 index 0000000000000000000000000000000000000000..4f92604abfbc2745c7248ab0a38df5ee139f6e09 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/profiler_client.py @@ -0,0 +1,69 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Profiler client APIs.""" + +from tensorflow.python.profiler.internal import _pywrap_profiler +from tensorflow.python.util.deprecation import deprecated + + +@deprecated('2020-07-01', 'use `tf.profiler.experimental.client.trace`.') +def start_tracing(service_addr, + logdir, + duration_ms, + worker_list='', + include_dataset_ops=True, + num_tracing_attempts=3): + """Sends grpc requests to profiler server to perform on-demand profiling. + + This method will block caller thread until receives tracing result. + + Args: + service_addr: Address of profiler service e.g. localhost:6009. 
+ logdir: Path of TensorBoard log directory e.g. /tmp/tb_log. + duration_ms: Duration of tracing or monitoring in ms. + worker_list: The list of worker TPUs that we are about to profile in the + current session. (TPU only) + include_dataset_ops: Set to false to profile longer traces. + num_tracing_attempts: Automatically retry N times when no trace event is + collected. + + Raises: + UnavailableError: If no trace event is collected. + """ + _pywrap_profiler.trace(service_addr, logdir, worker_list, include_dataset_ops, + duration_ms, num_tracing_attempts, {}) + + +@deprecated('2020-07-01', 'use `tf.profiler.experimental.client.monitor`.') +def monitor(service_addr, + duration_ms, + monitoring_level=1, + display_timestamp=False): + """Sends grpc requests to profiler server to perform on-demand monitoring. + + This method will block caller thread until receives monitoring result. + + Args: + service_addr: Address of profiler service e.g. localhost:6009. + duration_ms: Duration of tracing or monitoring in ms. + monitoring_level: Choose a monitoring level between 1 and 2 to monitor your + job. Level 2 is more verbose than level 1 and shows more metrics. + display_timestamp: Set to true to display timestamp in monitoring result. + + Returns: + A string of monitoring output. + """ + return _pywrap_profiler.monitor(service_addr, duration_ms, monitoring_level, + display_timestamp) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/record.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/record.py new file mode 100644 index 0000000000000000000000000000000000000000..ea6c4ae4f8338e240a58ed545f346e4a025e630f --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/record.py @@ -0,0 +1,121 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Gradient record utilities.""" + +import contextlib + +from tensorflow.python import pywrap_tfe + + +class VariableWatcher(object): + """A scope that tracks all trainable variable accesses within it. + + This explicitly ignores variables that are not marked as trainable. + + Sample usage: + + var = tf.Variable(0.0) + with VariableWatcher() as variable_watcher: + var.assign_add(1.0) + + assert variable_watcher.watched_variables == [var] + """ + + __slots__ = ["_variable_watcher"] + + def __init__(self): + self._variable_watcher = None + + def __enter__(self): + self._variable_watcher = pywrap_tfe.TFE_Py_VariableWatcherNew() + return self + + def __exit__(self, typ, value, traceback): + pywrap_tfe.TFE_Py_VariableWatcherRemove(self._variable_watcher) + + def watched_variables(self): + """Returns a tuple of variables accessed under this scope.""" + return pywrap_tfe.TFE_Py_VariableWatcherWatchedVariables( + self._variable_watcher) + + +@contextlib.contextmanager +def stop_recording(): + """Stop all gradient recording (backprop and forwardprop).""" + is_stopped = pywrap_tfe.TFE_Py_TapeSetIsStopped() + try: + if not is_stopped: + pywrap_tfe.TFE_Py_TapeSetStopOnThread() + yield + finally: + if not is_stopped: + pywrap_tfe.TFE_Py_TapeSetRestartOnThread() + + +def should_record_backprop(tensors): + """Returns true if any tape in the stack watches any of these tensors. + + Only takes GradientTapes into account, not forward accumulators. 
+ + Args: + tensors: Tensors to check, typically inputs to an operation. + + Returns: + Boolean, whether any tape watches any of `tensors`. + """ + return pywrap_tfe.TFE_Py_TapeSetShouldRecordBackprop(tensors) + + +def record_operation(op_type, output_tensors, input_tensors, backward_function, + forward_function=None): + """Records the operation on all tapes in the stack.""" + pywrap_tfe.TFE_Py_TapeSetRecordOperation(op_type, output_tensors, + input_tensors, backward_function, + forward_function) + + +def record_operation_backprop_only(op_type, output_tensors, input_tensors, + backward_function): + """Records the operation on all backward tapes in the stack.""" + pywrap_tfe.TFE_Py_TapeSetRecordOperationBackprop(op_type, output_tensors, + input_tensors, + backward_function) + + +def record_operation_forwardprop_only(op_type, output_tensors, input_tensors, + backward_function, + forwardprop_output_indices): + """Records the operation on all forward accumulators in the stack. + + Args: + op_type: a string for the operation type, used in the backprop code + output_tensors: a list of Python Tensor objects output by the operation + input_tensors: a list of input Tensors to the recorded operation + backward_function: the function to be called to, given the gradients of the + output tensors, produce the gradients of the input tensors. This function + is automatically transposed to produce output gradients given input + gradients. + forwardprop_output_indices: indicates any output_tensors which contain JVPs. + Typically these will have come from TFE_Py_PackForwardGradients. May be + None or an empty sequence if there are no JVP outputs from the operation. 
+ """ + pywrap_tfe.TFE_Py_TapeSetRecordOperationForwardprop( + op_type, output_tensors, input_tensors, backward_function, + forwardprop_output_indices) + + +def could_possibly_record(): + """Returns True if any tape is active.""" + return not pywrap_tfe.TFE_Py_TapeSetIsEmpty() diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/remote.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/remote.py new file mode 100644 index 0000000000000000000000000000000000000000..2808f5a056f00a9933129c9fc4e567a55371ead5 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/remote.py @@ -0,0 +1,279 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Helpers to connect to remote servers.""" + +import copy + +from absl import logging + +from tensorflow.core.protobuf.tensorflow_server_pb2 import ServerDef +from tensorflow.python import pywrap_tfe +from tensorflow.python.distribute import device_util +from tensorflow.python.distribute.cluster_resolver import cluster_resolver +from tensorflow.python.eager import context +from tensorflow.python.framework import ops +from tensorflow.python.platform import remote_utils +from tensorflow.python.training import server_lib +from tensorflow.python.util import nest +from tensorflow.python.util.tf_export import tf_export + + +_GRPC_PREFIX = "grpc://" +_LOCAL_MASTERS = ("", "local") + + +@tf_export("config.experimental_connect_to_host") +def connect_to_remote_host(remote_host=None, job_name="worker"): + """Connects to a single machine to enable remote execution on it. + + Will make devices on the remote host available to use. Note that calling this + more than once will work, but will invalidate any tensor handles on the old + remote devices. + + Using the default job_name of worker, you can schedule ops to run remotely as + follows: + ```python + # When eager execution is enabled, connect to the remote host. + tf.config.experimental_connect_to_host("exampleaddr.com:9876") + + with ops.device("job:worker/replica:0/task:1/device:CPU:0"): + # The following tensors should be resident on the remote device, and the op + # will also execute remotely. + x1 = array_ops.ones([2, 2]) + x2 = array_ops.ones([2, 2]) + y = math_ops.matmul(x1, x2) + ``` + + Args: + remote_host: a single or a list the remote server addr in host-port format. + job_name: The job name under which the new server will be accessible. + + Raises: + ValueError: if remote_host is None. 
+ """ + if not remote_host: + raise ValueError("Must provide at least one remote_host") + + remote_hosts = nest.flatten(remote_host) + cluster_spec = server_lib.ClusterSpec( + {job_name: [_strip_prefix(host, _GRPC_PREFIX) for host in remote_hosts]}) + + connect_to_cluster(cluster_spec) + + +@tf_export("config.experimental_connect_to_cluster") +def connect_to_cluster(cluster_spec_or_resolver, + job_name="localhost", + task_index=0, + protocol=None, + make_master_device_default=True, + cluster_device_filters=None): + """Connects to the given cluster. + + Will make devices on the cluster available to use. Note that calling this more + than once will work, but will invalidate any tensor handles on the old remote + devices. + + If the given local job name is not present in the cluster specification, it + will be automatically added, using an unused port on the localhost. + + Device filters can be specified to isolate groups of remote tasks to avoid + undesired accesses between workers. Workers accessing resources or launching + ops / functions on filtered remote devices will result in errors (unknown + devices). For any remote task, if no device filter is present, all cluster + devices will be visible; if any device filter is specified, it can only + see devices matching at least one filter. Devices on the task itself are + always visible. Device filters can be particially specified. 
+ + For example, for a cluster set up for parameter server training, the following + device filters might be specified: + + ```python + cdf = tf.config.experimental.ClusterDeviceFilters() + # For any worker, only the devices on PS nodes and itself are visible + for i in range(num_workers): + cdf.set_device_filters('worker', i, ['/job:ps']) + # Similarly for any ps, only the devices on workers and itself are visible + for i in range(num_ps): + cdf.set_device_filters('ps', i, ['/job:worker']) + + tf.config.experimental_connect_to_cluster(cluster_def, + cluster_device_filters=cdf) + ``` + + Args: + cluster_spec_or_resolver: A `ClusterSpec` or `ClusterResolver` describing + the cluster. + job_name: The name of the local job. + task_index: The local task index. + protocol: The communication protocol, such as `"grpc"`. If unspecified, will + use the default from `python/platform/remote_utils.py`. + make_master_device_default: If True and a cluster resolver is passed, will + automatically enter the master task device scope, which indicates the + master becomes the default device to run ops. It won't do anything if + a cluster spec is passed. Will throw an error if the caller is currently + already in some device scope. + cluster_device_filters: an instance of + `tf.train.experimental/ClusterDeviceFilters` that specify device filters + to the remote tasks in cluster. + """ + if not context.executing_eagerly(): + raise ValueError( + "`tf.config.experimental_connect_to_cluster` can only be called in " + "eager mode." + ) + protocol = protocol or remote_utils.get_default_communication_protocol() + if isinstance(cluster_spec_or_resolver, server_lib.ClusterSpec): + cluster_spec = cluster_spec_or_resolver + elif isinstance(cluster_spec_or_resolver, cluster_resolver.ClusterResolver): + if cluster_spec_or_resolver.master() in _LOCAL_MASTERS: + # Do nothing if the master is local. 
+ return + cluster_spec = cluster_spec_or_resolver.cluster_spec() + else: + raise ValueError( + "`cluster_spec_or_resolver` must be a `ClusterSpec` or a " + "`ClusterResolver`.") + + cluster_def = copy.deepcopy(cluster_spec.as_cluster_def()) + if cluster_device_filters: + if isinstance(cluster_device_filters, server_lib.ClusterDeviceFilters): + cluster_device_filters = copy.deepcopy( + cluster_device_filters._as_cluster_device_filters()) # pylint: disable=protected-access + else: + raise ValueError("`cluster_device_filters` must be an instance of " + "`tf.train.experimental.ClusterDeviceFilters`.") + + # Check whether the server def has changed. We need to do the check before the + # local job is added to the cluster. + is_server_def_changed = False + current_server_def = context.get_server_def() + if current_server_def and job_name not in cluster_spec.jobs: + for i, job in enumerate(current_server_def.cluster.job): + if job.name == job_name: + del current_server_def.cluster.job[i] + if (current_server_def is None or current_server_def.cluster != cluster_def or + current_server_def.job_name != job_name or + current_server_def.task_index != task_index): + is_server_def_changed = True + + # Automatically add local job, if not part of the cluster spec. + if job_name not in cluster_spec.jobs: + local_port = pywrap_tfe.TF_PickUnusedPortOrDie() + job_def = cluster_def.job.add() + job_def.name = job_name + # TODO(fishx): Update this to make sure remote worker has valid ip address + # to connect with local. 
+ job_def.tasks[0] = "localhost:{}".format(local_port) + + if context.context().coordination_service is None: + service_type = remote_utils.coordination_service_type(protocol) + service_leader = "" + # Maybe enable coordination service for the communication protocol + # TODO(b/243839559): Fix UPTC + Coordination service crashing + # Check if cluster_spec_or_resolver is an instance of + # tpu_cluster_resolver.TPUClusterResolver + if (isinstance(cluster_spec_or_resolver, cluster_resolver.ClusterResolver) + and hasattr(cluster_spec_or_resolver, "tpu_hardware_feature")): + service_leader = cluster_spec_or_resolver.get_coordination_service_leader( + ) + # Maybe enable coordination service internally. + if cluster_spec_or_resolver.environment == "google": + is_uptc_sess = ".uptc-worker." in cluster_spec_or_resolver.master() + service_type = remote_utils.coordination_service_type( + protocol, is_uptc_sess) + # Enable coordination service for Cloud TPU. + else: + service_type = "standalone" + + if service_type: + # If `enable_health_check` is true, coordination service agent would + # do connecting (and tasks would send heartbeat if connection is set up) + # while creating eager contexts. Enabling health check does not mutate + # coordination service. + context.context().configure_coordination_service( + service_type=service_type, + service_leader=service_leader, + enable_health_check=False) + + default_session_config = copy.deepcopy(context.context().config) + + for name in cluster_spec.jobs: + # assuming any of the non-local job is the worker jobs. + # should we use cluster_spec_or_resolver.get_job_name() instead when + # it is available? 
+ # maybe consolicate this with the 'master' logic below + if name == job_name: + continue + + default_session_config.experimental.collective_group_leader = ( + f"/job:{name}/replica:0/task:0" + ) + + logging.info("default session config: %s", default_session_config) + + server_def = ServerDef( + cluster=cluster_def, + job_name=job_name, + task_index=task_index, + protocol=protocol, + default_session_config=default_session_config, + cluster_device_filters=cluster_device_filters, + ) + + if is_server_def_changed: + context.set_server_def(server_def) + else: + context.update_server_def(server_def) + + if make_master_device_default and isinstance( + cluster_spec_or_resolver, + cluster_resolver.ClusterResolver) and cluster_spec_or_resolver.master(): + master = cluster_spec_or_resolver.master() + master_job_name = None + master_task_id = None + for job_name in cluster_spec.jobs: + for task_id in cluster_spec.task_indices(job_name): + task_address = cluster_spec.task_address(job_name, task_id) + if master in task_address or task_address in master: + master_job_name = job_name + master_task_id = task_id + break + + if not master_job_name: + raise ValueError( + "`make_master_device_default` is set to True but cannot find " + "master %s in the cluster" % master) + + master_device = "/job:{}/replica:0/task:{}".format(master_job_name, + master_task_id) + master_device = device_util.canonicalize(master_device) + current_device = device_util.current() + if current_device: + current_device = device_util.canonicalize(current_device) + if current_device and current_device != master_device: + raise ValueError("`connect_to_cluster` is called inside existing device " + "scope %s, which is different from the master device " + "scope %s to enter. This is not allowed." % + (current_device, master_device)) + # TODO(b/138389076): Think of the entering device scope behavior in the + # failure recovery case when dealing with preemptions. 
+ if not current_device: + logging.info("Entering into master device scope: %s", master_device) + ops.device(master_device).__enter__() + + +def _strip_prefix(s, prefix): + return s[len(prefix):] if s.startswith(prefix) else s diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/tape.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/tape.py new file mode 100644 index 0000000000000000000000000000000000000000..194ccd5a9116eb48a86bb04a2f835415535173cb --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/tape.py @@ -0,0 +1,109 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Gradient tape utilities.""" + +from tensorflow.python import pywrap_tfe + + +class Tape(object): + """Represents a gradient propagation trace.""" + + __slots__ = ["_tape"] + + def __init__(self, tape): + self._tape = tape + + def watched_variables(self): + return pywrap_tfe.TFE_Py_TapeWatchedVariables(self._tape) + + +def push_new_tape(persistent=False, watch_accessed_variables=True): + """Pushes a new tape onto the tape stack.""" + tape = pywrap_tfe.TFE_Py_TapeSetNew(persistent, watch_accessed_variables) + return Tape(tape) + + +def push_tape(tape): + """Pushes an existing tape onto the tape stack.""" + pywrap_tfe.TFE_Py_TapeSetAdd(tape._tape) # pylint: disable=protected-access + + +def watch(tape, tensor): + """Marks this tensor to be watched by the given tape.""" + pywrap_tfe.TFE_Py_TapeWatch(tape._tape, tensor) # pylint: disable=protected-access + + +def default_get_variables(variable): + return [variable] + +# Gets a list of changed variables. Can be overriden using +# register_variables_override. An example of overriding is for getting the +# varibles within a distributed context. +_variables_override = default_get_variables + + +def register_watched_variable_resolver(resolver): + """Registers the resolver to be used to get the list of variables to watch. + + Args: + resolver: callable, takes a Variable and returns a list of Variables that + shall be watched. 
+ """ + global _variables_override + assert _variables_override is default_get_variables + _variables_override = resolver + + +def watch_variable(tape, variable): + """Marks this variable to be watched by the given tape.""" + variables = _variables_override(variable) + for var in variables: + pywrap_tfe.TFE_Py_TapeWatchVariable(tape._tape, var) # pylint: disable=protected-access + pywrap_tfe.TFE_Py_VariableWatcherVariableAccessed(var) + + +def variable_accessed(variable): + """Notifies all tapes in the stack that a variable has been accessed. + + Args: + variable: variable to be watched. + """ + variables = _variables_override(variable) + for var in variables: + pywrap_tfe.TFE_Py_TapeVariableAccessed(var) + pywrap_tfe.TFE_Py_VariableWatcherVariableAccessed(var) + + +def variables_accessed(variables): + """Notifies all tapes in the stack that variables have been accessed. + + Only trainable variables are marked as accessed. + + Args: + variables: iterable of variables to mark as accessed. + """ + accessed = [] + for variable in variables: + if variable.trainable: + accessed.extend(_variables_override(variable)) + + for var in accessed: + pywrap_tfe.TFE_Py_TapeVariableAccessed(var) + pywrap_tfe.TFE_Py_VariableWatcherVariableAccessed(var) + + +def pop_tape(tape): + """Pops the given tape in the stack.""" + pywrap_tfe.TFE_Py_TapeSetRemove(tape._tape) # pylint: disable=protected-access diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/test.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/test.py new file mode 100644 index 0000000000000000000000000000000000000000..be1ccc532911f543233cecc74af11cf2ea0f1fdf --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/test.py @@ -0,0 +1,25 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utilities for testing tfe code.""" + +from tensorflow.python.framework import ops as _ops +from tensorflow.python.platform import test as _test +from tensorflow.python.platform.test import * # pylint: disable=wildcard-import + + +# TODO(akshayka): Do away with this file. +def main(argv=None): # pylint: disable=function-redefined + _ops.enable_eager_execution() + _test.main(argv) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/wrap_function.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/wrap_function.py new file mode 100644 index 0000000000000000000000000000000000000000..5a641aba2da70f0a03f783e35f93b2db0a944cf7 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/eager/wrap_function.py @@ -0,0 +1,678 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +# pylint: disable=unidiomatic-typecheck +"""Prototype decorator for defining legacy-graph-mode functions.""" + +import weakref + +from tensorflow.core.function.polymorphism import function_type as function_type_lib +from tensorflow.core.protobuf import meta_graph_pb2 +from tensorflow.core.protobuf import struct_pb2 +from tensorflow.python.eager import context +from tensorflow.python.eager import function +from tensorflow.python.eager import lift_to_graph +from tensorflow.python.eager.polymorphic_function import atomic_function +from tensorflow.python.framework import composite_tensor +from tensorflow.python.framework import func_graph +from tensorflow.python.framework import importer +from tensorflow.python.framework import ops +from tensorflow.python.framework import sparse_tensor +from tensorflow.python.framework import tensor as tensor_lib +from tensorflow.python.framework import tensor_shape +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import resource_variable_ops +from tensorflow.python.ops import variable_scope +from tensorflow.python.platform import tf_logging as logging +from tensorflow.python.saved_model import nested_structure_coder +from tensorflow.python.trackable import data_structures +from tensorflow.python.util import nest +from tensorflow.python.util.tf_export import tf_export + + +class VariableHolder(object): + """Holds variables for a python function.""" + + def __init__(self, fn=None, share_variables=False): + self._fn = fn + + self._share_variables = share_variables + self._variables_by_name = data_structures.Mapping() + + @property + def variables(self): + return self._variables_by_name + + def variable_creator_scope(self, next_creator, **kwargs): + """Creates variables & adds them to collections to match legacy code.""" + collections = kwargs.pop("collections", None) + v = None + + # Get expected variable name. 
+ with ops.name_scope( + kwargs.get("name", None), "Variable", skip_on_eager=False) as name: + variable_name = ops.name_from_scope_name(name) + kwargs["name"] = name + + if self._share_variables: + v = self._variables_by_name.get(variable_name, None) + + if v is None: + v = next_creator(**kwargs) + self._variables_by_name[variable_name] = v + + if collections is None: + collections = [ops.GraphKeys.GLOBAL_VARIABLES] + if v.trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections: + collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES] + + ops.add_to_collections(collections, v) + + return v + + def __call__(self, *args, **kwargs): + return self.call_with_variable_creator_scope(self._fn)(*args, **kwargs) + + def call_with_variable_creator_scope(self, fn): + + def wrapped(*args, **kwargs): + with variable_scope.variable_creator_scope(self.variable_creator_scope): + return fn(*args, **kwargs) + + return wrapped + + +def _get_element_from_tensor_info(tensor_info, graph): + """Simplified copy of the deprecated `get_tensor_from_tensor_info`.""" + encoding = tensor_info.WhichOneof("encoding") + if encoding == "name": + # We may get operations here in some cases. TensorInfo is a bit of a + # misnomer if so. 
+ return graph.as_graph_element(tensor_info.name) + elif encoding == "coo_sparse": + return sparse_tensor.SparseTensor( + graph.get_tensor_by_name(tensor_info.coo_sparse.indices_tensor_name), + graph.get_tensor_by_name(tensor_info.coo_sparse.values_tensor_name), + graph.get_tensor_by_name( + tensor_info.coo_sparse.dense_shape_tensor_name)) + elif encoding == "composite_tensor": + spec_proto = struct_pb2.StructuredValue( + type_spec_value=tensor_info.composite_tensor.type_spec) + spec = nested_structure_coder.decode_proto(spec_proto) + components = [graph.get_tensor_by_name(component.name) for component in + tensor_info.composite_tensor.components] + return spec._from_components(components) # pylint: disable=protected-access + else: + raise ValueError(f"Invalid TensorInfo.encoding: {encoding}. Valid " + "encodings are 'name', 'coo_sparse', and " + "'composite_tensor'.") + + +def _lift_single_variable(old_variable, graph, variable_holder): + """Lifts `old_variable` out of the `FuncGraph` `graph`.""" + new_variable = resource_variable_ops.UninitializedVariable( + shape=old_variable.shape, + dtype=old_variable.dtype, + name=old_variable.op.name, + trainable=old_variable.trainable, + extra_handle_data=old_variable.handle) + new_variable._initializer_op = old_variable._initializer_op # pylint: disable=protected-access + graph.add_capture(new_variable.handle, old_variable.handle) + # Now that we've added the new variable to graph.captures, + # graph.capture will use that cached value and do some post-processing + # on the capture like recording it on the tape. 
+ graph.capture(new_variable.handle) + # pylint: disable=protected-access + variable_name = new_variable.name.split(":")[0] + variable_holder._variables_by_name[variable_name] = new_variable + graph._weak_variables.append(weakref.ref(new_variable)) + # pylint: enable=protected-access + graph.watch_variable(new_variable) + return new_variable + + +def _lift_unlifted_variables(graph, variable_holder): + """Finds resource variables and lifts them into the outer context. + + When we import a GraphDef inside a wrap_function, no Python graph building + code runs. This means we get VarHandleOps which create variable resources, + but no corresponding Python objects. Leaving them like this works but gives + the user no way to interact with or modify the variables outside the graph. + + This method searches for variables and lifts them out as regular variable + objects when possible, indicating to the FuncGraph that they are captures. + + Args: + graph: The FuncGraph to lift variables from. + variable_holder: A VariableHolder to record the lifted variables in. 
+ """ + with graph.as_default(): + global_collection_variables = ops.get_collection( + ops.GraphKeys.GLOBAL_VARIABLES) + local_collection_variables = ops.get_collection( + ops.GraphKeys.LOCAL_VARIABLES) + existing_captures = {id(c) for c in graph.internal_captures} + lifted_variables = {} + + def _should_lift_variable(v): + return ((v._in_graph_mode # pylint: disable=protected-access + and v.graph.building_function) + and isinstance(v, resource_variable_ops.BaseResourceVariable) + and id(v.handle) not in existing_captures) + + for old_variable in global_collection_variables: + if _should_lift_variable(old_variable): + new_variable = _lift_single_variable( + old_variable, graph, variable_holder) + lifted_variables[id(old_variable)] = new_variable + existing_captures.add(id(old_variable.handle)) + + for old_variable in local_collection_variables: + if _should_lift_variable(old_variable): + new_variable = _lift_single_variable( + old_variable, graph, variable_holder) + lifted_variables[id(old_variable)] = new_variable + existing_captures.add(id(old_variable.handle)) + if new_variable._in_graph_mode: # pylint: disable=protected-access + outer_graph = new_variable.graph + # Variables are added to the global collection by default. In this + # case we only want the variable in the local collection, so we'll pop + # it out. + global_collection = outer_graph.get_collection_ref( + ops.GraphKeys.GLOBAL_VARIABLES) + global_collection.remove(new_variable) + outer_graph.add_to_collection( + ops.GraphKeys.LOCAL_VARIABLES, new_variable) + + # Update the FuncGraph's collections, partly for the user and partly so this + # function is idempotent when it runs again in prune() calls. 
+ for collection_name in [ + ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.LOCAL_VARIABLES + ]: + mutable_collection = ops.get_collection_ref(collection_name) + for index, current in enumerate(mutable_collection): + mutable_collection[index] = lifted_variables.get(id(current), current) + if not resource_variable_ops.is_resource_variable( + mutable_collection[index]): + logging.log_first_n( + logging.WARN, + "Unable to create a python object for variable {} because it is " + "a reference variable. It may not be visible to training APIs. " + "If this is a problem, consider rebuilding the SavedModel after " + "running tf.compat.v1.enable_resource_variables().".format( + mutable_collection[index]), + 5) + + +# TODO(allenl): make this trackable +class WrappedFunction(function.ConcreteFunction): + """Wraps a tf V1 piece of code in a function.""" + + def __init__(self, fn_graph, variable_holder, attrs=None, signature=None): + self._variable_holder = variable_holder + _lift_unlifted_variables(fn_graph, variable_holder) + # We call __init__ after lifting variables so that the function's signature + # properly reflects the new captured inputs. + for f in fn_graph.as_graph_def(use_pybind11_proto=True).library.function: + context.context().add_function_def(f) + self._signature = signature + function_type = function_type_lib.from_structured_signature( + fn_graph.structured_input_signature, + fn_graph.structured_outputs, + fn_graph.function_captures.capture_types, + ) + atomic_fn = atomic_function.from_func_graph( + function._inference_name(fn_graph.name), fn_graph, attrs, function_type + ) + super().__init__(atomic_fn) + + def _call_impl(self, args, kwargs): + if self._arg_keywords is None: + if kwargs: + raise NotImplementedError( + "Keyword arguments are not supported when calling a " + f"wrap_function-decorated function. 
Got {kwargs}.") + if self._signature is not None: + args = list(args) + for i, arg in enumerate(args): + if isinstance(self._signature[i], tensor_lib.DenseSpec): + args[i] = ops.convert_to_tensor(arg, self._signature[i].dtype) + return self._call_flat(args, self.captured_inputs) + else: + return super()._call_impl(args, kwargs) + + def prune(self, feeds, fetches, name=None, input_signature=None): + """Extract a subgraph of this function's underlying graph. + + Wraps the subgraph in a new `WrappedFunction` object. + + Args: + feeds: Input tensors to the subgraph to extract, as `Tensor` objects. + fetches: Possibly-nested Python data structure containing information + about outputs of the target subgraph. Each entry can either be a + `Tensor` object (for data outputs), an `Operation` object (for control + outputs), or a `TensorInfo` proto. Any additional shape/dtype + information provided in a `TensorInfo` and not present in the original + graph will be added to the returned subgraph. + name: (optional) Name to give to the underlying `FuncGraph` of the + returned object. If no name is provided, the graph's name will be + `"pruned"`. + input_signature: (optional) possibly-nested Python data structure + containing `TensorSpec` objects, with which to populate the returned + functions's `FuncGraph`'s `structured_input_signature` field. + + Returns: + A new `WrappedFunction` object containing a copy of the portion of this + object's graph that goes from `feeds` to `fetches`. + """ + # TODO(b/129646028): Add support for CompositeTensors. + name = name or "pruned" + flat_feeds = nest.flatten(feeds, expand_composites=True) + flat_feeds = [self.graph.as_graph_element(t) for t in flat_feeds] + for f in flat_feeds: + if not isinstance(f, tensor_lib.Tensor): + raise ValueError( + "All members of argument `feeds` must be tensors. " + f"Got {f} with type {type(f)}." 
+ ) + + # Ignoring all feeds that are captures allows prune to be called + # using wrapped_func.inputs even when it uses variables + internal_captures = {id(c) for c in self.graph.internal_captures} + flat_feeds = [f for f in flat_feeds if id(f) not in internal_captures] + + operation_fetches = [] + tensor_fetches = [] + tensor_infos = [] + + def _fetch_preprocessing_callback(fetch): + """Extract out lists of ops, tensors, and tensor type info. + + Turns TensorInfos into Tensors in the original `fetches` structure. + Also extracts ops from `fetches`. + + Args: + fetch: The fetch to preprocess: Tensor, TensorInfo, or Operation, or + string identifying a Tensor or Operation. + + Returns: + `fetch` converted to a Tensor. + """ + if isinstance(fetch, ops.Operation): + operation_fetches.append(fetch) + return fetch + elif isinstance(fetch, meta_graph_pb2.TensorInfo): + tensor_infos.append(fetch) + decoded = _get_element_from_tensor_info(fetch, self._func_graph) + if (tensor_util.is_tf_type(decoded) or + isinstance(decoded, composite_tensor.CompositeTensor)): + tensor_fetches.append(decoded) + else: + operation_fetches.append(decoded) + return decoded + elif isinstance( + fetch, (tensor_lib.Tensor, composite_tensor.CompositeTensor)): + tensor_fetches.append(fetch) + return fetch + else: + graph_element = self.graph.as_graph_element(fetch) + return _fetch_preprocessing_callback(graph_element) + + fetches = nest.map_structure(_fetch_preprocessing_callback, fetches) + + # Expand composite tensors into their component dense Tensors. + tensor_fetches = nest.flatten(tensor_fetches, expand_composites=True) + + for f in flat_feeds + tensor_fetches + operation_fetches: + if f.graph is not self._func_graph: + raise ValueError("Can only prune function whose feeds and fetches " + f"from graph {self._func_graph}. 
Input " + f"{f} is from a different graph {f.graph}.") + with self._func_graph.as_default(): + pruned_graph = func_graph.FuncGraph(name) + lift_map = lift_to_graph.lift_to_graph( + operation_fetches + tensor_fetches, + pruned_graph, + sources=flat_feeds + self.graph.internal_captures, + base_graph=self._func_graph) + + # Note that we add the component tensors of any composite tensors to the + # returned function's outputs list; the list must contain these component + # tensors, or the function's sparse outputs won't work properly. + pruned_graph.outputs.extend(lift_map[x] for x in tensor_fetches) + pruned_graph.control_outputs.extend( + [lift_map[operation] for operation in operation_fetches]) + pruned_graph.inputs.extend(lift_map[x] for x in flat_feeds) + for external_capture, internal_capture in self.graph.captures: + pruned_graph.add_capture(external_capture, lift_map[internal_capture]) + for ti in tensor_infos: + if ti.WhichOneof("encoding") == "name": # Dense tensors only + t = pruned_graph.as_graph_element(ti.name) + if tensor_util.is_tf_type(t): + t.set_shape(tensor_shape.TensorShape(ti.tensor_shape)) + # pylint: disable=protected-access + for f in self.graph._functions.values(): + pruned_graph._add_function(f) + # pylint: enable=protected-access + + pruned_graph.variables = self.graph.variables + + def _structured_output_mapping(fetched): + """callback for `nest.map_structure()`""" + lifted = lift_map[fetched] + if isinstance(lifted, ops.Operation): + return None + return lifted + + # expand_composites=True here causes composite tensors to be expanded + # into their component dense Tensors, mapped to the new graph, and then + # reconstituted into their original composite form. 
+ pruned_graph.structured_outputs = nest.map_structure( + _structured_output_mapping, fetches, expand_composites=True) + + if input_signature: + # canonicalize the signature before setting + args, kwargs = input_signature + args = () if args is None else args + input_signature = (args, kwargs) + + pruned_graph.structured_input_signature = input_signature + pruned_fn = WrappedFunction( + pruned_graph, variable_holder=self._variable_holder) + pruned_fn._num_positional_args = len(flat_feeds) # pylint: disable=protected-access + # TODO(kathywu): Enable keyword arguments if an input signature is specified + pruned_fn._arg_keywords = [tensor.op.name for tensor in flat_feeds] # pylint: disable=protected-access + return pruned_fn + + +def _filter_returned_ops(fn): + """Filtering out any ops returned by function. + + Args: + fn: a function + + Returns: + A tuple of ( + Wrapped function that returns `None` in place of any ops, + dict that maps the index in the flat output structure to the returned op + ) + """ + returned_ops = {} + + def wrap_and_filter_returned_ops(*args, **kwargs): + outputs = fn(*args, **kwargs) + flat_outputs = nest.flatten(outputs) + for n in range(len(flat_outputs)): + output = flat_outputs[n] + if isinstance(output, ops.Operation): + returned_ops[n] = output + flat_outputs[n] = None + return nest.pack_sequence_as(outputs, flat_outputs) + + return wrap_and_filter_returned_ops, returned_ops + + +class WrappedGraph(object): + """Class for wrapping multiple TF 1.X functions in a single graph. + + Maintains a dictionary mapping names to wrapped functions. See + `tf.compat.v1.wrap_function` to learn more about wrapping V1 functions. 
+ + Functions wrapped using this class have access to variables and collections + created in other wrapped functions, using the standard TF 1.X API ( + `tf.compat.v1.get_variable` or + `tf.compat.v1.get_default_graph().get_collection(...)`) + + Outside a function, variables and collections may be accessed using the + `variables` and `graph` properties. + + Example: + + ``` + def add_v1(x): + with tf.compat.v1.variable_scope('vars', reuse=tf.compat.v1.AUTO_REUSE): + v = tf.compat.v1.get_variable('v', shape=[], dtype=tf.int32) + return v + x + + def increment_var_v1(x): + with tf.compat.v1.variable_scope('vars', reuse=tf.compat.v1.AUTO_REUSE): + v = tf.compat.v1.get_variable('v', shape=[], dtype=tf.int32) + return v.assign_add(x) + + g = WrappedGraph() + add = g.wrap_function(add_v1, [tf.TensorSpec([], tf.int32)]) + increment_var = g.wrap_function(increment_var_v1, + [tf.TensorSpec([], tf.int32)]) + + assert len(g.variables) == 1 + assert g.variables[0].numpy() == 0 + increment_var(tf.constant(5)) + assert g.variables[0].numpy() == 5 + + ``` + """ + + def __init__(self, variable_holder=None, **kwargs): + self._variable_holder = ( + variable_holder or VariableHolder(share_variables=True)) + + name = kwargs.pop("name", "wrapped_function_graph") + # Always start with empty collections, unless otherwise specified. Setting + # `collections=None` will copy the collections from the outer graph. + collections = kwargs.pop("collections", {}) + self.graph = func_graph.FuncGraph(name, collections=collections, **kwargs) + + self._wrapped_function = WrappedFunction(self.graph, self._variable_holder) + self._functions = {} + + @property + def functions(self): + return self._functions + + @property + def variables(self): + return self._variable_holder.variables + + def wrap_function(self, fn, signature, name=None): + """Wraps a TF 1.X function and returns an eager-compatible function. 
+ + All functions wrapped in the same `WrappedGraph` will have access to the + same graph (`tf.compat.v1.get_default_graph` to get the graph object + within a function, or `WrappedGraph.graph` to get the graph outside a + function). Variables created within the function will be added to the + `variables` list. + + Function inputs: All inputs to the function must be tensors (nested ok), + with their shapes and dtypes defined in the `signature` argument. + + Function outputs: + + * The 1.X function may return tensors, variables, and ops. The wrapped + eager-compatible function will always return tensors in the same nested + structure. + * Variables are replaced with a tensor containing the latest read values. + * Returned ops are executed, and replaced with None. + * The order of op execution and variable reads in the return is + nondeterministic. For example: + + ``` + def update_var(x): + v = tf.Variable(0) + op = tf.compat.v1.assign(v, x).op + return v, op + + g = WrappedGraph() + fn = g.wrap_function(update_var) + read_value, _ = fn(tf.constant(3)) + print(read_value.numpy()) # could be 0 or 3 + print(g.variables[0].numpy()) # always 3 + ``` + + To ensure that ops in the function are executed (e.g. ops added to the + `tf.GraphKeys.UPDATE_OPS` collection), include them in the function returns. + + Args: + fn: a 1.X tensorflow function. + signature: a possibly nested sequence of `TensorSpecs` specifying the + shapes and dtypes of the arguments. + name: an optional string name for the function. The function will be saved + with key `name` in the `functions` dictionary. + + Returns: + An eager-compatible function. 
+ """ + return self._wrap_function(fn, signature=signature, name=name) + + def _wrap_function(self, + fn, + args=None, + kwargs=None, + signature=None, + name=None): + """Internal wrap function method with extended func_graph arguments.""" + fn_with_filter_and_scope, returned_ops = _filter_returned_ops( + self._variable_holder.call_with_variable_creator_scope(fn)) + + func_graph.func_graph_from_py_func( + None, # Name is unused. + fn_with_filter_and_scope, + args=args, + kwargs=kwargs, + signature=signature, + add_control_dependencies=False, + func_graph=self.graph) + + # This code relies on questional behavior from `func_graph_from_py_func`. + # If an existing FuncGraph is passed into the `func_graph` arg, the inputs + # and structured outputs are overwritten. Pretty sure this is a bug, + # because structured outputs doesn't match up with the outputs... + fn_inputs = self.graph.inputs[:-len(self.graph.captures)] + + # Return filtered ops to the flattened outputs. + flat_fn_outputs = nest.flatten(self.graph.structured_outputs) + for index, op in returned_ops.items(): + flat_fn_outputs[index] = op + fn_outputs = nest.pack_sequence_as(self.graph.structured_outputs, + flat_fn_outputs) + + name = name or fn.__name__ + wrapped_function = self._wrapped_function.prune( + fn_inputs, fn_outputs, name, self.graph.structured_input_signature) + self._functions[name] = wrapped_function + return wrapped_function + + +@tf_export(v1=["wrap_function"]) +def wrap_function(fn, signature, name=None): + """Wraps the TF 1.x function fn into a graph function. + + The python function `fn` will be called once with symbolic arguments specified + in the `signature`, traced, and turned into a graph function. Any variables + created by `fn` will be owned by the object returned by `wrap_function`. The + resulting graph function can be called with tensors which match the + signature. 
+ + ```python + def f(x, do_add): + v = tf.Variable(5.0) + if do_add: + op = v.assign_add(x) + else: + op = v.assign_sub(x) + with tf.control_dependencies([op]): + return v.read_value() + + f_add = tf.compat.v1.wrap_function(f, [tf.TensorSpec((), tf.float32), True]) + + assert float(f_add(1.0)) == 6.0 + assert float(f_add(1.0)) == 7.0 + + # Can call tf.compat.v1.wrap_function again to get a new trace, a new set + # of variables, and possibly different non-template arguments. + f_sub= tf.compat.v1.wrap_function(f, [tf.TensorSpec((), tf.float32), False]) + + assert float(f_sub(1.0)) == 4.0 + assert float(f_sub(1.0)) == 3.0 + ``` + + Both `tf.compat.v1.wrap_function` and `tf.function` create a callable + TensorFlow graph. But while `tf.function` runs all stateful operations + (e.g. `tf.print`) and sequences operations to provide the same semantics as + eager execution, `wrap_function` is closer to the behavior of `session.run` in + TensorFlow 1.x. It will not run any operations unless they are required to + compute the function's outputs, either through a data dependency or a control + dependency. Nor will it sequence operations. + + Unlike `tf.function`, `wrap_function` will only trace the Python function + once. As with placeholders in TF 1.x, shapes and dtypes must be provided to + `wrap_function`'s `signature` argument. + + Since it is only traced once, variables and state may be created inside the + function and owned by the function wrapper object. + + Args: + fn: python function to be wrapped + signature: the placeholder and python arguments to be passed to the wrapped + function + name: Optional. The name of the function. + + Returns: + the wrapped graph function. 
+ """ + holder = VariableHolder(fn) + func_graph_name = "wrapped_function" + if name is not None: + func_graph_name = "wrapped_function_" + name + return WrappedFunction( + func_graph.func_graph_from_py_func( + func_graph_name, + holder, + args=None, + kwargs=None, + signature=signature, + add_control_dependencies=False, + collections={}), + variable_holder=holder, + signature=signature) + + +def function_from_graph_def(graph_def, inputs, outputs, captures=None): + """Creates a ConcreteFunction from a GraphDef. + + Args: + graph_def: A GraphDef to make a function out of. + inputs: A Tensor name or nested structure of names in `graph_def` which + should be inputs to the function. + outputs: A Tensor name or nested structure of names in `graph_def` which + should be outputs of the function. + captures: (Optional) A dictionary mapping node names in `graph_def` that + should be captured as inputs to tensors containing the value of the + captured inputs. + + Returns: + A ConcreteFunction. + """ + + def _imports_graph_def(): + importer.import_graph_def(graph_def, name="") + graph = ops.get_default_graph() + if captures is not None: + for c in captures: + graph.add_capture(captures[c], graph.get_tensor_by_name(str(c) + ":0")) + + wrapped_import = wrap_function(_imports_graph_def, []) + import_graph = wrapped_import.graph + return wrapped_import.prune( + nest.map_structure(import_graph.as_graph_element, inputs), + nest.map_structure(import_graph.as_graph_element, outputs)) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/lib/__pycache__/__init__.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/lib/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d489ad1fd2cb70b387c8fdc124756dc27bd4ee54 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/lib/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/videochat2/lib/python3.10/site-packages/tensorflow/python/lib/io/__pycache__/__init__.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/lib/io/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf65bb7a892b7657b5be1c72bf354a4051068dbd Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/lib/io/__pycache__/__init__.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/lib/io/__pycache__/file_io.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/lib/io/__pycache__/file_io.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c4eb6b20cca888e02402df18ebb7bdae7c1ebf58 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/lib/io/__pycache__/file_io.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_experimental_dataset_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_experimental_dataset_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f38c9e67c1df0d7c1d3badd27b74f2722e3abc9a --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_experimental_dataset_ops.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:23f7c1f7dc1c12ef3b6cbe3ebafa01774cb5987258488f96bb31c180faff6fad +size 269930 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_tpu_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_tpu_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..431d8995a60259e74c1322b8b750e272ce2745a5 --- /dev/null +++ 
b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_tpu_ops.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bb728df903c9b1d410cda35db60f3af3d9d18d025a8c6b4d73c3f5b09e082f25 +size 172593