diff --git a/.gitattributes b/.gitattributes index 4d682be9465e975d7f90a947b9a00fc769c05d0e..0c7cf931b55de1929b0aaeb8ef192799a588f71a 100644 --- a/.gitattributes +++ b/.gitattributes @@ -196,3 +196,4 @@ SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/pl SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/grappler/_pywrap_tf_optimizer.so filter=lfs diff=lfs merge=lfs -text SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/grappler/_pywrap_tf_item.so filter=lfs diff=lfs merge=lfs -text SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/profiler/internal/_pywrap_profiler.so filter=lfs diff=lfs merge=lfs -text +SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/_pywrap_tf_session.so filter=lfs diff=lfs merge=lfs -text diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e37691961fb3f6c6e611edce7fa7247ce648b5cd --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/__init__.py @@ -0,0 +1,57 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Conversion of eager-style Python into TensorFlow graph code. + +NOTE: In TensorFlow 2.0, AutoGraph is automatically applied when using +`tf.function`. This module contains lower-level APIs for advanced use. + +AutoGraph transforms a subset of Python which operates on TensorFlow objects +into equivalent TensorFlow graph code. When executing the graph, it has the same +effect as if you ran the original code in eager mode. +Python code which doesn't operate on TensorFlow objects remains functionally +unchanged, but keep in mind that `tf.function` only executes such code at trace +time, and generally will not be consistent with eager execution. + +For more information, see the +[AutoGraph reference documentation](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/g3doc/reference/index.md), +and the [tf.function guide](https://www.tensorflow.org/guide/function#autograph_transformations). +""" + +from tensorflow.python.util.all_util import remove_undocumented + +# TODO(mdan): Revisit this list once we finalize the generated code mechanism. 
+_allowed_symbols = [ + # Main API + 'AutoGraphError', + 'ConversionOptions', + 'Feature', + 'StackTraceMapper', + 'convert', + 'converted_call', + 'do_not_convert', + 'to_code', + 'to_graph', + # Overloaded operators + 'operators', + # Python language "extensions" + 'set_element_type', + 'set_loop_options', + 'stack', + 'tensor_list', + # Utilities: to be removed + 'utils', +] + +remove_undocumented(__name__, _allowed_symbols) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3f0935065f25f6e18620bd6eae13b4da485d6eeb Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/lang/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/lang/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/lang/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/lang/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ad51474b30d4643a6f83a63ade7de83c8fd31be Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/lang/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/lang/__pycache__/directives.cpython-310.pyc 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/lang/__pycache__/directives.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a694f44739f57176b2c27216fe22afa816002f3 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/lang/__pycache__/directives.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/lang/__pycache__/special_functions.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/lang/__pycache__/special_functions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bfb9591ac5dc47109f33c1a7d235b0f815a67fa4 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/lang/__pycache__/special_functions.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/lang/directives.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/lang/directives.py new file mode 100644 index 0000000000000000000000000000000000000000..c3647fc849e0dd36ad1aabdc09d232706c7b0390 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/lang/directives.py @@ -0,0 +1,94 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Directives are special no-op functions that serve as compilation markers. + +They provide static information like type hints, compilation and TensorFlow +overrides. + +These serve as annotations in the compiled code, allowing the user some control +over the compilation process. They have no functional role at runtime. +""" + +from tensorflow.python.util.tf_export import tf_export + +UNSPECIFIED = object() + + +def set_element_type(entity, dtype, shape=UNSPECIFIED): + """Indicates that the entity is expected hold items of specified type/shape. + + The staged TensorFlow ops will reflect and assert this data type. Ignored + otherwise. + + Args: + entity: The entity to annotate. + dtype: TensorFlow dtype value to assert for entity. + shape: Optional shape to assert for entity. + """ + del entity + del dtype + del shape + + +@tf_export('autograph.experimental.set_loop_options') +def set_loop_options( + parallel_iterations=UNSPECIFIED, + swap_memory=UNSPECIFIED, + maximum_iterations=UNSPECIFIED, + shape_invariants=UNSPECIFIED): + """Specifies additional arguments to be passed to the enclosing while_loop. + + The parameters apply to and only to the immediately enclosing loop. It only + has effect if the loop is staged as a TF while_loop; otherwise the parameters + have no effect. + + Usage: + + >>> @tf.function(autograph=True) + ... def f(): + ... n = 0 + ... for i in tf.range(10): + ... tf.autograph.experimental.set_loop_options(maximum_iterations=3) + ... n += 1 + ... return n + + >>> @tf.function(autograph=True) + ... def f(): + ... v = tf.constant((0,)) + ... for i in tf.range(3): + ... tf.autograph.experimental.set_loop_options( + ... shape_invariants=[(v, tf.TensorShape([None]))] + ... ) + ... v = tf.concat((v, [i]), 0) + ... return v + + Also see tf.while_loop. 
+ + Args: + parallel_iterations: The maximum number of iterations allowed to run in + parallel at any given time. Note that this does not guarantee parallel + execution. + swap_memory: Whether to store intermediate values needed for + gradients on the CPU instead of GPU. + maximum_iterations: Allows limiting the total number of iterations executed + by the loop. + shape_invariants: Allows controlling the argument with the same name passed + to tf.while_loop. Unlike tf.while_loop, this is a list of + `(tensor, shape)` pairs. + """ + del parallel_iterations + del swap_memory + del maximum_iterations + del shape_invariants diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/lang/special_functions.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/lang/special_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..a2fa91ef6cdd2938992e7a4febf8ec5f61bd5e13 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/lang/special_functions.py @@ -0,0 +1,118 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Special functions that only make sense for AutoGraph. 
+ +These functions are meant to ensure feature parity between Python and AutoGraph, +so that the exact same code works in both modes. In general, AutoGraph will +replace these calls. +""" + +from tensorflow.python.autograph.operators import data_structures +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import tensor_util + + +def _validate_list_constructor(elements, element_dtype, element_shape): + """Validates the inputs of tensor_list.""" + if element_dtype is not None and element_shape is not None: + return + if tensor_util.is_tf_type(elements): + return + if isinstance(elements, (list, tuple)): + if elements: + return + else: + raise ValueError( + 'element_dtype and element_shape are required when elements are' + ' empty') + + raise ValueError( + 'unknown type for elements: {}; only Tensor, list and tuple are' + ' allowed'.format(type(elements))) + + +def match_staging_level(value, like_value): + """Casts a value to be staged at the same level as another.""" + if tensor_util.is_tf_type(like_value): + return constant_op.constant(value) + return value + + +def tensor_list(elements, + element_dtype=None, + element_shape=None, + use_tensor_array=False): + """Creates an tensor list and populates it with the given elements. + + This function provides a more uniform access to tensor lists and tensor + arrays, and allows optional initialization. + + Note: this function is a simplified wrapper. If you need greater control, + it is recommended to use the underlying implementation directly. 
+ + Args: + elements: Iterable[tf.Tensor, ...], the elements to initially fill the list + with + element_dtype: Optional[tf.DType], data type for the elements in the list; + required if the list is empty + element_shape: Optional[tf.TensorShape], shape for the elements in the list; + required if the list is empty + use_tensor_array: bool, whether to use the more compatible but restrictive + tf.TensorArray implementation + Returns: + Union[tf.Tensor, tf.TensorArray], the new list. + Raises: + ValueError: for invalid arguments + """ + _validate_list_constructor(elements, element_dtype, element_shape) + if use_tensor_array: + return data_structures.tf_tensor_array_new(elements, element_dtype, + element_shape) + else: + return data_structures.tf_tensor_list_new(elements, element_dtype, + element_shape) + + +def stack(list_or_tensor, element_dtype=None, strict=True): + """Stacks the input, if it admits the notion of stacking. + + For example, a list of tensors can be stacked into a larger tensor. This + function is similar to tf.stack, but it accepts non-lists and lists of + non-tensors as arguments. In the latter case, the function does nothing. + + Args: + list_or_tensor: Any + element_dtype: tf.DType, optional dtypedtype for the elements in the list. + Required if the input is stackable, and the list is untyped. + strict: bool, if True an error is raised if the input is not stackable. + Otherwise the function is a no-op. + + Returns: + Any, if the input is stackable, the result will be a tf.Tensor. Otherwise, + if strict=False, the result will be list_or_tensor. + + Raises: + ValueError: if strict=True and the input is not stackable. 
+ """ + if strict: + def raise_error(x): + raise ValueError('%s must be stackable when strict=True' % x) + original_call = raise_error + else: + original_call = lambda x: x + return data_structures.list_stack( + list_or_tensor, + data_structures.ListStackOpts( + element_dtype=element_dtype, original_call=original_call)) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/anno.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/anno.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..94d695003df83a0b8ae4dcd85d704a90e3e1e2de Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/anno.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/ast_util.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/ast_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..74e9799f3ea381449227fbc1c0e22824911e9155 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/ast_util.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/cache.cpython-310.pyc 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/cache.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a004fd2b2e82df79db7542d24fcc69dd413e02d Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/cache.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/cfg.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/cfg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..20a77e8a7e5e47dd624e093fa10629a5f6bd2ffc Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/cfg.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/error_utils.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/error_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..049baa346d72a63484baff1c6acadba940d2953c Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/error_utils.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/errors.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/errors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cbc842a1153a82a5765126739ccfc466d45a9fc4 Binary files /dev/null and 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/errors.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/gast_util.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/gast_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4624e170cfb93e1f06aa2648953e628cd9cbec91 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/gast_util.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/inspect_utils.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/inspect_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5df21bf508fbda06ad06b83b1d1a8932f597966a Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/inspect_utils.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/loader.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/loader.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c23defe6c64fa703c1597bc5562cd78e4b7293b2 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/loader.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/naming.cpython-310.pyc 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/naming.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a3550b30accdfc0167fb0887f15bb638f14f851b Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/naming.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/origin_info.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/origin_info.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b3a584d880a3a94cf1ea9be58aecabcac98d377a Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/origin_info.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/parser.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/parser.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ccf6b70eb6eac63898b3ad8ad2fa7e82d646b761 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/parser.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/pretty_printer.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/pretty_printer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..94f6cec554221f5950ffd1420aa02d004dc945b1 Binary files /dev/null and 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/pretty_printer.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/qual_names.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/qual_names.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d431215084fefe9a162f97148a9e32298125961e Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/qual_names.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/templates.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/templates.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..18549eab811523c419fd3939ae2aa7363c1e9186 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/templates.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/transformer.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/transformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..587e14c08c2b52b67bfbbb200c5055c9ee160cd0 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/transformer.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/transpiler.cpython-310.pyc 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/transpiler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f578558a5c9c8d41d7660bb140881473bfead8f Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/__pycache__/transpiler.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/anno.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/anno.py new file mode 100644 index 0000000000000000000000000000000000000000..59ff703587cbb1fefd81a83008381d87b82499d9 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/anno.py @@ -0,0 +1,174 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""AST node annotation support. + +Adapted from Tangent. +""" + +import enum + +# pylint:disable=g-bad-import-order + +import gast +# pylint:enable=g-bad-import-order + + +# TODO(mdan): Shorten the names. +# These names are heavily used, and anno.blaa +# TODO(mdan): Replace the attr-dict mechanism with a more typed solution. 
+ + +class NoValue(enum.Enum): + """Base class for different types of AST annotations.""" + + def of(self, node, default=None): + return getanno(node, self, default=default) + + def add_to(self, node, value): + setanno(node, self, value) + + def exists(self, node): + return hasanno(node, self) + + def __repr__(self): + return str(self.name) + + +class Basic(NoValue): + """Container for basic annotation keys. + + The enum values are used strictly for documentation purposes. + """ + + QN = 'Qualified name, as it appeared in the code. See qual_names.py.' + SKIP_PROCESSING = ( + 'This node should be preserved as is and not processed any further.') + INDENT_BLOCK_REMAINDER = ( + 'When a node is annotated with this, the remainder of the block should' + ' be indented below it. The annotation contains a tuple' + ' (new_body, name_map), where `new_body` is the new indented block and' + ' `name_map` allows renaming symbols.') + ORIGIN = ('Information about the source code that converted code originated' + ' from. See origin_information.py.') + DIRECTIVES = ('User directives associated with a statement or a variable.' + ' Typically, they affect the immediately-enclosing statement.') + + EXTRA_LOOP_TEST = ( + 'A special annotation containing additional test code to be executed in' + ' for loops.') + + +class Static(NoValue): + """Container for static analysis annotation keys. + + The enum values are used strictly for documentation purposes. + """ + + # Symbols + # These flags are boolean. + IS_PARAM = 'Symbol is a parameter to the function being analyzed.' + + # Scopes + # Scopes are represented by objects of type activity.Scope. + SCOPE = 'The scope for the annotated node. See activity.py.' + # TODO(mdan): Drop these in favor of accessing the child's SCOPE. + ARGS_SCOPE = 'The scope for the argument list of a function call.' + COND_SCOPE = 'The scope for the test node of a conditional statement.' 
+ BODY_SCOPE = ( + 'The scope for the main body of a statement (True branch for if ' + 'statements, main body for loops).') + ORELSE_SCOPE = ( + 'The scope for the orelse body of a statement (False branch for if ' + 'statements, orelse body for loops).') + + # Static analysis annotations. + DEFINITIONS = ( + 'Reaching definition information. See reaching_definitions.py.') + ORIG_DEFINITIONS = ( + 'The value of DEFINITIONS that applied to the original code before any' + ' conversion.') + DEFINED_FNS_IN = ( + 'Local function definitions that may exist when exiting the node. See' + ' reaching_fndefs.py') + DEFINED_VARS_IN = ( + 'Symbols defined when entering the node. See reaching_definitions.py.') + LIVE_VARS_OUT = ('Symbols live when exiting the node. See liveness.py.') + LIVE_VARS_IN = ('Symbols live when entering the node. See liveness.py.') + TYPES = 'Static type information. See type_inference.py.' + CLOSURE_TYPES = 'Types of closure symbols at each detected call site.' + VALUE = 'Static value information. See type_inference.py.' 
+ + +FAIL = object() + + +def keys(node, field_name='___pyct_anno'): + if not hasattr(node, field_name): + return frozenset() + return frozenset(getattr(node, field_name).keys()) + + +def getanno(node, key, default=FAIL, field_name='___pyct_anno'): + if (default is FAIL or (hasattr(node, field_name) and + (key in getattr(node, field_name)))): + return getattr(node, field_name)[key] + return default + + +def hasanno(node, key, field_name='___pyct_anno'): + return hasattr(node, field_name) and key in getattr(node, field_name) + + +def setanno(node, key, value, field_name='___pyct_anno'): + annotations = getattr(node, field_name, {}) + setattr(node, field_name, annotations) + annotations[key] = value + + # So that the annotations survive gast_to_ast() and ast_to_gast() + if field_name not in node._fields: + node._fields += (field_name,) + + +def delanno(node, key, field_name='___pyct_anno'): + annotations = getattr(node, field_name) + del annotations[key] + if not annotations: + delattr(node, field_name) + node._fields = tuple(f for f in node._fields if f != field_name) + + +def copyanno(from_node, to_node, key, field_name='___pyct_anno'): + if hasanno(from_node, key, field_name=field_name): + setanno( + to_node, + key, + getanno(from_node, key, field_name=field_name), + field_name=field_name) + + +def dup(node, copy_map, field_name='___pyct_anno'): + """Recursively copies annotations in an AST tree. + + Args: + node: ast.AST + copy_map: Dict[Hashable, Hashable], maps a source anno key to a destination + key. All annotations with the source key will be copied to identical + annotations with the destination key. 
+ field_name: str + """ + for n in gast.walk(node): + for k in copy_map: + if hasanno(n, k, field_name): + setanno(n, copy_map[k], getanno(n, k, field_name), field_name) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/ast_util.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/ast_util.py new file mode 100644 index 0000000000000000000000000000000000000000..fcf13fad82827d3508e13d2f43c54d45b81c9c39 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/ast_util.py @@ -0,0 +1,344 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
# ==============================================================================
"""AST manipulation utilities."""

import ast

import gast

from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names


class CleanCopier(object):
  """NodeTransformer-like visitor that copies an AST."""

  def __init__(self, preserve_annos):
    super(CleanCopier, self).__init__()
    self.preserve_annos = preserve_annos

  def copy(self, node):
    """Returns a deep copy of node (excluding some fields, see copy_clean)."""

    if isinstance(node, list):
      return [self.copy(n) for n in node]
    elif isinstance(node, tuple):
      return tuple(self.copy(n) for n in node)
    elif not isinstance(node, (gast.AST, ast.AST)):
      # Assuming everything that's not an AST, list or tuple is a value type
      # and may simply be assigned.
      return node

    assert isinstance(node, (gast.AST, ast.AST))

    new_fields = {}
    for f in node._fields:
      if not f.startswith('__') and hasattr(node, f):
        new_fields[f] = self.copy(getattr(node, f))
    new_node = type(node)(**new_fields)

    if self.preserve_annos:
      for k in self.preserve_annos:
        anno.copyanno(node, new_node, k)
    return new_node


def copy_clean(node, preserve_annos=None):
  """Creates a deep copy of an AST.

  The copy will not include fields that are prefixed by '__', with the
  exception of user-specified annotations.

  Args:
    node: ast.AST
    preserve_annos: Optional[Set[Hashable]], annotation keys to include in the
      copy

  Returns:
    ast.AST
  """
  return CleanCopier(preserve_annos).copy(node)


class SymbolRenamer(gast.NodeTransformer):
  """Transformer that can rename symbols to simple names."""

  def __init__(self, name_map):
    self.name_map = name_map

  def _process_name_node(self, node):
    qn = anno.getanno(node, anno.Basic.QN)
    if qn in self.name_map:
      new_node = gast.Name(
          str(self.name_map[qn]),
          ctx=node.ctx,
          annotation=None,
          type_comment=None)
      # All annotations get carried over.
      for k in anno.keys(node):
        anno.copyanno(node, new_node, k)
      return new_node
    return self.generic_visit(node)

  def _process_list_of_strings(self, names):
    for i in range(len(names)):
      qn = qual_names.QN(names[i])
      if qn in self.name_map:
        names[i] = str(self.name_map[qn])
    return names

  def visit_Nonlocal(self, node):
    node.names = self._process_list_of_strings(node.names)
    return node

  def visit_Global(self, node):
    node.names = self._process_list_of_strings(node.names)
    return node

  def visit_Name(self, node):
    return self._process_name_node(node)

  def visit_Attribute(self, node):
    if anno.hasanno(node, anno.Basic.QN):
      return self._process_name_node(node)
    # Renaming attributes is not supported.
    return self.generic_visit(node)

  def visit_FunctionDef(self, node):
    qn = qual_names.QN(node.name)
    if qn in self.name_map:
      node.name = str(self.name_map[qn])
    return self.generic_visit(node)


def rename_symbols(node, name_map):
  """Renames symbols in an AST. Requires qual_names annotations."""
  renamer = SymbolRenamer(name_map)
  if isinstance(node, list):
    return [renamer.visit(n) for n in node]
  elif isinstance(node, tuple):
    return tuple(renamer.visit(n) for n in node)
  return renamer.visit(node)


def keywords_to_dict(keywords):
  """Converts a list of ast.keyword objects to a dict."""
  keys = []
  values = []
  for kw in keywords:
    keys.append(gast.Constant(kw.arg, kind=None))
    values.append(kw.value)
  return gast.Dict(keys=keys, values=values)


class PatternMatcher(gast.NodeVisitor):
  """Matches a node against a pattern represented by a node."""

  def __init__(self, pattern):
    self.pattern = pattern
    self.pattern_stack = []
    self.matches = True

  def compare_and_visit(self, node, pattern):
    self.pattern_stack.append(self.pattern)
    self.pattern = pattern
    self.generic_visit(node)
    self.pattern = self.pattern_stack.pop()

  def no_match(self):
    self.matches = False
    return False

  def is_wildcard(self, p):
    if isinstance(p, (list, tuple)) and len(p) == 1:
      p, = p
    if isinstance(p, gast.Name) and p.id == '_':
      return True
    if p == '_':
      return True
    return False

  def generic_visit(self, node):
    if not self.matches:
      return

    pattern = self.pattern
    for f in node._fields:
      if f.startswith('__'):
        continue

      if not hasattr(node, f):
        if hasattr(pattern, f) and getattr(pattern, f):
          return self.no_match()
        else:
          continue
      if not hasattr(pattern, f):
        return self.no_match()

      v = getattr(node, f)
      p = getattr(pattern, f)

      if self.is_wildcard(p):
        continue
      if isinstance(v, (list, tuple)):
        if not isinstance(p, (list, tuple)) or len(v) != len(p):
          return self.no_match()
        for v_item, p_item in zip(v, p):
          self.compare_and_visit(v_item, p_item)
      elif isinstance(v, (gast.AST, ast.AST)):
        if not isinstance(v, type(p)) and not isinstance(p, type(v)):
          return self.no_match()
        self.compare_and_visit(v, p)
      else:
        # Assume everything else is a value type.
        if v != p:
          return self.no_match()


def matches(node, pattern):
  """Basic pattern matcher for AST.

  The pattern may contain wildcards represented by the symbol '_'. A node
  matches a pattern if for every node in the tree, either there is a node of
  the same type in pattern, or a Name node with id='_'.

  Args:
    node: ast.AST
    pattern: ast.AST

  Returns:
    bool
  """
  if isinstance(pattern, str):
    pattern = parser.parse_str(pattern)

  matcher = PatternMatcher(pattern)
  matcher.visit(node)
  return matcher.matches


# TODO(mdan): Once we have error tracing, we may be able to just go to SSA.
def apply_to_single_assignments(targets, values, apply_fn):
  """Applies a function to each individual assignment.

  This function can process a possibly-unpacked (e.g. a, b = c, d) assignment.
  It tries to break down the unpacking if possible. In effect, it has the same
  effect as passing the assigned values in SSA form to apply_fn.

  Examples:

  The following will result in apply_fn(a, c), apply_fn(b, d):

      a, b = c, d

  The following will result in apply_fn(a, c[0]), apply_fn(b, c[1]):

      a, b = c

  The following will result in apply_fn(a, (b, c)):

      a = b, c

  It uses the visitor pattern to allow subclasses to process single
  assignments individually.

  Args:
    targets: Union[List[ast.AST], Tuple[ast.AST, ...], ast.AST]; should be
      used with the targets field of an ast.Assign node
    values: ast.AST
    apply_fn: Callable[[ast.AST, ast.AST], None], called with the
      respective nodes of each single assignment
  """
  if not isinstance(targets, (list, tuple)):
    targets = (targets,)
  for target in targets:
    if isinstance(target, (gast.Tuple, gast.List)):
      for i in range(len(target.elts)):
        target_el = target.elts[i]
        if isinstance(values, (gast.Tuple, gast.List)):
          value_el = values.elts[i]
        else:
          # Unpacking a non-tuple value: synthesize an index expression so the
          # single assignment reads values[i].
          idx = parser.parse_expression(str(i))
          value_el = gast.Subscript(values, idx, ctx=gast.Load())
        apply_to_single_assignments(target_el, value_el, apply_fn)
    else:
      apply_fn(target, values)


def parallel_walk(node, other):
  """Walks two ASTs in parallel.

  The two trees must have identical structure.

  Args:
    node: Union[ast.AST, Iterable[ast.AST]]
    other: Union[ast.AST, Iterable[ast.AST]]

  Yields:
    Tuple[ast.AST, ast.AST]

  Raises:
    ValueError: if the two trees don't have identical structure.
  """
  if isinstance(node, (list, tuple)):
    node_stack = list(node)
  else:
    node_stack = [node]

  if isinstance(other, (list, tuple)):
    other_stack = list(other)
  else:
    other_stack = [other]

  while node_stack and other_stack:
    assert len(node_stack) == len(other_stack)
    n = node_stack.pop()
    o = other_stack.pop()

    # Bug fix: the second clause must validate `o`, not re-check `n`;
    # otherwise an invalid value on the `other` side escapes validation.
    if ((not isinstance(n, (ast.AST, gast.AST, str)) and n is not None) or
        (not isinstance(o, (ast.AST, gast.AST, str)) and o is not None) or
        n.__class__.__name__ != o.__class__.__name__):
      raise ValueError('inconsistent nodes: {} ({}) and {} ({})'.format(
          n, n.__class__.__name__, o, o.__class__.__name__))

    yield n, o

    if isinstance(n, str):
      assert isinstance(o, str), 'The check above should have ensured this'
      continue
    if n is None:
      assert o is None, 'The check above should have ensured this'
      continue

    for f in n._fields:
      n_child = getattr(n, f, None)
      o_child = getattr(o, f, None)
      if f.startswith('__') or n_child is None or o_child is None:
        continue

      if isinstance(n_child, (list, tuple)):
        if (not isinstance(o_child, (list, tuple)) or
            len(n_child) != len(o_child)):
          raise ValueError(
              'inconsistent values for field {}: {} and {}'.format(
                  f, n_child, o_child))
        node_stack.extend(n_child)
        other_stack.extend(o_child)

      elif isinstance(n_child, (gast.AST, ast.AST)):
        node_stack.append(n_child)
        other_stack.append(o_child)

      elif n_child != o_child:
        raise ValueError(
            'inconsistent values for field {}: {} and {}'.format(
                f, n_child, o_child))
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Caching utilities."""

import inspect
import weakref


# TODO(mdan): Add a garbage collection hook for cleaning up modules.
class _TransformedFnCache(object):
  """Generic two-level cache for transformed functions.

  The top-level keys are weak references derived from the source function by
  `_get_key`; an entry disappears automatically once its key is collected.
  The second-level keys (subkeys) are ordinary strong references and may be
  any hashable value — typically they distinguish kinds of transformation.
  """

  __slots__ = ('_cache',)

  def __init__(self):
    self._cache = weakref.WeakKeyDictionary()

  def _get_key(self, entity):
    raise NotImplementedError('subclasses must override')

  def has(self, entity, subkey):
    # A missing entity maps to an empty surrogate, so the membership test is
    # False without creating a bucket.
    return subkey in self._cache.get(self._get_key(entity), ())

  def __getitem__(self, entity):
    # setdefault materializes an empty bucket on first access, which keeps the
    # "cache[fn][subkey] = value" idiom working.
    return self._cache.setdefault(self._get_key(entity), {})

  def __len__(self):
    return len(self._cache)


class CodeObjectCache(_TransformedFnCache):
  """A function cache keyed by code object.

  Code objects are good proxies for the source code of a function, so this
  cache coalesces functions that share one — e.g. functions defined inside a
  loop, or the many bound methods of a class.

  When an entity carries no code object, the entity itself is the key.
  """

  def _get_key(self, entity):
    # Fall back to the entity itself when no code object is available.
    return getattr(entity, '__code__', entity)


class UnboundInstanceCache(_TransformedFnCache):
  """A function cache keyed by the underlying (unbound) function object.

  Bound methods of different instances collapse to one entry, because they
  share the same `__func__`.

  Unlike CodeObjectCache, two distinct functions remain distinct even when
  their code objects coincide — which matters for decorators that masquerade
  as another function.
  """

  def _get_key(self, entity):
    if inspect.ismethod(entity):
      return entity.__func__
    return entity


# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+# ============================================================================== +"""Control flow graph (CFG) structure for Python AST representation. + +The CFG is a digraph with edges representing valid control flow. Each +node is associated with exactly one AST node, but not all AST nodes may have +a corresponding CFG counterpart. + +Once built, the CFG itself is immutable, but the values it holds need not be; +they are usually annotated with information extracted by walking the graph. + +Tip: Use `Graph.as_dot` to visualize the CFG using any DOT viewer. + +Note: the CFG tries to include all code paths that MAY be taken, with a single +notable exception: + * function calls do not generate edges corresponding to exceptions they may + raise (i.e. a function call in the middle of a block does not return or jump + to any except or finally block) +TODO(mdan): Consider adding the edges above. They'd only add ~O(n) edges. +TODO(mdan): Alternatively, consider adding an edge from try to all its excepts. +""" + +# TODO(mdan): The notion of 'statements' below is inaccurate. +# They should rather be called 'block statements', because they include +# statements that may have a body, e.g. if and while. + +import collections +import enum +import weakref + +import astunparse +import gast + +from tensorflow.python.autograph.pyct import anno + + +class Node(object): + """A node in the CFG. + + Although new instances of this class are mutable, the objects that a user + finds in the CFG are typically not. + + The nodes represent edges in the CFG graph, and maintain pointers to allow + efficient walking in both forward and reverse order. The following property + holds for all nodes: "child in node.next" iff "node in child.prev". 
+ + Attributes: + next: FrozenSet[Node, ...], the nodes that follow this node, in control flow + order + prev: FrozenSet[Node, ...], the nodes that precede this node, in reverse + control flow order + ast_node: ast.AST, the AST node corresponding to this CFG node + """ + + def __init__(self, next_, prev, ast_node): + self.next = next_ + self.prev = prev + self.ast_node = ast_node + + def freeze(self): + self.next = frozenset(self.next) + # Assumption: All CFG nodes have identical life spans, because the graph + # owns them. Nodes should never be used outside the context of an existing + # graph. + self.prev = weakref.WeakSet(self.prev) + + def __repr__(self): + if isinstance(self.ast_node, gast.FunctionDef): + return 'def %s' % self.ast_node.name + elif isinstance(self.ast_node, gast.ClassDef): + return 'class %s' % self.ast_node.name + elif isinstance(self.ast_node, gast.withitem): + # TODO(xjun): remove use of astunparse + return astunparse.unparse(self.ast_node.context_expr).strip() + return astunparse.unparse(self.ast_node).strip() + + +class Graph( + collections.namedtuple( + 'Graph', + ['entry', 'exit', 'error', 'index', 'stmt_prev', 'stmt_next'])): + """A Control Flow Graph. + + The CFG maintains an index to allow looking up a CFG node by the AST node to + which it is associated. The index can also be enumerated in top-down, depth + first order. + + Walking the graph in forward or reverse order is supported by double + parent-child links. + + Note: the error nodes are not wired to their corresponding finally guards, + because these are shared, and wiring them would create a reverse path from + normal control flow into the error nodes, which we want to avoid. + + The graph also maintains edges corresponding to higher level statements + like for-else loops. A node is considered successor of a statement if there + is an edge from a node that is lexically a child of that statement to a node + that is not. Statement predecessors are analogously defined. 
+ + Attributes: + entry: Node, the entry node + exit: FrozenSet[Node, ...], the exit nodes + error: FrozenSet[Node, ...], nodes that exit due to an explicitly raised + error (errors propagated from function calls are not accounted) + index: Dict[ast.Node, Node], mapping AST nodes to the respective CFG node + stmt_prev: Dict[ast.Node, FrozenSet[Node, ...]], mapping statement AST nodes + to their predecessor CFG nodes + stmt_next: Dict[ast.Node, FrozenSet[Node, ...]], mapping statement AST nodes + to their successor CFG nodes + """ + + def __repr__(self): + return self.as_dot() + + def as_dot(self): + """Print CFG in DOT format.""" + result = 'digraph CFG {\n' + for node in self.index.values(): + result += ' %s [label="%s"];\n' % (id(node), node) + for node in self.index.values(): + for next_ in node.next: + result += ' %s -> %s;\n' % (id(node), id(next_)) + result += '}' + return result + + +class _WalkMode(enum.Enum): + FORWARD = 1 + REVERSE = 2 + + +# TODO(mdan): Rename to DataFlowAnalyzer. +# TODO(mdan): Consider specializations that use gen/kill/transfer abstractions. +class GraphVisitor(object): + """Base class for a CFG visitors. + + This implementation is not thread safe. + + The visitor has some facilities to simplify dataflow analyses. In particular, + it allows revisiting the nodes at the decision of the subclass. This can be + used to visit the graph until the state reaches a fixed point. + + For more details on dataflow analysis, see + https://www.seas.harvard.edu/courses/cs252/2011sp/slides/Lec02-Dataflow.pdf + + Note: the literature generally suggests visiting successor nodes only when the + state of the current node changed, regardless of whether that successor has + ever been visited. This implementation visits every successor at least once. 
+ + Attributes: + graph: Graph + in_: Dict[Node, Any], stores node-keyed state during a visit + out: Dict[Node, Any], stores node-keyed state during a visit + """ + + def __init__(self, graph): + self.graph = graph + self.reset() + + def init_state(self, node): + """State initialization function. + + Optional to overload. + + An in/out state slot will be created for each node in the graph. Subclasses + must overload this to control what that is initialized to. + + Args: + node: Node + """ + raise NotImplementedError('Subclasses must implement this.') + + # TODO(mdan): Rename to flow? + def visit_node(self, node): + """Visitor function. + + Args: + node: Node + + Returns: + bool, whether the node should be revisited; subclasses can visit every + reachable node exactly once by always returning False + """ + raise NotImplementedError('Subclasses must implement this.') + + def reset(self): + self.in_ = { + node: self.init_state(node) for node in self.graph.index.values() + } + self.out = { + node: self.init_state(node) for node in self.graph.index.values() + } + + def can_ignore(self, node): + """Returns True if the node can safely be assumed not to touch variables.""" + ast_node = node.ast_node + if anno.hasanno(ast_node, anno.Basic.SKIP_PROCESSING): + return True + return isinstance(ast_node, + (gast.Break, gast.Continue, gast.Raise, gast.Pass)) + + def _visit_internal(self, mode): + """Visits the CFG, breadth-first.""" + assert mode in (_WalkMode.FORWARD, _WalkMode.REVERSE) + if mode == _WalkMode.FORWARD: + open_ = [self.graph.entry] + elif mode == _WalkMode.REVERSE: + open_ = list(self.graph.exit) + closed = set() + + while open_: + node = open_.pop(0) + closed.add(node) + + should_revisit = self.visit_node(node) + + if mode == _WalkMode.FORWARD: + children = node.next + elif mode == _WalkMode.REVERSE: + children = node.prev + + for next_ in children: + if should_revisit or next_ not in closed: + open_.append(next_) + + def visit_forward(self): + 
self._visit_internal(_WalkMode.FORWARD) + + def visit_reverse(self): + self._visit_internal(_WalkMode.REVERSE) + + +class GraphBuilder(object): + """Builder that constructs a CFG from a given AST. + + This GraphBuilder facilitates constructing the DAG that forms the CFG when + nodes + are supplied in lexical order (i.e., top-down, depth first). Under these + conditions, it supports building patterns found in typical structured + programs. + + This builder ignores the flow generated by exceptions, which are assumed to + always be catastrophic and present purely for diagnostic purposes (e.g. to + print debug information). Statements like raise and try/catch sections are + allowed and will generate control flow edges, but ordinary statements are + assumed not to raise exceptions. + + Finally sections are also correctly interleaved between break/continue/return + nodes and their subsequent statements. + + Important concepts: + * nodes - nodes refer to CFG nodes; AST nodes are qualified explicitly + * leaf set - since the graph is constructed gradually, a leaf set maintains + the CFG nodes that will precede the node that the builder expects to + receive next; when an ordinary node is added, it is connected to the + existing leaves and it in turn becomes the new leaf + * jump nodes - nodes that should generate edges other than what + ordinary nodes would; these correspond to break, continue and return + statements + * sections - logical delimiters for subgraphs that require special + edges; there are various types of nodes, each admitting various + types of jump nodes; sections are identified by their corresponding AST + node + """ + + # TODO(mdan): Perhaps detail this in a markdown doc. + # TODO(mdan): Add exception support. 
+ + def __init__(self, parent_ast_node): + self.reset() + self.parent = parent_ast_node + + def reset(self): + """Resets the state of this factory.""" + self.head = None + self.errors = set() + self.node_index = {} + + # TODO(mdan): Too many primitives. Use classes. + self.leaves = set() + + # Note: This mechanism requires that nodes are added in lexical order (top + # to bottom, depth first). + self.active_stmts = set() + self.owners = {} # type: Set[any] + self.forward_edges = set() # type: Tuple[Node, Node] # (from, to) + + self.finally_sections = {} + # Dict values represent (entry, exits) + self.finally_section_subgraphs = { + } # type: Dict[ast.AST, Tuple[Node, Set[Node]]] + # Whether the guard section can be reached from the statement that precedes + # it. + self.finally_section_has_direct_flow = {} + # Finally sections that await their first node. + self.pending_finally_sections = set() + + # Exit jumps keyed by the section they affect. + self.exits = {} + + # The entry of loop sections, keyed by the section. + self.section_entry = {} + # Continue jumps keyed by the section they affect. + self.continues = {} + + # Raise jumps keyed by the except section guarding them. + self.raises = {} + + # The entry of conditional sections, keyed by the section. + self.cond_entry = {} + # Lists of leaf nodes corresponding to each branch in the section. + self.cond_leaves = {} + + def _connect_nodes(self, first, second): + """Connects nodes to signify that control flows from first to second. 
+ + Args: + first: Union[Set[Node, ...], Node] + second: Node + """ + if isinstance(first, Node): + first.next.add(second) + second.prev.add(first) + self.forward_edges.add((first, second)) + else: + for node in first: + self._connect_nodes(node, second) + + def _add_new_node(self, ast_node): + """Grows the graph by adding a CFG node following the current leaves.""" + if ast_node in self.node_index: + raise ValueError('%s added twice' % ast_node) + # Assumption: All CFG nodes have identical life spans, because the graph + # owns them. Nodes should never be used outside the context of an existing + # graph. + node = Node(next_=set(), prev=weakref.WeakSet(), ast_node=ast_node) + self.node_index[ast_node] = node + self.owners[node] = frozenset(self.active_stmts) + + if self.head is None: + self.head = node + + for leaf in self.leaves: + self._connect_nodes(leaf, node) + + # If any finally section awaits its first node, populate it. + for section_id in self.pending_finally_sections: + self.finally_section_subgraphs[section_id][0] = node + self.pending_finally_sections = set() + + return node + + def begin_statement(self, stmt): + """Marks the beginning of a statement. + + Args: + stmt: Hashable, a key by which the statement can be identified in the + CFG's stmt_prev and stmt_next attributes + """ + self.active_stmts.add(stmt) + + def end_statement(self, stmt): + """Marks the end of a statement. + + Args: + stmt: Hashable, a key by which the statement can be identified in the + CFG's stmt_prev and stmt_next attributes; must match a key previously + passed to begin_statement. + """ + self.active_stmts.remove(stmt) + + def add_ordinary_node(self, ast_node): + """Grows the graph by adding an ordinary CFG node. + + Ordinary nodes are followed by the next node, in lexical order, that is, + they become the new leaf set. 
+ + Args: + ast_node: ast.AST + + Returns: + Node + """ + node = self._add_new_node(ast_node) + self.leaves = set((node,)) + return node + + def _add_jump_node(self, ast_node, guards): + """Grows the graph by adding a jump node. + + Jump nodes are added to the current leaf set, and the leaf set becomes + empty. If the jump node is the last in a cond section, then it may be added + back to the leaf set by a separate mechanism. + + Args: + ast_node: ast.AST + guards: Tuple[ast.AST, ...], the finally sections active for this node + + Returns: + Node + """ + node = self._add_new_node(ast_node) + self.leaves = set() + # The guards themselves may not yet be complete, and will be wired later. + self.finally_sections[node] = guards + return node + + def _connect_jump_to_finally_sections(self, node): + """Connects a jump node to the finally sections protecting it.""" + cursor = set((node,)) + if node not in self.finally_sections: + return cursor + for guard_section_id in self.finally_sections[node]: + guard_begin, guard_ends = self.finally_section_subgraphs[guard_section_id] + self._connect_nodes(cursor, guard_begin) + cursor = guard_ends + del self.finally_sections[node] + # TODO(mdan): Should garbage-collect finally_section_subgraphs. + return cursor + + def add_exit_node(self, ast_node, section_id, guards): + """Grows the graph by adding an exit node. + + This node becomes an exit for the current section. + + Args: + ast_node: ast.AST + section_id: Hashable, the node for which ast_node should be considered to + be an exit node + guards: Tuple[ast.AST, ...], the finally sections that guard ast_node + + Returns: + Node + """ + node = self._add_jump_node(ast_node, guards) + self.exits[section_id].add(node) + return node + + def add_continue_node(self, ast_node, section_id, guards): + """Grows the graph by adding a reentry node. + + This node causes control flow to go back to the loop section's entry. 
+ + Args: + ast_node: ast.AST + section_id: Hashable, the node for which ast_node should be considered to + be an exit node + guards: Tuple[ast.AST, ...], the finally sections that guard ast_node + """ + node = self._add_jump_node(ast_node, guards) + self.continues[section_id].add(node) + + def connect_raise_node(self, node, except_guards): + """Adds extra connection between a raise node and containing except guards. + + The node is a graph node, not an ast node. + + Args: + node: Node + except_guards: Tuple[ast.AST, ...], the except sections that guard node + """ + for guard in except_guards: + if guard in self.raises: + self.raises[guard].append(node) + else: + self.raises[guard] = [node] + + def enter_section(self, section_id): + """Enters a regular section. + + Regular sections admit exit jumps, which end the section. + + Args: + section_id: Hashable, the same node that will be used in calls to the + ast_node arg passed to add_exit_node + """ + assert section_id not in self.exits + self.exits[section_id] = set() + + def exit_section(self, section_id): + """Exits a regular section.""" + + # Exits are jump nodes, which may be protected. + for exit_ in self.exits[section_id]: + self.leaves |= self._connect_jump_to_finally_sections(exit_) + + del self.exits[section_id] + + def enter_loop_section(self, section_id, entry_node): + """Enters a loop section. + + Loop sections define an entry node. The end of the section always flows back + to the entry node. These admit continue jump nodes which also flow to the + entry node. + + Args: + section_id: Hashable, the same node that will be used in calls to the + ast_node arg passed to add_continue_node + entry_node: ast.AST, the entry node into the loop (e.g. 
the test node for + while loops) + """ + assert section_id not in self.section_entry + assert section_id not in self.continues + self.continues[section_id] = set() + node = self.add_ordinary_node(entry_node) + self.section_entry[section_id] = node + + def exit_loop_section(self, section_id): + """Exits a loop section.""" + self._connect_nodes(self.leaves, self.section_entry[section_id]) + + # continues are jump nodes, which may be protected. + for reentry in self.continues[section_id]: + guard_ends = self._connect_jump_to_finally_sections(reentry) + self._connect_nodes(guard_ends, self.section_entry[section_id]) + + # Loop nodes always loop back. + self.leaves = set((self.section_entry[section_id],)) + + del self.continues[section_id] + del self.section_entry[section_id] + + def enter_cond_section(self, section_id): + """Enters a conditional section. + + Conditional sections define an entry node, and one or more branches. + + Args: + section_id: Hashable, the same node that will be used in calls to the + section_id arg passed to new_cond_branch + """ + + assert section_id not in self.cond_entry + assert section_id not in self.cond_leaves + self.cond_leaves[section_id] = [] + + def new_cond_branch(self, section_id): + """Begins a new branch in a cond section.""" + assert section_id in self.cond_leaves + + if section_id in self.cond_entry: + # Subsequent splits move back to the split point, and memorize the + # current leaves. + self.cond_leaves[section_id].append(self.leaves) + self.leaves = self.cond_entry[section_id] + else: + # If this is the first time we split a section, just remember the split + # point. 
+ self.cond_entry[section_id] = self.leaves + + def exit_cond_section(self, section_id): + """Exits a conditional section.""" + for split in self.cond_leaves[section_id]: + self.leaves |= split + del self.cond_entry[section_id] + del self.cond_leaves[section_id] + + def enter_except_section(self, section_id): + """Enters an except section.""" + if section_id in self.raises: + self.leaves.update(self.raises[section_id]) + + def enter_finally_section(self, section_id): + """Enters a finally section.""" + # TODO(mdan): This, not the caller, should track the active sections. + self.finally_section_subgraphs[section_id] = [None, None] + if self.leaves: + self.finally_section_has_direct_flow[section_id] = True + else: + self.finally_section_has_direct_flow[section_id] = False + self.pending_finally_sections.add(section_id) + + def exit_finally_section(self, section_id): + """Exits a finally section.""" + assert section_id not in self.pending_finally_sections, 'Empty finally?' + self.finally_section_subgraphs[section_id][1] = self.leaves + # If the guard can only be reached by a jump, then it will not flow + # into the statement that follows it. + if not self.finally_section_has_direct_flow[section_id]: + self.leaves = set() + del self.finally_section_has_direct_flow[section_id] + + def build(self): + """Returns the CFG accumulated so far and resets the builder. + + Returns: + Graph + """ + # Freeze the nodes. + for node in self.node_index.values(): + node.freeze() + + # Build the statement edges. 
+ stmt_next = {} + stmt_prev = {} + + for node in self.node_index.values(): + for stmt in self.owners[node]: + if stmt not in stmt_prev: + stmt_prev[stmt] = set() + if stmt not in stmt_next: + stmt_next[stmt] = set() + + for first, second in self.forward_edges: + stmts_exited = self.owners[first] - self.owners[second] + for stmt in stmts_exited: + stmt_next[stmt].add(second) + stmts_entered = self.owners[second] - self.owners[first] + for stmt in stmts_entered: + stmt_prev[stmt].add(first) + for stmt in stmt_next: + stmt_next[stmt] = frozenset(stmt_next[stmt]) + for stmt in stmt_prev: + stmt_prev[stmt] = frozenset(stmt_prev[stmt]) + + # Construct the final graph object. + result = Graph( + entry=self.head, + exit=self.leaves, + error=self.errors, + index=self.node_index, + stmt_prev=stmt_prev, + stmt_next=stmt_next) + + # Reset the state. + self.reset() + + return result + + +class AstToCfg(gast.NodeVisitor): + """Converts an AST to CFGs. + + A separate CFG will be constructed for each function. 
+ """ + + def __init__(self): + super(AstToCfg, self).__init__() + + self.builder_stack = [] + self.builder = None + self.cfgs = {} + + self.lexical_scopes = [] + + def _enter_lexical_scope(self, node): + self.lexical_scopes.append(node) + + def _exit_lexical_scope(self, node): + leaving_node = self.lexical_scopes.pop() + assert node == leaving_node + + def _get_enclosing_finally_scopes(self, stop_at): + included = [] + for node in reversed(self.lexical_scopes): + if isinstance(node, gast.Try) and node.finalbody: + included.append(node) + if isinstance(node, stop_at): + return node, included + return None, included + + def _get_enclosing_except_scopes(self, stop_at): + included = [] + for node in reversed(self.lexical_scopes): + if isinstance(node, gast.Try) and node.handlers: + included.extend(node.handlers) + if isinstance(node, stop_at): + break + return included + + def _process_basic_statement(self, node): + self.generic_visit(node) + self.builder.add_ordinary_node(node) + + def _process_exit_statement(self, + node, + exits_nodes_of_type, + may_exit_via_except=False): + self.generic_visit(node) + # Note: this is safe because we process functions separately. + try_node, guards = self._get_enclosing_finally_scopes(exits_nodes_of_type) + assert try_node is not None, '{} that is not enclosed by any of {}'.format( + node, exits_nodes_of_type) + + node = self.builder.add_exit_node(node, try_node, guards) + + if may_exit_via_except: + except_guards = self._get_enclosing_except_scopes(exits_nodes_of_type) + self.builder.connect_raise_node(node, except_guards) + + def _process_continue_statement(self, node, *loops_to_nodes_of_type): + # Note: this is safe because we process functions separately. 
+ try_node, guards = self._get_enclosing_finally_scopes( + tuple(loops_to_nodes_of_type)) + if try_node is None: + raise ValueError('%s that is not enclosed by any of %s' % + (node, loops_to_nodes_of_type)) + self.builder.add_continue_node(node, try_node, guards) + + def visit_ClassDef(self, node): + # We also keep the ClassDef node in the CFG, since it technically is a + # statement. + # For example, this is legal and allows executing user code: + # + # class Foo(bar()): + # pass + # + # It also has a scope: + # + # class Bar(object): + # a = 1 + if self.builder is None: + self.generic_visit(node) + return + + self.builder.add_ordinary_node(node) + + self.builder_stack.append(self.builder) + self.builder = GraphBuilder(node) + self._enter_lexical_scope(node) + + self._process_basic_statement(node) + + self._exit_lexical_scope(node) + # TODO(mdan): Track the CFG local to the class definition as well? + self.builder = self.builder_stack.pop() + + def _process_function_def(self, node, is_lambda): + # The function body is stored in a separate graph, because function + # definitions have effects very different from function calls. 
+ if self.builder is not None: + self.builder.add_ordinary_node(node) + + self.builder_stack.append(self.builder) + self.builder = GraphBuilder(node) + + self._enter_lexical_scope(node) + self.builder.enter_section(node) + + self._process_basic_statement(node.args) + if is_lambda: + self._process_exit_statement(node.body, (gast.Lambda,)) + else: + for stmt in node.body: + self.visit(stmt) + + self.builder.exit_section(node) + self._exit_lexical_scope(node) + + self.cfgs[node] = self.builder.build() + self.builder = self.builder_stack.pop() + + def visit_FunctionDef(self, node): + self._process_function_def(node, is_lambda=False) + + def visit_Lambda(self, node): + self._process_function_def(node, is_lambda=True) + + def visit_Return(self, node): + self._process_exit_statement(node, (gast.FunctionDef,)) + + def visit_Import(self, node): + self._process_basic_statement(node) + + def visit_ImportFrom(self, node): + self._process_basic_statement(node) + + def visit_Expr(self, node): + self._process_basic_statement(node) + + def visit_NamedExpr(self, node): + # TODO(yileiyang): Add a test case once we have a newer astunparse version. + # NamedExpr was introduced in Python 3.8 and supported in gast 0.5.1+. + self._process_basic_statement(node) + + def visit_Assign(self, node): + self._process_basic_statement(node) + + def visit_AnnAssign(self, node): + self._process_basic_statement(node) + + def visit_AugAssign(self, node): + self._process_basic_statement(node) + + def visit_Pass(self, node): + self._process_basic_statement(node) + + def visit_Global(self, node): + self._process_basic_statement(node) + + def visit_Nonlocal(self, node): + self._process_basic_statement(node) + + def visit_Print(self, node): + self._process_basic_statement(node) + + def visit_Raise(self, node): + self._process_exit_statement( + node, (gast.FunctionDef,), may_exit_via_except=True) + self.builder.errors.add(node) + + def visit_Assert(self, node): + # Ignoring the effect of exceptions. 
+ self._process_basic_statement(node) + + def visit_Delete(self, node): + self._process_basic_statement(node) + + def visit_If(self, node): + # No need to track ifs as lexical scopes, for now. + # Lexical scopes are generally tracked in order to be able to resolve the + # targets of jump statements like break/continue/etc. Since there is no + # statement that can interrupt a conditional, we don't need to track their + # lexical scope. That may change in the future. + self.builder.begin_statement(node) + + self.builder.enter_cond_section(node) + self._process_basic_statement(node.test) + + self.builder.new_cond_branch(node) + for stmt in node.body: + self.visit(stmt) + + self.builder.new_cond_branch(node) + for stmt in node.orelse: + self.visit(stmt) + + self.builder.exit_cond_section(node) + self.builder.end_statement(node) + + def visit_While(self, node): + self.builder.begin_statement(node) + self._enter_lexical_scope(node) + + self.builder.enter_section(node) + + self.generic_visit(node.test) + self.builder.enter_loop_section(node, node.test) + for stmt in node.body: + self.visit(stmt) + self.builder.exit_loop_section(node) + + # Note: although the orelse is technically part of the loop node, + # the statements inside it don't affect the loop itself. For example, a + # break in the loop's orelse will not affect the loop itself. + self._exit_lexical_scope(node) + + for stmt in node.orelse: + self.visit(stmt) + + self.builder.exit_section(node) + self.builder.end_statement(node) + + def visit_For(self, node): + self.builder.begin_statement(node) + self._enter_lexical_scope(node) + + self.builder.enter_section(node) + + # Note: Strictly speaking, this should be node.target + node.iter. + # However, the activity analysis accounts for this inconsistency, + # so dataflow analysis produces the correct values. 
+ self.generic_visit(node.iter) + self.builder.enter_loop_section(node, node.iter) + # Also include the "extra loop test" annotation, to capture things like the + # control variable for return and break in for loops. + if anno.hasanno(node, anno.Basic.EXTRA_LOOP_TEST): + self._process_basic_statement( + anno.getanno(node, anno.Basic.EXTRA_LOOP_TEST)) + for stmt in node.body: + self.visit(stmt) + self.builder.exit_loop_section(node) + + # Note: although the orelse is technically part of the loop node, + # they don't count as loop bodies. For example, a break in the loop's + # orelse will affect the parent loop, not the current one. + self._exit_lexical_scope(node) + + for stmt in node.orelse: + self.visit(stmt) + + self.builder.exit_section(node) + self.builder.end_statement(node) + + def visit_Break(self, node): + self._process_exit_statement(node, ( + gast.While, + gast.For, + )) + + def visit_Continue(self, node): + self._process_continue_statement(node, ( + gast.While, + gast.For, + )) + + def visit_ExceptHandler(self, node): + self.builder.begin_statement(node) + self.builder.enter_except_section(node) + + if node.type is not None: + self.visit(node.type) + if node.name is not None: + self.visit(node.name) + + for stmt in node.body: + self.visit(stmt) + + self.builder.end_statement(node) + + def visit_Try(self, node): + self.builder.begin_statement(node) + self._enter_lexical_scope(node) + + # Note: the current simplification is that the try block fully executes + # regardless of whether an exception triggers or not. This is consistent + # with blocks free of try/except, which also don't account for the + # possibility of an exception being raised mid-block. + + for stmt in node.body: + self.visit(stmt) + # The orelse is an optional continuation of the body. 
+ if node.orelse: + block_representative = node.orelse[0] + self.builder.enter_cond_section(block_representative) + self.builder.new_cond_branch(block_representative) + for stmt in node.orelse: + self.visit(stmt) + self.builder.new_cond_branch(block_representative) + self.builder.exit_cond_section(block_representative) + + self._exit_lexical_scope(node) + + if node.handlers: + # Using node would be inconsistent. Using the first handler node is also + # inconsistent, but less so. + block_representative = node.handlers[0] + self.builder.enter_cond_section(block_representative) + for block in node.handlers: + self.builder.new_cond_branch(block_representative) + self.visit(block) + self.builder.new_cond_branch(block_representative) + self.builder.exit_cond_section(block_representative) + + if node.finalbody: + self.builder.enter_finally_section(node) + for stmt in node.finalbody: + self.visit(stmt) + self.builder.exit_finally_section(node) + + self.builder.end_statement(node) + + def visit_With(self, node): + # TODO(mdan): Mark the context manager's exit call as exit guard. + for item in node.items: + self._process_basic_statement(item) + for stmt in node.body: + self.visit(stmt) + + +def build(node): + visitor = AstToCfg() + visitor.visit(node) + return visitor.cfgs diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/error_utils.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/error_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..95fe2d78324ec1f71061923234e09b85075e73a7 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/error_utils.py @@ -0,0 +1,230 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code transformation exceptions."""

import collections

from tensorflow.python.autograph.pyct import origin_info
from tensorflow.python.util import traceback_utils


class FrameInfo(
    collections.namedtuple('FrameInfo',
                           ('filename', 'lineno', 'function_name', 'code',
                            'is_converted', 'is_allowlisted'))):
  # A summarized stack frame. is_converted marks frames whose location was
  # mapped back to user code via a source map; is_allowlisted marks user
  # frames that called directly into converted code.

  __slots__ = ()


def _stack_trace_inside_mapped_code(tb, source_map, converter_filename):
  """Summarizes inner traceback frames up to the call to a given function.

  This functions locates the innermost (i.e. most recent) frame that
  corresponds to code that can be mapped by source_map originated from, and
  returns a translated stack trace ending at that frame. If no such frame is
  found, the entire stack trace is summarized.

  For example, the following code:

    def f():
      for i in tf.range(1):
        z = y + i  # z only defined here

  generates a traceback of generated-code frames (for_stmt,
  _known_len_tf_for_stmt, _disallow_undefs_into_loop, raise ...) whose
  innermost mappable frame is translated back to the original line
  `for i in tf.range(1):` while the deeper frames are kept as-is.
  NOTE(review): the original example listing was garbled during extraction
  and has been paraphrased above -- confirm against upstream docs.

  Args:
    tb: traceback.FrameSummary, The traceback corresponding to an error.
      Typically, the output of traceback.Summary.extract(capture_locals=True).
    source_map: Dict[LineLocation, OriginInfo], a source map as created by
      origin_info.create_source_map.
    converter_filename: str, the file path of the converted module. Call frames
      corresponding to this module are elided and their preceding frames are
      marked as allowlisted. Note that frames enclosing converted code are
      dropped using a different mechanism.

  Returns:
    List[FrameInfo]
  """
  result_frames = []
  # Walk from the innermost frame outwards.
  for filename, line_number, function_name, text in reversed(tb):

    loc = origin_info.LineLocation(filename=filename, lineno=line_number)
    if loc in source_map:
      # Innermost mappable frame found: translate it and stop.
      origin = source_map[loc]
      fi = FrameInfo(
          filename=origin.loc.filename,
          lineno=origin.loc.lineno,
          function_name=origin.function_name,
          code=origin.source_code_line,
          is_converted=True,
          is_allowlisted=False)
      result_frames.append(fi)
      break

    if filename == converter_filename:
      # Elide the converter's own frame; instead, mark the frame that called
      # into it (the previously appended one) as allowlisted.
      if result_frames:
        prev = result_frames[-1]
        assert not prev.is_converted  # See the if above.
        fi = FrameInfo(
            filename=prev.filename,
            lineno=prev.lineno,
            function_name=prev.function_name,
            code=prev.code,
            is_converted=False,
            is_allowlisted=True)
        result_frames[-1] = fi
      continue

    fi = FrameInfo(
        filename=filename,
        lineno=line_number,
        function_name=function_name,
        code=text,
        is_converted=False,
        is_allowlisted=False)
    result_frames.append(fi)

  return tuple(result_frames)


# Exception types whose constructor accepts a single message string, so they
# can be reconstructed directly with the translated message.
KNOWN_STRING_CONSTRUCTOR_ERRORS = (
    AssertionError,
    AttributeError,
    NameError,
    NotImplementedError,
    RuntimeError,
    StopIteration,
    TypeError,
    UnboundLocalError,
    ValueError,
)


# KeyError escapes newlines in strings. We create a special subclass
# that doesn't do that. Overriding the name for display purposes; hopefully
# that won't create too many surprises.
class MultilineMessageKeyError(KeyError):
  """KeyError variant whose str() preserves newlines instead of escaping them.

  The original key is kept in args (for compatibility with KeyError), while
  __str__ is overridden to render the full multi-line message.
  """

  def __init__(self, message, original_key):
    super(MultilineMessageKeyError, self).__init__(original_key)
    self.__message = message

  def __str__(self):
    return self.__message

# Display under the familiar name in rendered tracebacks.
MultilineMessageKeyError.__name__ = KeyError.__name__


class ErrorMetadataBase(object):
  """Container objects attached to exceptions raised in user code.

  This metadata allows re-raising exceptions that occur in generated code, with
  a custom error message that includes a stack trace relative to user-readable
  code from which the generated code originated.
  """

  __slots__ = ('translated_stack', 'cause_message')

  def __init__(self, callsite_tb, cause_metadata, cause_message, source_map,
               converter_filename):
    """Translates the callsite traceback, chaining any causing metadata.

    Args:
      callsite_tb: the extracted traceback of the error callsite (as produced
        by traceback.StackSummary.extract).
      cause_metadata: Optional[ErrorMetadataBase], the metadata of the causing
        error, when this error was raised while handling another one.
      cause_message: str, message of the root cause.
      source_map: Dict[LineLocation, OriginInfo], maps generated code back to
        the user code it originated from.
      converter_filename: str, path of the converter module whose frames are
        elided from the translated trace.
    """
    translated_stack = _stack_trace_inside_mapped_code(
        callsite_tb, source_map, converter_filename)

    if cause_metadata is None:
      self.translated_stack = translated_stack
      self.cause_message = cause_message
    else:
      # Daisy chain the translated stacks.
      self.translated_stack = (
          cause_metadata.translated_stack + (translated_stack[-1],))
      self.cause_message = cause_metadata.cause_message

  def get_message(self):
    """Returns the message for the underlying exception."""
    lines = []

    lines.append('in user code:')
    lines.append('')

    for frame_info in reversed(self.translated_stack):
      # Respect the global traceback filtering setting, if enabled.
      if (traceback_utils.is_traceback_filtering_enabled() and
          not traceback_utils.include_frame(frame_info.filename)):
        continue

      # Same format with Python traceback.
      formatted_line = (f'    File "{frame_info.filename}", line '
                        f'{frame_info.lineno}, in {frame_info.function_name}')
      # '*' marks converted frames, '**' marks allowlisted user frames.
      if frame_info.is_converted:
        formatted_line += '  *'
      elif frame_info.is_allowlisted:
        formatted_line += '  **'
      lines.append(formatted_line)

      if frame_info.code is None:
        code_snippet = '<source unavailable>'
      else:
        code_snippet = frame_info.code.strip()
      lines.append('        {}'.format(code_snippet))

    lines.append('')

    message_lines = self.cause_message.split('\n')
    for i in range(len(message_lines)):
      message_lines[i] = '    ' + message_lines[i]
    lines.extend(message_lines)

    lines.append('')

    return '\n'.join(lines)

  def create_exception(self, source_error):
    """Creates a new exception of the same type, carrying the translated message.

    Args:
      source_error: the original exception raised from generated code.

    Returns:
      A new exception of type(source_error) when that type can be constructed
      from a single message string (or is KeyError); None otherwise, in which
      case the caller should fall back to re-raising the original error.
    """
    preferred_type = type(source_error)
    # Types with the default Exception constructor, or known single-message
    # constructors, can be rebuilt directly from the translated message.
    # (Previously these were two separate `if`s, which built a throwaway
    # exception and computed get_message() twice for KeyError.)
    if (preferred_type.__init__ is Exception.__init__ or
        preferred_type in KNOWN_STRING_CONSTRUCTOR_ERRORS):
      to_ret = preferred_type(self.get_message())
    elif preferred_type is KeyError:
      # KeyError escapes newlines; use the custom subclass instead.
      to_ret = MultilineMessageKeyError(self.get_message(), self.cause_message)
    else:
      to_ret = None

    if to_ret is not None:
      return to_ret.with_traceback(source_error.__traceback__)

  def to_exception(self, source_error):
    """Returns a new exception mirroring source_error, with this metadata attached."""
    # NOTE: assumes create_exception succeeded; a None return here would
    # raise AttributeError below (same as the original behavior).
    exc = self.create_exception(source_error)
    exc.__suppress_context__ = True
    exc.ag_error_metadata = self
    return exc
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code transformation exceptions."""


class PyCTError(Exception):
  """Root of the exception hierarchy raised by PyCT code transformations."""


class UnsupportedLanguageElementError(PyCTError, NotImplementedError):
  """Signals a Python construct that AutoGraph does not support."""


class InaccessibleSourceCodeError(PyCTError, ValueError):
  """Signals that inspect could not retrieve an entity's source code."""
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gast compatibility library. Supports 0.2.2 and 0.3.2."""
# TODO(mdan): Remove this file once it's safe to break compatibility.

import functools

import gast


# gast 0.2.x exposes gast.Str; its absence implies the 0.3+ API.
GAST2 = hasattr(gast, 'Str')
GAST3 = not GAST2


def _is_constant_gast_2(node):
  """Constant test for the gast 0.2 node taxonomy."""
  constant_node_types = (gast.Num, gast.Str, gast.Bytes, gast.Ellipsis,
                         gast.NameConstant)
  return isinstance(node, constant_node_types)


def _is_constant_gast_3(node):
  """Constant test for the unified gast 0.3+ Constant node."""
  return isinstance(node, gast.Constant)


def is_literal(node):
  """Tests whether node represents a Python literal."""
  # Normal literals, True/False/None/Etc. in Python3.
  if is_constant(node):
    return True
  # In Python 2, True/False/None/Etc. appear as plain names.
  return isinstance(node, gast.Name) and node.id in ('True', 'False', 'None')


def _is_ellipsis_gast_2(node):
  """Ellipsis test for the gast 0.2 node taxonomy."""
  return isinstance(node, gast.Ellipsis)


def _is_ellipsis_gast_3(node):
  """Ellipsis test for the unified gast 0.3+ Constant node."""
  return isinstance(node, gast.Constant) and node.value == Ellipsis


# Bind the version-appropriate implementations and node constructors.
if GAST2:
  is_constant = _is_constant_gast_2
  is_ellipsis = _is_ellipsis_gast_2

  Module = gast.Module
  Name = gast.Name
  Str = gast.Str

elif GAST3:
  is_constant = _is_constant_gast_3
  is_ellipsis = _is_ellipsis_gast_3

  Module = functools.partial(gast.Module, type_ignores=None)  # pylint:disable=invalid-name
  Name = functools.partial(gast.Name, type_comment=None)  # pylint:disable=invalid-name
  Str = functools.partial(gast.Constant, kind=None)  # pylint:disable=invalid-name

else:
  assert False
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Live entity inspection utilities.

This module contains whatever inspect doesn't offer out of the box.
"""

import builtins
import inspect
import itertools
import linecache
import sys
import threading
import types

from tensorflow.python.util import tf_inspect

# This lock seems to help avoid linecache concurrency errors.
_linecache_lock = threading.Lock()

# Cache all the builtin elements in a frozen set for faster lookup.
_BUILTIN_FUNCTION_IDS = frozenset(id(v) for v in builtins.__dict__.values())


def islambda(f):
  """Returns True if f is a lambda function."""
  if not tf_inspect.isfunction(f):
    return False
  # TODO(mdan): Look into checking only the code object.
  if not (hasattr(f, '__name__') and hasattr(f, '__code__')):
    return False
  # Some wrappers can rename the function, but changing the name of the
  # code object is harder.
  # The interpreter names lambdas '<lambda>'; the comparison strings had been
  # corrupted to '' in this copy, which made islambda match ordinary
  # functions with empty names only.
  return ((f.__name__ == '<lambda>') or (f.__code__.co_name == '<lambda>'))


def isnamedtuple(f):
  """Returns True if the argument is a namedtuple-like."""
  if not (tf_inspect.isclass(f) and issubclass(f, tuple)):
    return False
  if not hasattr(f, '_fields'):
    return False
  fields = getattr(f, '_fields')
  if not isinstance(fields, tuple):
    return False
  if not all(isinstance(f, str) for f in fields):
    return False
  return True


def isbuiltin(f):
  """Returns True if the argument is a built-in function."""
  if id(f) in _BUILTIN_FUNCTION_IDS:
    return True
  elif isinstance(f, types.BuiltinFunctionType):
    return True
  elif inspect.isbuiltin(f):
    return True
  elif f is eval:
    return True
  else:
    return False


def isconstructor(cls):
  """Returns True if the argument is an object constructor.

  In general, any object of type class is a constructor, with the exception
  of classes created using a callable metaclass.
  See below for why a callable metaclass is not a trivial combination:
  https://docs.python.org/2.7/reference/datamodel.html#customizing-class-creation

  Args:
    cls: Any

  Returns:
    Bool
  """
  return (inspect.isclass(cls) and
          not (issubclass(cls.__class__, type) and
               hasattr(cls.__class__, '__call__') and
               cls.__class__.__call__ is not type.__call__))


def _fix_linecache_record(obj):
  """Fixes potential corruption of linecache in the presence of functools.wraps.

  functools.wraps modifies the target object's __module__ field, which seems
  to confuse linecache in special instances, for example when the source is
  loaded from a .par file (see https://google.github.io/subpar/subpar.html).

  This function simply triggers a call to linecache.updatecache when a mismatch
  was detected between the object's __module__ property and the object's source
  file.

  Args:
    obj: Any
  """
  if hasattr(obj, '__module__'):
    obj_file = inspect.getfile(obj)
    obj_module = obj.__module__

    # A snapshot of the loaded modules helps avoid "dict changed size during
    # iteration" errors.
    loaded_modules = tuple(sys.modules.values())
    for m in loaded_modules:
      if hasattr(m, '__file__') and m.__file__ == obj_file:
        if obj_module is not m:
          linecache.updatecache(obj_file, m.__dict__)


def getimmediatesource(obj):
  """A variant of inspect.getsource that ignores the __wrapped__ property."""
  with _linecache_lock:
    _fix_linecache_record(obj)
    lines, lnum = inspect.findsource(obj)
    return ''.join(inspect.getblock(lines[lnum:]))


def getnamespace(f):
  """Returns the complete namespace of a function.

  Namespace is defined here as the mapping of all non-local variables to values.
  This includes the globals and the closure variables. Note that this captures
  the entire globals collection of the function, and may contain extra symbols
  that it does not actually use.

  Args:
    f: User defined function.

  Returns:
    A dict mapping symbol names to values.
  """
  namespace = dict(f.__globals__)
  closure = f.__closure__
  freevars = f.__code__.co_freevars
  if freevars and closure:
    for name, cell in zip(freevars, closure):
      try:
        namespace[name] = cell.cell_contents
      except ValueError:
        # Cell contains undefined variable, omit it from the namespace.
        pass
  return namespace


def getqualifiedname(namespace, object_, max_depth=5, visited=None):
  """Returns the name by which a value can be referred to in a given namespace.

  If the object defines a parent module, the function attempts to use it to
  locate the object.

  This function will recurse inside modules, but it will not search objects for
  attributes. The recursion depth is controlled by max_depth.

  Args:
    namespace: Dict[str, Any], the namespace to search into.
    object_: Any, the value to search.
    max_depth: Optional[int], a limit to the recursion depth when searching
      inside modules.
    visited: Optional[Set[int]], ID of modules to avoid visiting.

  Returns:
    Union[str, None], the fully-qualified name that resolves to the value
    object_, or None if it couldn't be found.
  """
  if visited is None:
    visited = set()

  # Copy the dict to avoid "changed size error" during concurrent invocations.
  # TODO(mdan): This is on the hot path. Can we avoid the copy?
  namespace = dict(namespace)

  for name in namespace:
    # The value may be referenced by more than one symbol, case in which
    # any symbol will be fine. If the program contains symbol aliases that
    # change over time, this may capture a symbol that will later point to
    # something else.
    # TODO(mdan): Prefer the symbol that matches the value type name.
    if object_ is namespace[name]:
      return name

  # If an object is not found, try to search its parent modules.
  parent = tf_inspect.getmodule(object_)
  if (parent is not None and parent is not object_ and parent is not namespace):
    # No limit to recursion depth because of the guard above.
    parent_name = getqualifiedname(
        namespace, parent, max_depth=0, visited=visited)
    if parent_name is not None:
      name_in_parent = getqualifiedname(
          parent.__dict__, object_, max_depth=0, visited=visited)
      assert name_in_parent is not None, (
          'An object should always be found in its owner module')
      return '{}.{}'.format(parent_name, name_in_parent)

  if max_depth:
    # Iterating over a copy prevents "changed size due to iteration" errors.
    # It's unclear why those occur - suspecting new modules may load during
    # iteration.
    for name in namespace.keys():
      value = namespace[name]
      if tf_inspect.ismodule(value) and id(value) not in visited:
        visited.add(id(value))
        name_in_module = getqualifiedname(value.__dict__, object_,
                                          max_depth - 1, visited)
        if name_in_module is not None:
          return '{}.{}'.format(name, name_in_module)
  return None


def getdefiningclass(m, owner_class):
  """Resolves the class (e.g. one of the superclasses) that defined a method."""
  method_name = m.__name__
  for super_class in inspect.getmro(owner_class):
    if ((hasattr(super_class, '__dict__') and
         method_name in super_class.__dict__) or
        (hasattr(super_class, '__slots__') and
         method_name in super_class.__slots__)):
      return super_class
  return owner_class


def getmethodclass(m):
  """Resolves a function's owner, e.g.

  a method's class.

  Note that this returns the object that the function was retrieved from, not
  necessarily the class where it was defined.

  This function relies on Python stack frame support in the interpreter, and
  has the same limitations that inspect.currentframe.

  Limitations. This function will only work correctly if the owned class is
  visible in the caller's global or local variables.

  Args:
    m: A user defined function

  Returns:
    The class that this function was retrieved from, or None if the function
    is not an object or class method, or the class that owns the object or
    method is not visible to m.

  Raises:
    ValueError: if the class could not be resolved for any unexpected reason.
  """

  # Callable objects: return their own class.
  if (not hasattr(m, '__name__') and hasattr(m, '__class__') and
      hasattr(m, '__call__')):
    if isinstance(m.__class__, type):
      return m.__class__

  # Instance and class: return the class of "self".
  m_self = getattr(m, '__self__', None)
  if m_self is not None:
    if inspect.isclass(m_self):
      return m_self
    return m_self.__class__

  # Class, static and unbound methods: search all defined classes in any
  # namespace. This is inefficient but more robust a method.
  owners = []
  caller_frame = tf_inspect.currentframe().f_back
  try:
    # TODO(mdan): This doesn't consider cell variables.
    # TODO(mdan): This won't work if the owner is hidden inside a container.
    # Cell variables may be pulled using co_freevars and the closure.
    for v in itertools.chain(caller_frame.f_locals.values(),
                             caller_frame.f_globals.values()):
      if hasattr(v, m.__name__):
        candidate = getattr(v, m.__name__)
        # Py2 methods may be bound or unbound, extract im_func to get the
        # underlying function.
        if hasattr(candidate, 'im_func'):
          candidate = candidate.im_func
        if hasattr(m, 'im_func'):
          m = m.im_func
        if candidate is m:
          owners.append(v)
  finally:
    # Break the reference cycle frame -> locals -> frame.
    del caller_frame

  if owners:
    if len(owners) == 1:
      return owners[0]

    # If multiple owners are found, and are not subclasses, raise an error.
    owner_types = tuple(o if tf_inspect.isclass(o) else type(o) for o in owners)
    for o in owner_types:
      if tf_inspect.isclass(o) and issubclass(o, tuple(owner_types)):
        return o
    raise ValueError('Found too many owners of %s: %s' % (m, owners))

  return None


def getfutureimports(entity):
  """Detects what future imports are necessary to safely execute entity source.

  Args:
    entity: Any object

  Returns:
    A tuple of future strings
  """
  if not (tf_inspect.isfunction(entity) or tf_inspect.ismethod(entity)):
    return tuple()
  return tuple(
      sorted(name for name, value in entity.__globals__.items()
             if getattr(value, '__module__', None) == '__future__'))
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converting AST to code and Python entities.

Adapted from Tangent.
"""

import atexit
import errno
import importlib
import os
import sys
import tempfile

from tensorflow.python.autograph.pyct import origin_info
from tensorflow.python.autograph.pyct import parser


def _remove_file(file_name):
  """Remove a file, if it exists."""
  try:
    os.remove(file_name)
  except OSError as e:
    if e.errno == errno.ENOENT:
      # The file disappeared. Ignore this. Temporary files might get
      # cleaned up, especially if they reside in /tmp.
      pass
    else:
      raise


def load_source(source, delete_on_exit):
  """Loads the given source code as a Python module."""
  with tempfile.NamedTemporaryFile(
      mode='w',
      suffix='.py',
      prefix='__autograph_generated_file',
      delete=False,
      encoding='utf-8') as f:
    module_name = os.path.basename(f.name[:-3])
    file_name = f.name
    f.write(source)

  if delete_on_exit:
    atexit.register(lambda: _remove_file(file_name))

  spec = importlib.util.spec_from_file_location(module_name, file_name)
  module = importlib.util.module_from_spec(spec)
  spec.loader.exec_module(module)
  # TODO(mdan): Use our own garbage-collected cache instead of sys.modules.
  sys.modules[module_name] = module
  return module, file_name


def load_ast(nodes,
             indentation='  ',
             include_source_map=False,
             delete_on_exit=True):
  """Loads the given AST as a Python module.

  Compiling the AST code this way ensures that the source code is readable by
  e.g. `pdb` or `inspect`.

  Args:
    nodes: Union[ast.AST, Iterable[ast.AST]], the code to compile, as an AST
      object.
    indentation: Text, the string to use for indentation.
    include_source_map: bool, whether return a source map.
    delete_on_exit: bool, whether to delete the temporary file used for
      compilation on exit.

  Returns:
    Tuple[module, Text, Dict[LineLocation, OriginInfo]], containing:
    the module containing the unparsed nodes, the source code corresponding to
    nodes, and the source map. Is include_source_map is False, the source map
    will be None.
  """
  if not isinstance(nodes, (list, tuple)):
    nodes = (nodes,)

  source = parser.unparse(nodes, indentation=indentation)
  module, _ = load_source(source, delete_on_exit)

  if include_source_map:
    source_map = origin_info.create_source_map(nodes, source, module.__file__)
  else:
    source_map = None

  # TODO(mdan): Return a structured object.
+ return module, source, source_map diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/naming.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/naming.py new file mode 100644 index 0000000000000000000000000000000000000000..d9bfbe7fc3bf39c2196cfd9fb74c1f180b69586d --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/naming.py @@ -0,0 +1,53 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Symbol naming utilities.""" + +from tensorflow.python.autograph.pyct import qual_names + + +class Namer(object): + """Symbol name generator.""" + + def __init__(self, global_namespace): + self.global_namespace = global_namespace + self.generated_names = set() + + def new_symbol(self, name_root, reserved_locals): + """See control_flow.SymbolNamer.new_symbol.""" + # reserved_locals may contain QNs. 
+ all_reserved_locals = set() + for s in reserved_locals: + if isinstance(s, qual_names.QN): + all_reserved_locals.update(s.qn) + elif isinstance(s, str): + all_reserved_locals.add(s) + else: + raise ValueError('Unexpected symbol type "%s"' % type(s)) + + pieces = name_root.split('_') + if pieces[-1].isdigit(): + name_root = '_'.join(pieces[:-1]) + n = int(pieces[-1]) + else: + n = 0 + new_name = name_root + + while (new_name in self.global_namespace or + new_name in all_reserved_locals or new_name in self.generated_names): + n += 1 + new_name = '%s_%d' % (name_root, n) + + self.generated_names.add(new_name) + return new_name diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/origin_info.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/origin_info.py new file mode 100644 index 0000000000000000000000000000000000000000..ffacbe46e9f52f1e35a8ecce3350c4a26fe0cbdd --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/origin_info.py @@ -0,0 +1,296 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Container for origin source code information before AutoGraph compilation.""" +import collections +import difflib +import io +import os +import tokenize + +import gast + +from tensorflow.python.autograph.pyct import anno +from tensorflow.python.autograph.pyct import ast_util +from tensorflow.python.autograph.pyct import parser +from tensorflow.python.autograph.pyct import pretty_printer +from tensorflow.python.util import tf_inspect + + +class LineLocation( + collections.namedtuple('LineLocation', ('filename', 'lineno'))): + """Similar to Location, but without column information. + + Attributes: + filename: Text + lineno: int, 1-based + """ + pass + + +class Location( + collections.namedtuple('Location', ('filename', 'lineno', 'col_offset'))): + """Encodes code location information. + + Attributes: + filename: Text + lineno: int, 1-based + col_offset: int + line_loc: LineLocation + """ + + @property + def line_loc(self): + return LineLocation(self.filename, self.lineno) + + +class OriginInfo( + collections.namedtuple( + 'OriginInfo', + ('loc', 'function_name', 'source_code_line', 'comment'))): + """Container for information about the source code before conversion. + + Attributes: + loc: Location + function_name: Optional[Text] + source_code_line: Text + comment: Optional[Text] + """ + + def as_frame(self): + """Returns a 4-tuple consistent with the return of traceback.extract_tb.""" + return (self.loc.filename, self.loc.lineno, self.function_name, + self.source_code_line) + + def __repr__(self): + if self.loc.filename: + return '{}:{}:{}'.format( + os.path.split(self.loc.filename)[1], self.loc.lineno, + self.loc.col_offset) + return ':{}:{}'.format(self.loc.lineno, self.loc.col_offset) + + +# TODO(mdan): This source map should be a class - easier to refer to. 
+def create_source_map(nodes, code, filepath): + """Creates a source map between an annotated AST and the code it compiles to. + + Note: this function assumes nodes nodes, code and filepath correspond to the + same code. + + Args: + nodes: Iterable[ast.AST, ...], one or more AST modes. + code: Text, the source code in which nodes are found. + filepath: Text + + Returns: + Dict[LineLocation, OriginInfo], mapping locations in code to locations + indicated by origin annotations in node. + """ + reparsed_nodes = parser.parse(code, preamble_len=0, single_node=False) + for node in reparsed_nodes: + resolve(node, code, filepath, node.lineno, node.col_offset) + + source_map = {} + + try: + for before, after in ast_util.parallel_walk(nodes, reparsed_nodes): + # Note: generated code might not be mapped back to its origin. + # TODO(mdan): Generated code should always be mapped to something. + origin_info = anno.getanno(before, anno.Basic.ORIGIN, default=None) + final_info = anno.getanno(after, anno.Basic.ORIGIN, default=None) + if origin_info is None or final_info is None: + continue + + # Note: the keys are by line only, excluding the column offset. + line_loc = LineLocation(final_info.loc.filename, final_info.loc.lineno) + + existing_origin = source_map.get(line_loc) + if existing_origin is not None: + # Overlaps may exist because of child nodes, but almost never to + # different line locations. Exception make decorated functions, where + # both lines are mapped to the same line in the AST. + + # Line overlaps: keep bottom node. + if existing_origin.loc.line_loc == origin_info.loc.line_loc: + if existing_origin.loc.lineno >= origin_info.loc.lineno: + continue + + # In case of column overlaps, keep the leftmost node. + if existing_origin.loc.col_offset <= origin_info.loc.col_offset: + continue + + source_map[line_loc] = origin_info + + except ValueError as err: + new_msg = 'Inconsistent ASTs detected. This is a bug. 
Cause: \n' + new_msg += str(err) + new_msg += 'Diff:\n' + + for n, rn in zip(nodes, reparsed_nodes): + nodes_str = pretty_printer.fmt(n, color=False, noanno=True) + reparsed_nodes_str = pretty_printer.fmt(rn, color=False, noanno=True) + diff = difflib.context_diff( + nodes_str.split('\n'), + reparsed_nodes_str.split('\n'), + fromfile='Original nodes', + tofile='Reparsed nodes', + n=7) + diff = '\n'.join(diff) + new_msg += diff + '\n' + raise ValueError(new_msg) + + return source_map + + +class _Function: + + def __init__(self, name): + self.name = name + + +class OriginResolver(gast.NodeVisitor): + """Annotates an AST with additional source information like file name.""" + + def __init__(self, root_node, source_lines, comments_map, + context_lineno, context_col_offset, + filepath): + self._source_lines = source_lines + self._comments_map = comments_map + + if (hasattr(root_node, 'decorator_list') and root_node.decorator_list and + hasattr(root_node.decorator_list[0], 'lineno')): + # Typical case: functions. The line number of the first decorator + # is more accurate than the line number of the function itself in + # 3.8+. In earlier versions they coincide. + self._lineno_offset = context_lineno - root_node.decorator_list[0].lineno + else: + # Fall back to the line number of the root node. 
+ self._lineno_offset = context_lineno - root_node.lineno + + self._col_offset = context_col_offset - root_node.col_offset + + self._filepath = filepath + + self._function_stack = [] + + def _absolute_lineno(self, lineno): + return lineno + self._lineno_offset + + def _absolute_col_offset(self, col_offset): + if col_offset is None: + return 0 + return col_offset + self._col_offset + + def _attach_origin_info(self, node): + lineno = getattr(node, 'lineno', None) + col_offset = getattr(node, 'col_offset', None) + + if lineno is None: + return + + if self._function_stack: + function_name = self._function_stack[-1].name + else: + function_name = None + + source_code_line = self._source_lines[lineno - 1] + comment = self._comments_map.get(lineno) + + loc = Location(self._filepath, self._absolute_lineno(lineno), + self._absolute_col_offset(col_offset)) + origin = OriginInfo(loc, function_name, source_code_line, comment) + anno.setanno(node, 'lineno', lineno) + anno.setanno(node, anno.Basic.ORIGIN, origin) + + def visit(self, node): + entered_function = False + if isinstance(node, gast.FunctionDef): + entered_function = True + self._function_stack.append(_Function(node.name)) + + self._attach_origin_info(node) + self.generic_visit(node) + + if entered_function: + self._function_stack.pop() + + +def resolve(node, source, context_filepath, context_lineno, context_col_offset): + """Adds origin information to an AST, based on the source it was loaded from. + + This allows us to map the original source code line numbers to generated + source code. + + Note: the AST may be a part of a larger context (e.g. a function is part of + a module that may contain other things). However, this function does not + assume the source argument contains the entire context, nor that it contains + only code corresponding to node itself. However, it assumes that node was + parsed from the given source code. 
+ For this reason, two extra arguments are required, and they indicate the + location of the node in the original context. + + Args: + node: gast.AST, the AST to annotate. + source: Text, the source code representing node. + context_filepath: Text + context_lineno: int + context_col_offset: int + """ + # TODO(mdan): Pull this to a separate utility. + code_reader = io.StringIO(source) + comments_map = {} + try: + for token in tokenize.generate_tokens(code_reader.readline): + tok_type, tok_string, loc, _, _ = token + srow, _ = loc + if tok_type == tokenize.COMMENT: + comments_map[srow] = tok_string.strip()[1:].strip() + except tokenize.TokenError: + if isinstance(node, gast.Lambda): + # Source code resolution in older Python versions is brittle for + # lambda functions, and may contain garbage. + pass + else: + raise + + source_lines = source.split('\n') + visitor = OriginResolver(node, source_lines, comments_map, + context_lineno, context_col_offset, + context_filepath) + visitor.visit(node) + + +def resolve_entity(node, source, entity): + """Like resolve, but extracts the context information from an entity.""" + lines, lineno = tf_inspect.getsourcelines(entity) + filepath = tf_inspect.getsourcefile(entity) + + # Poor man's attempt at guessing the column offset: count the leading + # whitespace. This might not work well with tabs. 
+ definition_line = lines[0] + col_offset = len(definition_line) - len(definition_line.lstrip()) + + resolve(node, source, filepath, lineno, col_offset) + + +def copy_origin(from_node, to_node): + """Copies the origin info from a node to another, recursively.""" + origin = anno.Basic.ORIGIN.of(from_node, default=None) + if origin is None: + return + if not isinstance(to_node, (list, tuple)): + to_node = (to_node,) + for node in to_node: + for n in gast.walk(node): + anno.setanno(n, anno.Basic.ORIGIN, origin) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/parser.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/parser.py new file mode 100644 index 0000000000000000000000000000000000000000..dc9c2181f8e66262b5e85b65ac3af4b97bed38a6 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/parser.py @@ -0,0 +1,396 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Converting code to AST. + +Adapted from Tangent. 
+""" + +import ast +import inspect +import io +import linecache +import re +import sys +import textwrap +import tokenize + +import astunparse +import gast + +from tensorflow.python.autograph.pyct import errors +from tensorflow.python.autograph.pyct import inspect_utils +from tensorflow.python.util import tf_inspect + + +PY2_PREAMBLE = textwrap.dedent(""" +""") +PY3_PREAMBLE = '' +MAX_SIZE = 0 + +if sys.version_info >= (3, 9): + astunparse = ast + +if sys.version_info >= (3,): + STANDARD_PREAMBLE = PY3_PREAMBLE + MAX_SIZE = sys.maxsize +else: + STANDARD_PREAMBLE = PY2_PREAMBLE + MAX_SIZE = sys.maxint + +STANDARD_PREAMBLE_LEN = STANDARD_PREAMBLE.count('__future__') + + +_LEADING_WHITESPACE = re.compile(r'\s*') + + +def _unfold_continuations(code_string): + """Removes any backslash line continuations from the code.""" + return code_string.replace('\\\n', '') + + +def dedent_block(code_string): + """Dedents a code so that its first line starts at row zero.""" + + code_string = _unfold_continuations(code_string) + + token_gen = tokenize.generate_tokens(io.StringIO(code_string).readline) + + block_indentation = None + tokens = [] + try: + for tok in token_gen: + tokens.append(tok) + except tokenize.TokenError: + # Resolution of lambda functions may yield incomplete code, which can + # in turn generate this error. We silently ignore this error because the + # parser may still be able to deal with it. 
+ pass + + for tok in tokens: + tok_type, tok_string, _, _, _ = tok + if tok_type == tokenize.INDENT: + block_indentation = tok_string + block_level = len(block_indentation) + break + elif tok_type not in ( + tokenize.NL, tokenize.NEWLINE, tokenize.STRING, tokenize.COMMENT): + block_indentation = '' + break + + if not block_indentation: + return code_string + + block_level = len(block_indentation) + first_indent_uses_tabs = '\t' in block_indentation + for i, tok in enumerate(tokens): + tok_type, tok_string, _, _, _ = tok + if tok_type == tokenize.INDENT: + if ((' ' in tok_string and first_indent_uses_tabs) + or ('\t' in tok_string and not first_indent_uses_tabs)): + # TODO(mdan): We could attempt to convert tabs to spaces by unix rule. + # See: + # https://docs.python.org/3/reference/lexical_analysis.html#indentation + raise errors.UnsupportedLanguageElementError( + 'code mixing tabs and spaces for indentation is not allowed') + if len(tok_string) >= block_level: + tok_string = tok_string[block_level:] + tokens[i] = (tok_type, tok_string) + + new_code = tokenize.untokenize(tokens) + + # Note: untokenize respects the line structure, but not the whitespace within + # lines. For example, `def foo()` may be untokenized as `def foo ()` + # So instead of using the output of dedent, we match the leading whitespace + # on each line. + dedented_code = [] + for line, new_line in zip(code_string.split('\n'), new_code.split('\n')): + original_indent = re.match(_LEADING_WHITESPACE, line).group() + new_indent = re.match(_LEADING_WHITESPACE, new_line).group() + if len(original_indent) > len(new_indent): + dedented_line = line[len(original_indent) - len(new_indent):] + else: + dedented_line = line + dedented_code.append(dedented_line) + new_code = '\n'.join(dedented_code) + + return new_code + + +def parse_entity(entity, future_features): + """Returns the AST and source code of given entity. 
+ + Args: + entity: Any, Python function/method/class + future_features: Iterable[Text], future features to use (e.g. + 'print_statement'). See + https://docs.python.org/2/reference/simple_stmts.html#future + + Returns: + gast.AST, Text: the parsed AST node; the source code that was parsed to + generate the AST (including any prefixes that this function may have added). + """ + if inspect_utils.islambda(entity): + return _parse_lambda(entity) + + try: + original_source = inspect_utils.getimmediatesource(entity) + except OSError as e: + raise errors.InaccessibleSourceCodeError( + f'Unable to locate the source code of {entity}. Note that functions' + ' defined in certain environments, like the interactive Python shell,' + ' do not expose their source code. If that is the case, you should' + ' define them in a .py source file. If you are certain the code is' + ' graph-compatible, wrap the call using' + f' @tf.autograph.experimental.do_not_convert. Original error: {e}') + + source = dedent_block(original_source) + + future_statements = tuple( + 'from __future__ import {}'.format(name) for name in future_features) + source = '\n'.join(future_statements + (source,)) + + return parse(source, preamble_len=len(future_features)), source + + +def _without_context(node, lines, minl, maxl): + """Returns a clean node and source code without indenting and context.""" + for n in gast.walk(node): + lineno = getattr(n, 'lineno', None) + if lineno is not None: + n.lineno = lineno - minl + end_lineno = getattr(n, 'end_lineno', None) + if end_lineno is not None: + n.end_lineno = end_lineno - minl + + code_lines = lines[minl - 1:maxl] + + # Attempt to clean up surrounding context code. + + end_col_offset = getattr(node, 'end_col_offset', None) + if end_col_offset is not None: + # This is only available in 3.8. + code_lines[-1] = code_lines[-1][:end_col_offset] + + col_offset = getattr(node, 'col_offset', None) + if col_offset is None: + # Older Python: try to find the "lambda" token. 
This is brittle. + match = re.search(r'(?' % f))) + continue + v = getattr(node, f) + if isinstance(v, list): + if v: + self._print('%s%s=[' % (self._indent(), self._field(f))) + self.indent_lvl += 1 + for n in v: + if n is not None: + self.generic_visit(n) + else: + self._print('%sNone' % (self._indent())) + self.indent_lvl -= 1 + self._print('%s]' % (self._indent())) + else: + self._print('%s%s=[]' % (self._indent(), self._field(f))) + elif isinstance(v, tuple): + if v: + self._print('%s%s=(' % (self._indent(), self._field(f))) + self.indent_lvl += 1 + for n in v: + if n is not None: + self.generic_visit(n) + else: + self._print('%sNone' % (self._indent())) + self.indent_lvl -= 1 + self._print('%s)' % (self._indent())) + else: + self._print('%s%s=()' % (self._indent(), self._field(f))) + elif isinstance(v, gast.AST): + self.generic_visit(v, f) + elif isinstance(v, bytes): + self._print('%s%s=%s' % (self._indent(), self._field(f), + self._value('b"%s"' % v))) + elif isinstance(v, str): + self._print('%s%s=%s' % (self._indent(), self._field(f), + self._value('u"%s"' % v))) + else: + self._print('%s%s=%s' % (self._indent(), self._field(f), + self._value(v))) + self.indent_lvl -= 1 + + +def fmt(node, color=True, noanno=False): + printer = PrettyPrinter(color, noanno) + if isinstance(node, (list, tuple)): + for n in node: + printer.visit(n) + else: + printer.visit(node) + return printer.result diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/qual_names.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/qual_names.py new file mode 100644 index 0000000000000000000000000000000000000000..9e7d1ce46bed83b2032ec46ecd409a95cce340f7 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/qual_names.py @@ -0,0 +1,266 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utilities for manipulating qualified names. + +A qualified name is a uniform way to refer to simple (e.g. 'foo') and composite +(e.g. 'foo.bar') syntactic symbols. + +This is *not* related to the __qualname__ attribute used by inspect, which +refers to scopes. +""" + +import collections + +import gast + +from tensorflow.python.autograph.pyct import anno +from tensorflow.python.autograph.pyct import parser + + +class CallerMustSetThis(object): + pass + + +class Symbol(collections.namedtuple('Symbol', ['name'])): + """Represents a Python symbol.""" + + +class Literal(collections.namedtuple('Literal', ['value'])): + """Represents a Python numeric literal.""" + + def __str__(self): + if isinstance(self.value, str): + return "'{}'".format(self.value) + return str(self.value) + + def __repr__(self): + return str(self) + + +# TODO(mdan): Use subclasses to remove the has_attr has_subscript booleans. 
+class QN(object): + """Represents a qualified name.""" + + def __init__(self, base, attr=None, subscript=None): + if attr is not None and subscript is not None: + raise ValueError('A QN can only be either an attr or a subscript, not ' + 'both: attr={}, subscript={}.'.format(attr, subscript)) + self._has_attr = False + self._has_subscript = False + + if attr is not None: + if not isinstance(base, QN): + raise ValueError( + 'for attribute QNs, base must be a QN; got instead "%s"' % base) + if not isinstance(attr, str): + raise ValueError('attr may only be a string; got instead "%s"' % attr) + self._parent = base + # TODO(mdan): Get rid of the tuple - it can only have 1 or 2 elements now. + self.qn = (base, attr) + self._has_attr = True + + elif subscript is not None: + if not isinstance(base, QN): + raise ValueError('For subscript QNs, base must be a QN.') + self._parent = base + self.qn = (base, subscript) + self._has_subscript = True + + else: + if not isinstance(base, (str, Literal)): + # TODO(mdan): Require Symbol instead of string. + raise ValueError( + 'for simple QNs, base must be a string or a Literal object;' + ' got instead "%s"' % type(base)) + assert '.' not in base and '[' not in base and ']' not in base + self._parent = None + self.qn = (base,) + + def is_symbol(self): + return isinstance(self.qn[0], str) + + def is_simple(self): + return len(self.qn) <= 1 + + def is_composite(self): + return len(self.qn) > 1 + + def has_subscript(self): + return self._has_subscript + + def has_attr(self): + return self._has_attr + + @property + def attr(self): + if not self._has_attr: + raise ValueError('Cannot get attr of non-attribute "%s".' % self) + return self.qn[1] + + @property + def parent(self): + if self._parent is None: + raise ValueError('Cannot get parent of simple name "%s".' % self.qn[0]) + return self._parent + + @property + def owner_set(self): + """Returns all the symbols (simple or composite) that own this QN. 
+ + In other words, if this symbol was modified, the symbols in the owner set + may also be affected. + + Examples: + 'a.b[c.d]' has two owners, 'a' and 'a.b' + """ + owners = set() + if self.has_attr() or self.has_subscript(): + owners.add(self.parent) + owners.update(self.parent.owner_set) + return owners + + @property + def support_set(self): + """Returns the set of simple symbols that this QN relies on. + + This would be the smallest set of symbols necessary for the QN to + statically resolve (assuming properties and index ranges are verified + at runtime). + + Examples: + 'a.b' has only one support symbol, 'a' + 'a[i]' has two support symbols, 'a' and 'i' + """ + # TODO(mdan): This might be the set of Name nodes in the AST. Track those? + roots = set() + if self.has_attr(): + roots.update(self.parent.support_set) + elif self.has_subscript(): + roots.update(self.parent.support_set) + roots.update(self.qn[1].support_set) + else: + roots.add(self) + return roots + + def __hash__(self): + return hash(self.qn + (self._has_attr, self._has_subscript)) + + def __eq__(self, other): + return (isinstance(other, QN) and self.qn == other.qn and + self.has_subscript() == other.has_subscript() and + self.has_attr() == other.has_attr()) + + def __lt__(self, other): + return str(self) < str(other) + + def __gt__(self, other): + return str(self) > str(other) + + def __str__(self): + root = self.qn[0] + if self.has_subscript(): + return '{}[{}]'.format(root, self.qn[1]) + if self.has_attr(): + return '.'.join(map(str, self.qn)) + else: + return str(root) + + def __repr__(self): + return str(self) + + def ssf(self): + """Simple symbol form.""" + ssfs = [n.ssf() if isinstance(n, QN) else n for n in self.qn] + ssf_string = '' + for i in range(0, len(self.qn) - 1): + if self.has_subscript(): + delimiter = '_sub_' + else: + delimiter = '_' + ssf_string += ssfs[i] + delimiter + return ssf_string + ssfs[-1] + + def ast(self): + """AST representation.""" + # The caller must adjust the 
context appropriately. + if self.has_subscript(): + return gast.Subscript( + value=self.parent.ast(), + slice=self.qn[-1].ast(), + ctx=CallerMustSetThis) + if self.has_attr(): + return gast.Attribute( + value=self.parent.ast(), attr=self.qn[-1], ctx=CallerMustSetThis) + + base = self.qn[0] + if isinstance(base, str): + return gast.Name( + base, ctx=CallerMustSetThis, annotation=None, type_comment=None) + elif isinstance(base, Literal): + return gast.Constant(base.value, kind=None) + else: + assert False, ('the constructor should prevent types other than ' + 'str and Literal') + + +class QnResolver(gast.NodeTransformer): + """Annotates nodes with QN information. + + Note: Not using NodeAnnos to avoid circular dependencies. + """ + + def visit_Name(self, node): + node = self.generic_visit(node) + anno.setanno(node, anno.Basic.QN, QN(node.id)) + return node + + def visit_Attribute(self, node): + node = self.generic_visit(node) + if anno.hasanno(node.value, anno.Basic.QN): + anno.setanno(node, anno.Basic.QN, + QN(anno.getanno(node.value, anno.Basic.QN), attr=node.attr)) + return node + + def visit_Subscript(self, node): + # TODO(mdan): This may no longer apply if we overload getitem. + node = self.generic_visit(node) + s = node.slice + if isinstance(s, (gast.Tuple, gast.Slice)): + # TODO(mdan): Support range and multi-dimensional indices. + # Continuing silently because some demos use these. + return node + if isinstance(s, gast.Constant) and s.value != Ellipsis: + subscript = QN(Literal(s.value)) + else: + # The index may be an expression, case in which a name doesn't make sense. 
+ if anno.hasanno(s, anno.Basic.QN): + subscript = anno.getanno(s, anno.Basic.QN) + else: + return node + if anno.hasanno(node.value, anno.Basic.QN): + anno.setanno(node, anno.Basic.QN, + QN(anno.getanno(node.value, anno.Basic.QN), + subscript=subscript)) + return node + + +def resolve(node): + return QnResolver().visit(node) + + +def from_str(qn_str): + node = parser.parse_expression(qn_str) + node = resolve(node) + return anno.getanno(node, anno.Basic.QN) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/templates.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/templates.py new file mode 100644 index 0000000000000000000000000000000000000000..ee4b589102254fe2f70a00140c40b0e683ecd6dd --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/pyct/templates.py @@ -0,0 +1,290 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""AST conversion templates. + +Adapted from Tangent. 
"""

import ast
import textwrap

import gast

from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names


class ContextAdjuster(gast.NodeTransformer):
  """Adjusts the ctx field of nodes to ensure consistency.

  This transformer can change the ctx fields of a variable, tuple and other
  AST elements that allow one, based on whether the element is being read or
  written.
  """

  def __init__(self, override_value):
    # A gast context class (e.g. gast.Load/gast.Store) to apply, or None to
    # leave contexts as they are (they are then only asserted to be set).
    self._ctx_override = override_value

  def visit(self, node):
    original_override = self._ctx_override
    node = super(ContextAdjuster, self).visit(node)
    if hasattr(node, 'ctx'):
      assert node.ctx is not None, 'node {} has ctx unset'.format(node)
    # Restore the override saved before visiting children.
    self._ctx_override = original_override
    return node

  def _apply_override(self, node):
    """Sets node.ctx to a fresh instance of the override class, if any."""
    if self._ctx_override is not None:
      node.ctx = self._ctx_override()

  def visit_Attribute(self, node):
    self._apply_override(node)
    # The value an attribute is read from is always loaded.
    self._ctx_override = gast.Load
    node = self.generic_visit(node)
    return node

  def visit_Tuple(self, node):
    self._apply_override(node)
    return self.generic_visit(node)

  def visit_List(self, node):
    self._apply_override(node)
    return self.generic_visit(node)

  def visit_Name(self, node):
    self._apply_override(node)
    return self.generic_visit(node)

  def visit_Call(self, node):
    self._apply_override(node)
    # We may be able to override these to Load(), but for now it's simpler
    # to just assert that they're set.
    self._ctx_override = None
    return self.generic_visit(node)

  def visit_Dict(self, node):
    # We may be able to override these to Load(), but for now it's simpler
    # to just assert that they're set.
    self._ctx_override = None
    return self.generic_visit(node)

  def visit_Subscript(self, node):
    self._apply_override(node)
    # The container being subscripted is always loaded.
    self._ctx_override = gast.Load
    node.value = self.visit(node.value)
    return self.generic_visit(node)

  def visit_comprehension(self, node):
    # We may be able to override some of these, but for now it's simpler
    # to just assert that they're set.
    self._ctx_override = None
    return self.generic_visit(node)

  def visit_Lambda(self, node):
    # We may be able to override some of these, but for now it's simpler
    # to just assert that they're set.
    self._ctx_override = None
    return self.generic_visit(node)


class ReplaceTransformer(gast.NodeTransformer):
  """Replace AST nodes."""

  def __init__(self, replacements):
    """Create a new ReplaceTransformer.

    Args:
      replacements: A mapping from placeholder names to (lists of) AST nodes
        that these placeholders will be replaced by.
    """
    self.replacements = replacements
    self.in_replacements = False
    # Annotations that survive the copy made by _prepare_replacement.
    self.preserved_annos = {
        anno.Basic.DIRECTIVES,
        anno.Basic.EXTRA_LOOP_TEST,
        anno.Basic.ORIGIN,
        anno.Basic.SKIP_PROCESSING,
        anno.Static.ORIG_DEFINITIONS,
        'function_context_name',
    }

  def _prepare_replacement(self, replaced, key):
    """Prepares a replacement AST that's safe to swap in for a node.

    Args:
      replaced: ast.AST, the node being replaced
      key: Hashable, the key of the replacement AST
    Returns:
      ast.AST, the replacement AST
    """
    repl = self.replacements[key]

    new_nodes = ast_util.copy_clean(repl, preserve_annos=self.preserved_annos)
    if isinstance(new_nodes, gast.AST):
      new_nodes = [new_nodes]

    return new_nodes

  def visit_Expr(self, node):
    # When replacing a placeholder with an entire statement, the replacement
    # must stand on its own and not be wrapped in an Expr.
    new_value = self.visit(node.value)
    if new_value is node.value:
      return node
    return new_value

  def visit_keyword(self, node):
    """Replaces a keyword argument; only keyword(s) are valid replacements."""
    if node.arg not in self.replacements:
      return self.generic_visit(node)

    repl = self._prepare_replacement(node, node.arg)
    if isinstance(repl, gast.keyword):
      return repl
    elif (repl and isinstance(repl, (list, tuple)) and
          all(isinstance(r, gast.keyword) for r in repl)):
      return repl
    # TODO(mdan): We may allow replacing with a string as well.
    # For example, if one wanted to replace foo with bar in foo=baz, then
    # we could allow changing just node arg, so that we end up with bar=baz.
    raise ValueError(
        'a keyword argument may only be replaced by another keyword or a '
        'non-empty list of keywords. Found: {} for keyword {}'.format(
            repl, node.arg))

  def visit_FunctionDef(self, node):
    """Replaces a function's name when it matches a placeholder."""
    node = self.generic_visit(node)
    if node.name not in self.replacements:
      return node

    repl = self.replacements[node.name]
    if not isinstance(repl, (gast.Name, ast.Name)):
      raise ValueError(
          'a function name can only be replaced by a Name node. Found: %s' %
          repl)
    node.name = repl.id
    return node

  def visit_Attribute(self, node):
    """Replaces an attribute name when it matches a placeholder."""
    node = self.generic_visit(node)
    if node.attr not in self.replacements:
      return node

    repl = self.replacements[node.attr]
    if not isinstance(repl, gast.Name):
      raise ValueError(
          'An attribute can only be replaced by a Name node. Found: %s' % repl)
    node.attr = repl.id
    return node

  def visit_Name(self, node):
    """Replaces a Name node, adjusting the replacement's ctx to match."""
    if node.id not in self.replacements:
      return node

    new_nodes = self._prepare_replacement(node, node.id)

    if not new_nodes:
      return new_nodes

    # Preserve the target context.
    adjuster = ContextAdjuster(type(node.ctx))
    for n in new_nodes:
      if hasattr(n, 'ctx'):
        adjuster.visit(n)

    if len(new_nodes) == 1:
      new_nodes, = new_nodes

    return new_nodes


def _convert_to_ast(n):
  """Converts from a known data type to AST."""
  # Note: When generating AST nodes from strings/QNs in isolation, ctx is
  # unknown. ctx must be filled in according to the template being used.
  # See ReplaceTransformer.visit_Name.
  if isinstance(n, str):
    return gast.Name(id=n, ctx=None, annotation=None, type_comment=None)
  if isinstance(n, qual_names.QN):
    return n.ast()
  if isinstance(n, list):
    return [_convert_to_ast(e) for e in n]
  if isinstance(n, tuple):
    return tuple(_convert_to_ast(e) for e in n)
  return n


def replace(template, **replacements):
  """Replaces placeholders in a Python template.

  AST Name and Tuple nodes always receive the context that inferred from
  the template. However, when replacing more complex nodes (that can
  potentially contain Name children), then the caller is responsible for
  setting the appropriate context.

  Args:
    template: A string representing Python code. Any symbol name can be used
      that appears in the template code can be used as placeholder.
    **replacements: A mapping from placeholder names to (lists of) AST nodes
      that these placeholders will be replaced by. String values are also
      supported as a shorthand for AST Name nodes with the respective ID.

  Returns:
    An AST node or list of AST nodes with the replacements made. If the
    template was a function, a list will be returned. If the template was a
    node, the same node will be returned. If the template was a string, an
    AST node will be returned (a `Module` node in the case of a multi-line
    string, an `Expr` node otherwise).

  Raises:
    ValueError: if the arguments are incorrect.
  """
  if not isinstance(template, str):
    raise ValueError('Expected string template, got %s' % type(template))
  for k in replacements:
    replacements[k] = _convert_to_ast(replacements[k])
  template_str = parser.STANDARD_PREAMBLE + textwrap.dedent(template)
  nodes = parser.parse(
      template_str,
      preamble_len=parser.STANDARD_PREAMBLE_LEN,
      single_node=False)
  results = []
  for node in nodes:
    node = ReplaceTransformer(replacements).visit(node)
    if isinstance(node, (list, tuple)):
      results.extend(node)
    else:
      results.append(node)
  results = [qual_names.resolve(r) for r in results]
  return results


def replace_as_expression(template, **replacements):
  """Variant of replace that generates expressions, instead of code blocks."""
  replacement = replace(template, **replacements)
  if len(replacement) != 1:
    raise ValueError(
        'single expression expected; for more general templates use replace')
  node, = replacement

  if isinstance(node, gast.Expr):
    return node.value
  elif isinstance(node, gast.Name):
    return node

  raise ValueError(
      'the template is expected to generate an expression or a name node;'
      ' instead found %s' % node)


# --- vendored file boundary: autograph/pyct/transformer.py ---
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A node transformer that includes utilities for SCT."""

import collections
import enum

import gast

from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import pretty_printer
from tensorflow.python.autograph.pyct import templates


class AnalysisLevel(enum.IntEnum):
  """Levels of static analysis, ordered by depth (IntEnum, so comparable)."""

  NONE = 0
  ACTIVITY = 1
  DEFINEDNESS = 2
  LIVENESS = 3


# TODO(znado): Use namedtuple.
class Context(object):
  """Contains information about a source code transformation.

  This object is mutable, and is updated during conversion. Not thread safe.

  Attributes:
    info: EntityInfo, immutable.
    namer: naming.Namer.
    current_origin: origin_info.OriginInfo, holds the OriginInfo of the last
      AST node to be processed successfully. Useful for error handling.
    user: An user-supplied context object. The object is opaque to the
      infrastructure, but will pe passed through to all custom transformations.
  """

  def __init__(self, info, namer, user_context):
    self.info = info
    self.namer = namer
    self.current_origin = None
    self.user = user_context


# TODO(mdan): Move to a standalone file.
class EntityInfo(
    collections.namedtuple(
        'EntityInfo',
        ('name', 'source_code', 'source_file', 'future_features', 'namespace'))
):
  """Contains information about a Python entity.

  Immutable.

  Examples of entities include functions and classes.

  Attributes:
    name: The name that identifies this entity.
    source_code: The entity's source code.
    source_file: The entity's source file.
    future_features: Tuple[Text], the future features that this entity was
      compiled with. See
      https://docs.python.org/2/reference/simple_stmts.html#future.
    namespace: Dict[str, ], containing symbols visible to the entity
      (excluding parameters).
  """
  pass


class _StateStack(object):
  """Templated context manager.

  This class provides syntactic sugar for a stack of objects of known
  type. It allows accessing attributes of the object at the top of the stack
  directly against this object, which allows for very terse syntax.

  For example, this code:

    stack = _StateStack(Foo)
    stack.enter()
    stack.bar

  Is equivalent to:

    stack = []
    stack.append(Foo())
    foo = stack[-1]
    foo.bar

  See _State for more on how this is used.

  Attributes:
    type: Any, the type of objects that this stack holds
    level: int, the current stack depth
    stack: List[Any], the actual stack
    value: Any, the instance of the object at the top of the stack
  """

  def __init__(self, type_):
    # Because we override __setattr__, we need to attach these attributes using
    # the superclass' setattr.
    object.__setattr__(self, 'type', type_)
    object.__setattr__(self, '_stack', [])
    # Types may opt out of the implicit root instance by defining a `no_root`
    # attribute.
    if not hasattr(type_, 'no_root'):
      self.enter()

  def __enter__(self):
    self.enter()
    return self

  def __exit__(self, exc_type, exc_value, traceback):
    self.exit()

  def enter(self):
    """Pushes a fresh instance of the tracked type onto the stack."""
    self._stack.append(self.type())

  def exit(self):
    """Pops the top instance off the stack."""
    self._stack.pop()

  @property
  def stack(self):
    return self._stack

  @property
  def level(self):
    return len(self._stack)

  @property
  def value(self):
    return self._stack[-1]

  def __iter__(self):
    return iter(self._stack)

  def __getattr__(self, key):
    # Delegates attribute reads to the object at the top of the stack.
    return getattr(self._stack[-1], key)

  def __setattr__(self, key, value):
    # Delegates attribute writes to the object at the top of the stack.
    setattr(self._stack[-1], key, value)


class _State(object):
  """Syntactic sugar for accessing an instance of a StateStack context manager.

  This structure offers syntactic sugar over a dict of stacks of objects
  of known type. These structures are useful to keep state during AST walks.
  Multiple different scopes can be tracked in parallel. For example:

    s = _State()

    s[foo].enter()
    s[bar].enter()  # this will not affect s[foo]

  Element access has special semantics:
    * keys are a data type
    * element values are _StateStack(type=key) objects
    * missing elements are automatically added, similarly to defaultdict

  For example, the following block :

    _State s
    s[Foo]

  Is equivalent to:

    s = {}
    if Foo not in s:
      s[Foo] = Foo()
    s[Foo]

  See Base for how it's used.
  """

  def __init__(self):
    self._value = {}

  def __getitem__(self, key):
    # defaultdict-like: create the stack for `key` on first access.
    if key not in self._value:
      self._value[key] = _StateStack(key)
    return self._value[key]


class NodeStateTracker(object):
  """Base class for general-purpose Python code transformation.

  This abstract class provides helpful functions, like state tracking within
  the scope of arbitrary node, helpers for processing code blocks, debugging,
  mapping of transformed code to original code, and others.

  Scope-local state tracking: to keep state across nodes, at the level of
  (possibly nested) scopes, use enter/exit_local_scope and set/get_local.
  You must call enter/exit_local_scope manually, but the transformer detects
  when they are not properly paired.

  The transformer allows keeping state across calls that is local
  to arbitrary nodes and their descendants, using the self.state attribute.
  Multiple independent scopes are allowed and automatically constructed.

  For example, to keep track of the `If` node that encloses any `Name` node,
  one can write:

  ```
  class FooType(object):

    def __init__(self):
      self.foo_property = None

  class DummyTransformer(NodeStateTracker, ast.NodeTransformer):

    def visit_If(self, node):
      self.state[FooType].enter()
      self.state[FooType].foo_property = node
      node = self.generic_visit(node)
      self.state[FooType].exit()
      return node

    def visit_Name(self, node):
      self.state[FooType].foo_property  # will hold the innermost enclosing if
  ```

  Alternatively, the `enter()`/`exit()` calls can be managed by a `with`
  statement:

  ```
  def visit_If(self, node):
    with self.state[FooType] as foo:
      foo.foo_property = node
      return self.generic_visit(node)
  ```
  """

  # TODO(mdan): Document all extra features.

  def __init__(self, ctx):
    """Initialize the transformer.

    Subclasses should call this.

    Args:
      ctx: A Context object.
    """
    self._lineno = 0
    self._col_offset = 0
    self.ctx = ctx

    # Allows scoping of local variables to keep state across calls to visit_*
    # methods. Multiple scope hierarchies may exist and are keyed by tag. A
    # scope is valid at one or more nodes and all its children. Scopes created
    # in child nodes supersede their parent. Scopes are isolated from one
    # another.
    self.state = _State()

  def debug_print(self, node):
    """Helper method useful for debugging. Prints the AST."""
    if __debug__:
      print(pretty_printer.fmt(node))
    return node

  def debug_print_src(self, node):
    """Helper method useful for debugging. Prints the AST as code."""
    if __debug__:
      print(parser.unparse(node))
    return node

  def visit_block(self, nodes, before_visit=None, after_visit=None):
    """A more powerful version of generic_visit for statement blocks.

    An example of a block is the body of an if statement.

    This function allows specifying a postprocessing callback (the
    after_visit argument) argument which can be used to move nodes to a new
    destination. This is done by after_visit by returning a non-null
    second return value, e.g. return new_node, new_destination.

    For example, a transformer could perform the following move:

        foo()
        bar()
        baz()

        foo()
        if cond:
          bar()
          baz()

    The above could be done with a postprocessor of this kind:

        def after_visit(node):
          if node_is_function_call(bar):
            new_container_node = build_cond()
            new_container_node.body.append(node)
            return new_container_node, new_container_node.body
          else:
            # Once we set a new destination, all subsequent items will be
            # moved to it, so we don't need to explicitly handle baz.
            return node, None

    Args:
      nodes: enumerable of AST node objects. If None, the function returns
        None.
      before_visit: optional callable that is called before visiting each item
        in nodes
      after_visit: optional callable that takes in an AST node and returns a
        tuple (new_node, new_destination). It is called after visiting each
        item in nodes. Is used in the same was as the visit_* methods:
        new_node will replace the node; if not None, new_destination must be a
        list, and subsequent nodes will be placed in this list instead of the
        list returned by visit_block.

    Returns:
      A list of AST node objects containing the transformed items from nodes,
      except those nodes that have been relocated using after_visit.
    """
    if nodes is None:
      return None

    results = []
    node_destination = results
    for node in nodes:
      if before_visit:
        # TODO(mdan): We can modify node here too, if ever needed.
        before_visit()

      replacement = self.visit(node)

      if after_visit and replacement:
        replacement, new_destination = after_visit(replacement)
      else:
        new_destination = None

      if replacement:
        if isinstance(replacement, (list, tuple)):
          node_destination.extend(replacement)
        else:
          node_destination.append(replacement)

      # Allow the postprocessor to reroute the remaining nodes to a new list.
      if new_destination is not None:
        node_destination = new_destination
    return results


# TODO(mdan): Rename to PythonCodeTransformer.
class Base(NodeStateTracker, gast.NodeTransformer):
  """Base class for general-purpose Python-to-Python code transformation.

  This is an extension of ast.NodeTransformer that provides the additional
  functions offered by NodeStateTracker.
  """

  def create_assignment(self, target, expression):
    """Builds an assignment statement (target = expression) via templates."""
    template = """
      target = expression
    """
    return templates.replace(template, target=target, expression=expression)

  # TODO(mdan): Remove.
  def apply_to_single_assignments(self, targets, values, apply_fn):
    """Applies a function to each individual assignment.

    This function can process a possibly-unpacked (e.g. a, b = c, d)
    assignment. It tries to break down the unpacking if possible. In effect,
    it has the same effect as passing the assigned values in SSA form to
    apply_fn.

    Examples:

    The following will result in apply_fn(a, c), apply_fn(b, d):

        a, b = c, d

    The following will result in apply_fn(a, c[0]), apply_fn(b, c[1]):

        a, b = c

    The following will result in apply_fn(a, (b, c)):

        a = b, c

    It uses the visitor pattern to allow subclasses to process single
    assignments individually.

    Args:
      targets: list, tuple of or individual AST node. Should be used with the
        targets field of an ast.Assign node.
      values: an AST node.
      apply_fn: a function of a single argument, which will be called with the
        respective nodes of each single assignment. The signature is
        apply_fn(target, value), no return value.
    """
    if not isinstance(targets, (list, tuple)):
      targets = (targets,)
    for target in targets:
      if isinstance(target, (gast.Tuple, gast.List)):
        for i in range(len(target.elts)):
          target_el = target.elts[i]
          if isinstance(values, (gast.Tuple, gast.List)):
            value_el = values.elts[i]
          else:
            # NOTE(review): a raw int is passed as the slice here rather than
            # a Constant node — gast nodes normally expect AST children;
            # confirm against gast's Subscript signature.
            value_el = gast.Subscript(values, i, ctx=gast.Store())
          self.apply_to_single_assignments(target_el, value_el, apply_fn)
      else:
        # TODO(mdan): Look into allowing to rewrite the AST here.
        apply_fn(target, values)

  def visit(self, node):
    """Visits `node`, enforcing AST input and propagating origin info."""
    if not isinstance(node, gast.AST):
      # This is not that uncommon a mistake: various node bodies are lists, for
      # example, posing a land mine for transformers that need to recursively
      # call `visit`. The error needs to be raised before the exception handler
      # below is installed, because said handler will mess up if `node` is not,
      # in fact, a node.
      msg = ('invalid value for "node": expected "ast.AST", got "{}"; to'
             ' visit lists of nodes, use "visit_block" instead').format(
                 type(node))
      raise ValueError(msg)

    if anno.hasanno(node, anno.Basic.SKIP_PROCESSING):
      return node

    parent_origin = self.ctx.current_origin
    if anno.hasanno(node, anno.Basic.ORIGIN):
      self.ctx.current_origin = anno.getanno(node, anno.Basic.ORIGIN)

    try:
      processing_expr_node = isinstance(node, gast.Expr)
      if processing_expr_node:
        entry_expr_value = node.value

      result = super(Base, self).visit(node)

      # Adjust for consistency: replacing the value of an Expr with
      # an Assign node removes the need for the Expr node.
      if (processing_expr_node and isinstance(result, gast.Expr) and
          (result.value is not entry_expr_value)):
        # When the replacement is a list, it is assumed that the list came
        # from a template that contained a number of statements, which
        # themselves are standalone and don't require an enclosing Expr.
        if isinstance(result.value,
                      (list, tuple, gast.Assign, gast.AugAssign)):
          result = result.value

      # By default, all replacements receive the origin info of the replaced
      # node.
      if result is not node and result is not None:
        inherited_origin = anno.getanno(
            node, anno.Basic.ORIGIN, default=parent_origin)
        if inherited_origin is not None:
          # NOTE(review): this initial assignment is redundant (both branches
          # below reassign nodes_to_adjust); kept as-is to match vendored
          # upstream code.
          nodes_to_adjust = result
          if isinstance(result, (list, tuple)):
            nodes_to_adjust = result
          else:
            nodes_to_adjust = (result,)
          for n in nodes_to_adjust:
            if not anno.hasanno(n, anno.Basic.ORIGIN):
              anno.setanno(n, anno.Basic.ORIGIN, inherited_origin)
    finally:
      self.ctx.current_origin = parent_origin

    return result


class CodeGenerator(NodeStateTracker, gast.NodeVisitor):
  """Base class for general-purpose Python-to-string code transformation.

  Similar to Base, but outputs arbitrary strings instead of a Python AST.

  This uses the same visitor mechanism that the standard NodeVisitor uses,
  meaning that subclasses write handlers for the different kinds of nodes.
  New code is generated using the emit method, which appends to a code buffer
  that can be afterwards obtained from code_buffer.

  Example:

    class SimpleCodeGen(CodeGenerator):

      def visitIf(self, node):
        self.emit('if ')
        self.visit(node.test)
        self.emit(' { ')
        self.visit(node.body)
        self.emit(' } else { ')
        self.visit(node.orelse)
        self.emit(' } ')

    node = ast.parse(...)
    gen = SimpleCodeGen()
    gen.visit(node)
    # gen.code_buffer contains the resulting code
  """

  def __init__(self, ctx):
    super(CodeGenerator, self).__init__(ctx)

    self._output_code = ''
    # Maps (start, end) offsets in the output to origin info of the node that
    # produced that span.
    self.source_map = {}

  def emit(self, code):
    """Appends `code` to the output buffer."""
    self._output_code += code

  @property
  def code_buffer(self):
    return self._output_code

  def visit(self, node):
    """Visits `node`, recording output offsets for source mapping."""
    if anno.hasanno(node, anno.Basic.SKIP_PROCESSING):
      return

    parent_origin = self.ctx.current_origin
    eof_before = len(self._output_code)
    if anno.hasanno(node, anno.Basic.ORIGIN):
      self.ctx.current_origin = anno.getanno(node, anno.Basic.ORIGIN)

    try:
      ret = super(CodeGenerator, self).visit(node)

      # By default, all replacements receive the origin info of the replaced
      # node.
      eof_after = len(self._output_code)
      if eof_before - eof_after:
        inherited_origin = anno.getanno(
            node, anno.Basic.ORIGIN, default=parent_origin)
        if inherited_origin is not None:
          self.source_map[(eof_before, eof_after)] = inherited_origin
      return ret
    finally:
      self.ctx.current_origin = parent_origin


# --- vendored file boundary: autograph/pyct/transpiler.py ---
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generic source code transformation infrastructure."""

import inspect
import threading
import types

import gast

from tensorflow.python.autograph.pyct import cache
from tensorflow.python.autograph.pyct import inspect_utils
from tensorflow.python.autograph.pyct import loader
from tensorflow.python.autograph.pyct import naming
from tensorflow.python.autograph.pyct import origin_info
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.utils import ag_logging as logging


def _wrap_into_factory(nodes, entity_name, inner_factory_name,
                       outer_factory_name, closure_vars, factory_args,
                       future_features):
  """Wraps an AST into the body of a factory with consistent lexical context.

  The AST is expected to define some symbol with a name given by
  `entity_name`.

  This mechanism ensures that the resulting transformed entity has lexical
  scoping identical to that of the source entity, while allowing extra
  parametrization.

  Two nested factories achieve the following:

  1. The inner factory dynamically creates the entity represented by `nodes`.
  2. The inner factory is parametrized by a custom set of arguments.
  3. The inner factory has a closure identical to that of the transformed
      entity.
  4. The inner factory has local variables named like `args`, which `nodes`
      may use as additional parameters.
  5. The inner factory returns the variables given by `entity_name`.
  6. The outer factory is niladic.
  7. The outer factory has no closure.
  8. The outer factory creates the necessary lexical scope for the inner
      factory, so that the loaded code has the given configuration for
      closure/globals.
  9. The outer factory returns the inner factory.

  Roughly speaking, the following code is generated:

      from __future__ import future_feature_1
      from __future__ import future_feature_2
      ...

      def outer_factory():
        closure_var_1 = None
        closure_var_2 = None
        ...

        def inner_factory(arg_1, arg_2, ...):
          <entity_defs>
          return entity

        return inner_factory

  The lexical scoping is created using dummy symbol declarations which create
  local variables in the body of the outer factory, so that the Python parser
  correctly marks them as free non-global variables upon load (that is, it
  creates cell slots for each symbol. These symbols are initialized with None,
  but their values are not expected to be used; instead, the caller is
  expected to replace them with the cells of the source entity. For more
  details, see:
  https://docs.python.org/3/reference/executionmodel.html#binding-of-names

  Args:
    nodes: Tuple[ast.AST], the source code to wrap.
    entity_name: Union[Text, ast.AST], the name of the principal entity that
      `nodes` define.
    inner_factory_name: Text, the name of the inner factory.
    outer_factory_name: Text, the name of the outer factory.
    closure_vars: Iterable[Text], names of the closure variables for the inner
      factory.
    factory_args: Iterable[Text], names of additional arguments for the
      inner factory. Useful to configure variables that the converted code can
      use. Typically, these are modules.
    future_features: Iterable[Text], names of future statements to associate
      the code with.

  Returns:
    ast.AST
  """
  dummy_closure_defs = []
  for var_name in closure_vars:
    template = """
      var_name = None
    """
    dummy_closure_defs.extend(templates.replace(template, var_name=var_name))

  if future_features:
    future_imports = gast.ImportFrom(
        module='__future__',
        names=[gast.alias(name=name, asname=None) for name in future_features],
        level=0)
  else:
    future_imports = []

  factory_args = [
      gast.Name(name, ctx=gast.Param(), annotation=None, type_comment=None)
      for name in factory_args
  ]

  template = """
    future_imports
    def outer_factory_name():
      dummy_closure_defs
      def inner_factory_name(factory_args):
        entity_defs
        return entity_name
      return inner_factory_name
  """
  return templates.replace(
      template,
      dummy_closure_defs=dummy_closure_defs,
      entity_defs=nodes,
      entity_name=entity_name,
      factory_args=factory_args,
      future_imports=future_imports,
      inner_factory_name=inner_factory_name,
      outer_factory_name=outer_factory_name)


class _PythonFnFactory(object):
  """Helper object that wraps a Python function factory."""

  def __init__(self, name, freevars, extra_locals):
    """Creates a new factory for a Python function.

    Args:
      name: The function name.
      freevars: The list of non-global free variables for the function.
      extra_locals: Dict[Text, Any], names and values for custom variables
        that are accessible to the generated code as local variables.
    """
    self._name = name
    self._freevars = freevars
    self._extra_locals = extra_locals

    self._unbound_factory = None
    self.module = None
    self.source_map = None

  def create(self,
             nodes,
             namer,
             inner_factory_name='inner_factory',
             outer_factory_name='outer_factory',
             future_features=()):
    """Initializes a function."""
    if self._unbound_factory is not None:
      raise ValueError('double initialization; create a new object instead')

    inner_factory_name = namer.new_symbol(inner_factory_name, ())
    outer_factory_name = namer.new_symbol(outer_factory_name, ())
    nodes = _wrap_into_factory(nodes, self._name, inner_factory_name,
                               outer_factory_name, self._freevars,
                               self._extra_locals.keys(), future_features)

    module, _, source_map = loader.load_ast(
        nodes, include_source_map=True)
    outer_factory = getattr(module, outer_factory_name)
    # Calling the niladic outer factory yields the inner factory, whose
    # closure cells are then rebound in `instantiate`.
    self._unbound_factory = outer_factory()
    self.module = module
    self.source_map = source_map

  def instantiate(self,
                  globals_,
                  closure,
                  defaults=None,
                  kwdefaults=None):
    """Creates a new function instance."""
    if self._unbound_factory is None:
      raise ValueError('call create first')

    factory_code = self._unbound_factory.__code__
    factory_freevars = factory_code.co_freevars
    # Reorder the supplied closure cells to match the factory's co_freevars
    # order; raises KeyError if a required name is missing.
    closure_map = dict(zip(self._freevars, closure))
    factory_closure = tuple(
        closure_map[name] for name in factory_code.co_freevars)
    if len(factory_closure) != len(closure):
      raise ValueError(
          'closure mismatch, requested {}, but source function had {}'.format(
              self._freevars, factory_freevars))

    bound_factory = types.FunctionType(
        code=factory_code,
        globals=globals_,
        name=self._name,
        argdefs=(),
        closure=factory_closure)

    # The lint override is a false positive.
    new_fn = bound_factory(**self._extra_locals)  # pylint:disable=not-callable

    if defaults:
      new_fn.__defaults__ = defaults
    if kwdefaults:
      new_fn.__kwdefaults__ = kwdefaults

    return new_fn


class GenericTranspiler(object):
  """A generic transpiler for Python functions.

  Its interface is the `transform` API, which can process Python function
  objects. Internally, it handles parsing.

  Users typically subclass this, customizing the `transform_ast` method. The
  output of transformed_ast is returned directly by `transform`. Existing
  methods like `transform_function` may also be overloaded.

  Example:

      class MyTransformer(GenericTranspiler):

        def transform_ast(self, node, ctx):
          result = <<transform node>>
          return result

      transformer = MyTransformer()

      result = transformer.transform(f, ...)
      # result is the output
  """

  def get_transformed_name(self, node):
    """Returns a name for the output function. Subclasses may override this."""
    if isinstance(node, gast.Lambda):
      return 'lam'
    elif isinstance(node, gast.FunctionDef):
      return node.name
    raise ValueError('Unknown node type {}'.format(node))

  def transform_ast(self, node, ctx):
    """Performs an actual transformation of a function's AST.

    Subclasses must implement this method, and do not usually call it.

    Args:
      node: One or more ast.AST nodes representing the AST to be transformed.
      ctx: transformer.Context.
    """
    raise NotImplementedError('subclasses must override this')

  def transform(self, obj, user_context):
    """Transforms a Python object.

    Users typically call this method.

    Args:
      obj: A Python object, function, type, etc.
      user_context: An opaque object (may be None) that is forwarded to
        transform_ast, through the ctx.user attribute.
    Returns:
      The result of calling transform_function.

    Raises:
      NotImplementedError: if the type of obj is not handled.
+ """ + if inspect.isfunction(obj) or inspect.ismethod(obj): + return self.transform_function(obj, user_context) + + raise NotImplementedError('Non-function: {}'.format(type(obj))) + + def _erase_arg_defaults(self, node): + """Erase arg default expressions, which would otherwise be unbound.""" + args = node.args + for i in range(len(args.defaults)): + args.defaults[i] = parser.parse_expression('None') + for i, d in enumerate(args.kw_defaults): + if d is not None: + args.kw_defaults[i] = parser.parse_expression('None') + return node + + def transform_module(self, mod, user_context): + """Transforms a module. + + Subclasses may override this method. The return value is opaque. + + The method receives the original AST. The result is passed as-is to the + output of `transform`. + + Args: + mod: A Python module. + user_context: An opaque object (may be None) that is forwarded to + transform_ast, through the ctx.user attribute. + Returns: + List[Tuple[Any, Any]]. By default it returns the output of transform_ast, + evaluated on each supported member, other than modules, together with a + `transformer.Context` containing information about the transformation + process. + """ + result = [] + for member in mod.__dict__.values(): + if inspect.ismodule(member): + continue # Not transforming modules recursively. + try: + result.append(self.transform(member, user_context)) + except NotImplementedError: + pass # Skip unsupported elements. + return result + + def transform_function(self, fn, user_context): + """Transforms a function. + + Subclasses may override this method. The return value is opaque. + + The method receives the original AST. The result is passed as-is to the + output of `transform`. + + Args: + fn: A function or lambda. + user_context: An opaque object (may be None) that is forwarded to + transform_ast, through the ctx.user attribute. + Returns: + Tuple[Any, Any]. 
By default it returns the output of transform_ast, + together with a `transformer.Context` containing information about the + transformation process. + """ + future_features = inspect_utils.getfutureimports(fn) + node, source = parser.parse_entity(fn, future_features=future_features) + logging.log(3, 'Source code of %s:\n\n%s\n', fn, source) + + origin_info.resolve_entity(node, source, fn) + + namespace = inspect_utils.getnamespace(fn) + namer = naming.Namer(namespace) + new_name = namer.new_symbol(self.get_transformed_name(node), ()) + entity_info = transformer.EntityInfo( + name=new_name, + source_code=source, + source_file='', + future_features=future_features, + namespace=namespace) + context = transformer.Context(entity_info, namer, user_context) + + node = self._erase_arg_defaults(node) + result = self.transform_ast(node, context) + + return result, context + + +class PyToPy(GenericTranspiler): + """A generic Python-to-Python transpiler. + + Its `transform` method offers a function-in, function-out interface. + Internally, it takes care of parsing, caching and loading of the translated + code. + + Users typically subclass this, overriding `transform_ast`. + + Usually, instances of this class are singletons, since each instance manages + its own cache. The caching can be controlled by overriding `get_caching_key`. + + Example: + + class MyTransformer(PyToPy): + + def transform_ast(self, node, ctx): + node = <> + return node + + transformer = MyTransformer() + + new_f, module, source_map = transformer.transform_function(f, ...) + # new_f is a function with signature identical to f + + The transformed function has access to the same namespace as the original + function. To allow access to internal APIs, users may inject additional + symbols by overriding `get_extra_locals`. 
+ """ + + def __init__(self): + self._cache_lock = threading.RLock() + self._cache = cache.CodeObjectCache() + + def get_extra_locals(self): + """Returns extra static local variables to be made to transformed code. + + Subclasses must override this. + + Returns: + extra_locals: A Dict[Text, Any] containing additional variables to make + available to the transformed code. + """ + raise NotImplementedError('subclasses must override this') + + def get_caching_key(self, user_context): + """Returns a unique key to use for caching. + + Subclasses must override this. + + Calls made to `transform_function` with functions that have the same code + object and caching key will return a cached instance on subsequent + invocations. + + Args: + user_context: The context object which was passed to `transform`. + + Returns: + extra_locals: A hashable. + """ + raise NotImplementedError('subclasses must override this') + + def _cached_factory(self, fn, cache_subkey): + cached_factory = self._cache[fn][cache_subkey] + logging.log(3, 'Cache hit for %s subkey %s: %s', fn, cache_subkey, + cached_factory) + return cached_factory + + def transform_function(self, fn, user_context): + """Transforms a function. See GenericTranspiler.transform_function. + + This overload wraps the parent's `transform_function`, adding caching and + facilities to instantiate the output as a Python object. It also + adds facilities to make new symbols available to the generated Python code, + visible as local variables - see `get_extra_locals`. + + Args: + fn: A function or lambda. + user_context: An opaque object (may be None) that is forwarded to + transform_ast, through the ctx.user attribute. 
+ + Returns: + A tuple: + * A function or lambda with the same signature and closure as `fn` + * The temporary module into which the transformed function was loaded + * The source map as a + Dict[origin_info.LineLocation, origin_info.OriginInfo] + """ + cache_subkey = self.get_caching_key(user_context) + + if self._cache.has(fn, cache_subkey): + # Fast path: use a lock-free check. + factory = self._cached_factory(fn, cache_subkey) + + else: + with self._cache_lock: + # Check again under lock. + if self._cache.has(fn, cache_subkey): + factory = self._cached_factory(fn, cache_subkey) + + else: + logging.log(1, '%s is not cached for subkey %s', fn, cache_subkey) + # TODO(mdan): Confusing overloading pattern. Fix. + nodes, ctx = super(PyToPy, self).transform_function(fn, user_context) + + if isinstance(nodes, gast.Lambda): + nodes = gast.Assign( + targets=[ + gast.Name( + ctx.info.name, + ctx=gast.Store(), + annotation=None, + type_comment=None) + ], + value=nodes) + else: + nodes.name = ctx.info.name + + if logging.has_verbosity(2): + logging.log(2, 'Transformed %s:\n\n%s\n', fn, parser.unparse(nodes)) + + factory = _PythonFnFactory( + ctx.info.name, fn.__code__.co_freevars, self.get_extra_locals()) + factory.create( + nodes, ctx.namer, future_features=ctx.info.future_features) + self._cache[fn][cache_subkey] = factory + + transformed_fn = factory.instantiate( + globals_=fn.__globals__, + closure=fn.__closure__ or (), + defaults=fn.__defaults__, + kwdefaults=getattr(fn, '__kwdefaults__', None)) + return transformed_fn, factory.module, factory.source_map diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d3fd4d166f3641af5bfefebf0b3c0b0a42b1f2f9 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/__pycache__/client_lib.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/__pycache__/client_lib.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..337212bc718a6a42719387b3b8ab66c91765208e Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/__pycache__/client_lib.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/__pycache__/device_lib.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/__pycache__/device_lib.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d136f0f5b73fe5de55f733bb4b24d3e10fbcf96e Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/__pycache__/device_lib.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/__pycache__/pywrap_tf_session.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/__pycache__/pywrap_tf_session.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..079f2b182533453d1825bdd5aaecb245adac5e85 Binary files /dev/null and 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/__pycache__/pywrap_tf_session.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/__pycache__/session.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/__pycache__/session.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d729037bbb2bc10d4193f1137c43b305a72474aa Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/__pycache__/session.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/__pycache__/timeline.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/__pycache__/timeline.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..53cd24224714c9836b02522ff4cace7ff88d6b57 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/__pycache__/timeline.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/_pywrap_debug_events_writer.pyi b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/_pywrap_debug_events_writer.pyi new file mode 100644 index 0000000000000000000000000000000000000000..b16b36a53d5c428e638382e41d83a5efa2374703 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/_pywrap_debug_events_writer.pyi @@ -0,0 +1,26 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +def Close(arg0: str) -> None: ... +def FlushExecutionFiles(arg0: str) -> None: ... +def FlushNonExecutionFiles(arg0: str) -> None: ... +def Init(arg0: str, arg1: str, arg2: int) -> None: ... +def RegisterDeviceAndGetId(arg0: str, arg1: str) -> int: ... +def WriteDebuggedGraph(arg0: str, arg1: object) -> None: ... +def WriteExecution(arg0: str, arg1: object) -> None: ... +def WriteGraphExecutionTrace(arg0: str, arg1: object) -> None: ... +def WriteGraphOpCreation(arg0: str, arg1: object) -> None: ... +def WriteSourceFile(arg0: str, arg1: object) -> None: ... +def WriteStackFrameWithId(arg0: str, arg1: object) -> None: ... 
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/_pywrap_debug_events_writer.so b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/_pywrap_debug_events_writer.so new file mode 100644 index 0000000000000000000000000000000000000000..4510c30f2ae2c83f9314760370429b146d7bc181 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/_pywrap_debug_events_writer.so differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/_pywrap_device_lib.pyi b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/_pywrap_device_lib.pyi new file mode 100644 index 0000000000000000000000000000000000000000..3a39ae2a23620c605e09c15fcc9a37ae59a6f15b --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/_pywrap_device_lib.pyi @@ -0,0 +1,16 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +def list_devices(arg0: object) -> list: ... 
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/_pywrap_device_lib.so b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/_pywrap_device_lib.so new file mode 100644 index 0000000000000000000000000000000000000000..4a16bd5d151c5cb1f4505be1fee66489f0ab0256 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/_pywrap_device_lib.so differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/_pywrap_events_writer.pyi b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/_pywrap_events_writer.pyi new file mode 100644 index 0000000000000000000000000000000000000000..92da35bcfe093b9c7a7853239f32132bbb844bde --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/_pywrap_events_writer.pyi @@ -0,0 +1,26 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +class EventsWriter: + def __init__(self, arg0: str) -> None: ... + def Close(self) -> Status: ... + def FileName(self) -> str: ... + def Flush(self) -> Status: ... + def InitWithSuffix(self, arg0: str) -> Status: ... + def WriteEvent(self, arg0: object) -> None: ... 
+ def _WriteSerializedEvent(self, arg0: str) -> None: ... + +class Status: + def __init__(self, *args, **kwargs) -> None: ... diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/_pywrap_events_writer.so b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/_pywrap_events_writer.so new file mode 100644 index 0000000000000000000000000000000000000000..67bda872334d4ac8190ec313b049602130d6b1f2 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/_pywrap_events_writer.so differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/_pywrap_tf_session.pyi b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/_pywrap_tf_session.pyi new file mode 100644 index 0000000000000000000000000000000000000000..14645b34c5f5be3641d799efc45ca88b1afd3280 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/_pywrap_tf_session.pyi @@ -0,0 +1,455 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +from typing import Any, ClassVar, Iterator, Optional + +from typing import overload +TF_ABORTED: TF_Code +TF_BFLOAT16: TF_DataType +TF_BOOL: TF_DataType +TF_CANCELLED: TF_Code +TF_COMPLEX: TF_DataType +TF_COMPLEX128: TF_DataType +TF_COMPLEX64: TF_DataType +TF_DATA_LOSS: TF_Code +TF_DEADLINE_EXCEEDED: TF_Code +TF_DOUBLE: TF_DataType +TF_FAILED_PRECONDITION: TF_Code +TF_FLOAT: TF_DataType +TF_HALF: TF_DataType +TF_INT16: TF_DataType +TF_INT32: TF_DataType +TF_INT64: TF_DataType +TF_INT8: TF_DataType +TF_INTERNAL: TF_Code +TF_INVALID_ARGUMENT: TF_Code +TF_OK: TF_Code +TF_OUT_OF_RANGE: TF_Code +TF_PERMISSION_DENIED: TF_Code +TF_QINT16: TF_DataType +TF_QINT32: TF_DataType +TF_QINT8: TF_DataType +TF_QUINT16: TF_DataType +TF_QUINT8: TF_DataType +TF_RESOURCE: TF_DataType +TF_RESOURCE_EXHAUSTED: TF_Code +TF_STRING: TF_DataType +TF_UINT16: TF_DataType +TF_UINT32: TF_DataType +TF_UINT64: TF_DataType +TF_UINT8: TF_DataType +TF_UNAUTHENTICATED: TF_Code +TF_UNIMPLEMENTED: TF_Code +TF_UNKNOWN: TF_Code +TF_VARIANT: TF_DataType + +class ItemsView: + def __init__(self, *args, **kwargs) -> None: ... + def __iter__(self) -> Iterator: ... + def __len__(self) -> int: ... + +class KeysView: + def __init__(self, *args, **kwargs) -> None: ... + def __contains__(self, arg0: object) -> bool: ... + def __iter__(self) -> Iterator: ... + def __len__(self) -> int: ... + +class OpsById: + def __init__(self) -> None: ... + def items(self) -> ItemsView: ... + def keys(self) -> KeysView: ... + def values(self) -> ValuesView: ... + def __bool__(self) -> bool: ... + @overload + def __contains__(self, arg0: int) -> bool: ... + @overload + def __contains__(self, arg0: object) -> bool: ... + def __delitem__(self, arg0: int) -> None: ... + def __getitem__(self, arg0: int) -> object: ... + def __iter__(self) -> Iterator[int]: ... + def __len__(self) -> int: ... + def __setitem__(self, arg0: int, arg1: object) -> None: ... 
+ +class OpsByName: + def __init__(self) -> None: ... + def items(self) -> ItemsView: ... + def keys(self) -> KeysView: ... + def values(self) -> ValuesView: ... + def __bool__(self) -> bool: ... + @overload + def __contains__(self, arg0: str) -> bool: ... + @overload + def __contains__(self, arg0: object) -> bool: ... + def __delitem__(self, arg0: str) -> None: ... + def __getitem__(self, arg0: str) -> object: ... + def __iter__(self) -> Iterator[str]: ... + def __len__(self) -> int: ... + def __setitem__(self, arg0: str, arg1: object) -> None: ... + +class PyGraph: + @classmethod + def __init__(cls, *args, **kwargs) -> None: ... + @classmethod + def Dismantle(cls, *args, **kwargs) -> Any: ... + @classmethod + def _add_op(cls, *args, **kwargs) -> Any: ... + @classmethod + def _get_operation_by_name(cls, *args, **kwargs) -> Any: ... + @classmethod + def _op_def_for_type(cls, *args, **kwargs) -> Any: ... + @classmethod + def get_operations(cls, *args, **kwargs) -> Any: ... + @classmethod + def new_operations(cls, *args, **kwargs) -> Any: ... + @classmethod + def num_operations(cls, *args, **kwargs) -> Any: ... + @property + def _nodes_by_id(self) -> OpsById: ... + @property + def _nodes_by_name(self) -> OpsByName: ... + @property + def _version_def(self) -> bytes: ... + @property + def operations(self) -> list: ... + @property + def version(self) -> int: ... + +class PyOperation: + graph: object + @classmethod + def __init__(cls, *args, **kwargs) -> None: ... + @classmethod + def _add_control_input(cls, *args, **kwargs) -> Any: ... + @classmethod + def _add_control_inputs(cls, *args, **kwargs) -> Any: ... + @classmethod + def _add_outputs(cls, *args, **kwargs) -> Any: ... + @classmethod + def _init_outputs(cls, *args, **kwargs) -> Any: ... + @classmethod + def _remove_all_control_inputs(cls, *args, **kwargs) -> Any: ... + @classmethod + def _set_device_from_string(cls, *args, **kwargs) -> Any: ... + @classmethod + def _tf_input(cls, *args, **kwargs) -> Any: ... 
+ @classmethod + def _tf_output(cls, *args, **kwargs) -> Any: ... + @property + def _c_op(self) -> TF_Operation: ... + @property + def _control_outputs(self) -> list: ... + @property + def _is_stateful(self) -> bool: ... + @property + def _node_def(self) -> bytes: ... + @property + def _op_def(self) -> bytes: ... + @property + def control_inputs(self) -> list: ... + @property + def device(self) -> str: ... + @property + def name(self) -> str: ... + @property + def outputs(self) -> list: ... + @property + def type(self) -> str: ... + +class PyTensor: + _id: object + _name: object + _shape_val: object + @classmethod + def __init__(cls, *args, **kwargs) -> None: ... + @classmethod + def _as_tf_output(cls, *args, **kwargs) -> Any: ... + @classmethod + def _rank(cls, *args, **kwargs) -> Any: ... + @classmethod + def _set_shape(cls, *args, **kwargs) -> Any: ... + @classmethod + def consumers(cls, *args, **kwargs) -> Any: ... + @property + def _dtype(self) -> object: ... + @property + def _op(self) -> object: ... + @property + def _shape(self) -> object: ... + @property + def device(self) -> str: ... + @property + def graph(self) -> object: ... + @property + def ndim(self) -> int: ... + @property + def op(self) -> object: ... + @property + def value_index(self) -> int: ... + +class TF_ApiDefMap: + def __init__(self, *args, **kwargs) -> None: ... + +class TF_Buffer: + def __init__(self, *args, **kwargs) -> None: ... + +class TF_Code: + __members__: ClassVar[dict] = ... # read-only + TF_ABORTED: ClassVar[TF_Code] = ... + TF_CANCELLED: ClassVar[TF_Code] = ... + TF_DATA_LOSS: ClassVar[TF_Code] = ... + TF_DEADLINE_EXCEEDED: ClassVar[TF_Code] = ... + TF_FAILED_PRECONDITION: ClassVar[TF_Code] = ... + TF_INTERNAL: ClassVar[TF_Code] = ... + TF_INVALID_ARGUMENT: ClassVar[TF_Code] = ... + TF_OK: ClassVar[TF_Code] = ... + TF_OUT_OF_RANGE: ClassVar[TF_Code] = ... + TF_PERMISSION_DENIED: ClassVar[TF_Code] = ... + TF_RESOURCE_EXHAUSTED: ClassVar[TF_Code] = ... 
+ TF_UNAUTHENTICATED: ClassVar[TF_Code] = ... + TF_UNIMPLEMENTED: ClassVar[TF_Code] = ... + TF_UNKNOWN: ClassVar[TF_Code] = ... + __entries: ClassVar[dict] = ... + def __init__(self, value: int) -> None: ... + def __eq__(self, other: object) -> bool: ... + def __getstate__(self) -> int: ... + def __hash__(self) -> int: ... + def __index__(self) -> int: ... + def __int__(self) -> int: ... + def __ne__(self, other: object) -> bool: ... + def __setstate__(self, state: int) -> None: ... + @property + def name(self) -> str: ... + @property + def value(self) -> int: ... + +class TF_DataType: + __members__: ClassVar[dict] = ... # read-only + TF_BFLOAT16: ClassVar[TF_DataType] = ... + TF_BOOL: ClassVar[TF_DataType] = ... + TF_COMPLEX: ClassVar[TF_DataType] = ... + TF_COMPLEX128: ClassVar[TF_DataType] = ... + TF_COMPLEX64: ClassVar[TF_DataType] = ... + TF_DOUBLE: ClassVar[TF_DataType] = ... + TF_FLOAT: ClassVar[TF_DataType] = ... + TF_HALF: ClassVar[TF_DataType] = ... + TF_INT16: ClassVar[TF_DataType] = ... + TF_INT32: ClassVar[TF_DataType] = ... + TF_INT64: ClassVar[TF_DataType] = ... + TF_INT8: ClassVar[TF_DataType] = ... + TF_QINT16: ClassVar[TF_DataType] = ... + TF_QINT32: ClassVar[TF_DataType] = ... + TF_QINT8: ClassVar[TF_DataType] = ... + TF_QUINT16: ClassVar[TF_DataType] = ... + TF_QUINT8: ClassVar[TF_DataType] = ... + TF_RESOURCE: ClassVar[TF_DataType] = ... + TF_STRING: ClassVar[TF_DataType] = ... + TF_UINT16: ClassVar[TF_DataType] = ... + TF_UINT32: ClassVar[TF_DataType] = ... + TF_UINT64: ClassVar[TF_DataType] = ... + TF_UINT8: ClassVar[TF_DataType] = ... + TF_VARIANT: ClassVar[TF_DataType] = ... + __entries: ClassVar[dict] = ... + def __init__(self, value: int) -> None: ... + def __eq__(self, other: object) -> bool: ... + def __getstate__(self) -> int: ... + def __hash__(self) -> int: ... + def __index__(self) -> int: ... + def __int__(self) -> int: ... + def __ne__(self, other: object) -> bool: ... + def __setstate__(self, state: int) -> None: ... 
+ @property + def name(self) -> str: ... + @property + def value(self) -> int: ... + +class TF_DeprecatedSession: + def __init__(self, *args, **kwargs) -> None: ... + +class TF_ImportGraphDefOptions: + def __init__(self, *args, **kwargs) -> None: ... + +class TF_ImportGraphDefResults: + def __init__(self, *args, **kwargs) -> None: ... + +class TF_Input: + index: int + oper: TF_Operation + def __init__(self) -> None: ... + +class TF_Library: + def __init__(self, *args, **kwargs) -> None: ... + +class TF_Operation: + def __init__(self, *args, **kwargs) -> None: ... + +class TF_OperationDescription: + def __init__(self, *args, **kwargs) -> None: ... + +class TF_Output: + index: int + oper: TF_Operation + def __init__(self) -> None: ... + +class TF_Server: + def __init__(self, *args, **kwargs) -> None: ... + +class TF_Session: + def __init__(self, *args, **kwargs) -> None: ... + +class TF_SessionOptions: + def __init__(self, *args, **kwargs) -> None: ... + +class TF_Status: + def __init__(self, *args, **kwargs) -> None: ... + +class ValuesView: + def __init__(self, *args, **kwargs) -> None: ... + def __iter__(self) -> Iterator: ... + def __len__(self) -> int: ... + +def AddWhileInputHack(arg0: PyGraph, arg1: TF_Output, arg2: TF_Operation) -> None: ... +def ClearAttr(arg0: PyGraph, arg1: TF_Operation, arg2: str) -> None: ... +@overload +def EqualAttrValueWrapper(arg0: str, arg1: str) -> str: ... +@overload +def EqualAttrValueWrapper(arg0: str, arg1: str) -> str: ... +def EqualGraphDefWrapper(arg0: str, arg1: str) -> str: ... +def ExtendSession(arg0: TF_Session) -> None: ... +def GetHandleShapeAndType(arg0: PyGraph, arg1: TF_Output) -> bytes: ... +def GetOperationInputs(arg0: TF_Operation) -> list[TF_Output]: ... +def SetAttr(arg0: PyGraph, arg1: TF_Operation, arg2: str, arg3: TF_Buffer) -> None: ... +def SetFullType(arg0: PyGraph, arg1: TF_Operation, arg2: TF_Buffer) -> None: ... +def SetHandleShapeAndType(arg0: PyGraph, arg1: TF_Output, arg2: bytes) -> None: ... 
+def TF_AddControlInput(arg0: TF_OperationDescription, arg1: TF_Operation) -> None: ... +def TF_AddInput(arg0: TF_OperationDescription, arg1: TF_Output) -> None: ... +def TF_AddInputList(arg0: TF_OperationDescription, arg1: object) -> None: ... +def TF_ApiDefMapGet(arg0: TF_ApiDefMap, arg1: str, arg2: int) -> TF_Buffer: ... +def TF_ApiDefMapPut(arg0: TF_ApiDefMap, arg1: str, arg2: int) -> None: ... +def TF_CloseSession(arg0: TF_Session) -> None: ... +def TF_CreatePlaceholders(arg0: PyGraph, arg1: object, arg2: str) -> list[TF_Output]: ... +def TF_DeleteApiDefMap(arg0: TF_ApiDefMap) -> None: ... +def TF_DeleteBuffer(arg0: TF_Buffer) -> None: ... +@overload +def TF_DeleteDeviceList(arg0: TF_DeviceList) -> None: ... +@overload +def TF_DeleteDeviceList(arg0: TF_DeviceList) -> None: ... +def TF_DeleteFunction(arg0: TF_Function) -> None: ... +def TF_DeleteImportGraphDefOptions(arg0: TF_ImportGraphDefOptions) -> None: ... +def TF_DeleteImportGraphDefResults(arg0: TF_ImportGraphDefResults) -> None: ... +def TF_DeleteLibraryHandle(arg0: TF_Library) -> None: ... +def TF_DeleteSession(arg0: TF_Session) -> None: ... +def TF_DeleteSessionOptions(arg0: TF_SessionOptions) -> None: ... +def TF_DeleteStatus(arg0: TF_Status) -> None: ... +def TF_DeviceListCount(arg0: TF_DeviceList) -> int: ... +def TF_DeviceListIncarnation(arg0: TF_DeviceList, arg1: int) -> int: ... +def TF_DeviceListMemoryBytes(arg0: TF_DeviceList, arg1: int) -> int: ... +def TF_DeviceListName(arg0: TF_DeviceList, arg1: int) -> str: ... +def TF_DeviceListType(arg0: TF_DeviceList, arg1: int) -> str: ... +def TF_FinishOperation(arg0: TF_OperationDescription) -> TF_Operation: ... +def TF_FunctionImportFunctionDef(arg0: bytes) -> TF_Function: ... +def TF_FunctionImportFunctionDefNoSerialization(arg0) -> TF_Function: ... +def TF_FunctionSetAttrValueProto(arg0: TF_Function, arg1: str, arg2: bytes) -> None: ... +def TF_FunctionToFunctionDef(arg0: TF_Function, arg1: TF_Buffer) -> None: ... 
# Introspection getters: serialized OpList / kernel registries and XLA flags.
def TF_GetAllOpList() -> TF_Buffer: ...
def TF_GetAllRegisteredKernels() -> TF_Buffer: ...
def TF_GetBuffer(arg0: TF_Buffer) -> object: ...
def TF_GetCode(arg0: TF_Status) -> TSL_Code: ...
def TF_GetOpList(arg0: TF_Library) -> object: ...
def TF_GetRegisteredKernelsForOp(arg0: str) -> TF_Buffer: ...
def TF_GetXlaAutoJitEnabled() -> int: ...
def TF_GetXlaConstantFoldingDisabled() -> int: ...

# Graph-level operations: function copies, GraphDef import/export.
def TF_GraphCopyFunction(arg0: PyGraph, arg1: TF_Function, arg2: TF_Function) -> None: ...
def TF_GraphImportGraphDefWithResults(arg0: PyGraph, arg1: TF_Buffer, arg2: TF_ImportGraphDefOptions) -> TF_ImportGraphDefResults: ...
def TF_GraphImportGraphDefWithResultsNoSerialization(arg0: PyGraph, arg1, arg2: TF_ImportGraphDefOptions) -> TF_ImportGraphDefResults: ...
def TF_GraphNextOperation(arg0: PyGraph, arg1: int) -> tuple: ...
def TF_GraphRemoveFunction(arg0: PyGraph, arg1: str) -> None: ...
def TF_GraphSetOutputHandleShapesAndTypes_wrapper(arg0: PyGraph, arg1: TF_Output, arg2: list[Optional[list[int]]], arg3: list[int], arg4: object) -> None: ...
def TF_GraphToFunction_wrapper(arg0: PyGraph, arg1: str, arg2: bool, arg3: Optional[list[TF_Operation]], arg4: list[TF_Output], arg5: list[TF_Output], arg6: list[bytes], arg7: list[TF_Operation], arg8: list[bytes], arg9: None, arg10: str) -> TF_Function: ...
def TF_GraphToGraphDef(arg0: PyGraph, arg1: TF_Buffer) -> None: ...
def TF_GraphToGraphDefPybind(*args, **kwargs) -> Any: ...

# GraphDef-import options: input remapping, return values, prefixing, etc.
def TF_ImportGraphDefOptionsAddInputMapping(arg0: TF_ImportGraphDefOptions, arg1: str, arg2: int, arg3: TF_Output) -> None: ...
def TF_ImportGraphDefOptionsAddReturnOperation(arg0: TF_ImportGraphDefOptions, arg1: str) -> None: ...
def TF_ImportGraphDefOptionsAddReturnOutput(arg0: TF_ImportGraphDefOptions, arg1: str, arg2: int) -> None: ...
def TF_ImportGraphDefOptionsRemapControlDependency(arg0: TF_ImportGraphDefOptions, arg1: str, arg2: TF_Operation) -> None: ...
def TF_ImportGraphDefOptionsSetPrefix(arg0: TF_ImportGraphDefOptions, arg1: str) -> None: ...
def TF_ImportGraphDefOptionsSetPropagateDeviceSpec(arg0: TF_ImportGraphDefOptions, arg1: int) -> None: ...
def TF_ImportGraphDefOptionsSetUniquifyNames(arg0: TF_ImportGraphDefOptions, arg1: int) -> None: ...
def TF_ImportGraphDefOptionsSetValidateColocationConstraints(arg0: TF_ImportGraphDefOptions, arg1: int) -> None: ...

# GraphDef-import results.
def TF_ImportGraphDefResultsMissingUnusedInputMappings_wrapper(arg0: TF_ImportGraphDefResults) -> list[str]: ...
def TF_ImportGraphDefResultsReturnOperations(arg0: TF_ImportGraphDefResults) -> list: ...
def TF_ImportGraphDefResultsReturnOutputs(arg0: TF_ImportGraphDefResults) -> list: ...

# Dynamic library loading.
def TF_LoadLibrary(arg0: str) -> TF_Library: ...
def TF_LoadPluggableDeviceLibrary(arg0: str) -> TF_Library: ...

# Constructors for C-API handles.
def TF_NewApiDefMap(arg0: TF_Buffer) -> TF_ApiDefMap: ...
def TF_NewBuffer() -> TF_Buffer: ...
def TF_NewBufferFromString(arg0: bytes) -> TF_Buffer: ...
def TF_NewImportGraphDefOptions() -> TF_ImportGraphDefOptions: ...
def TF_NewOperation(arg0: PyGraph, arg1: str, arg2: str) -> TF_OperationDescription: ...
def TF_NewServer(arg0: bytes) -> TF_Server: ...
def TF_NewSession(arg0: PyGraph, arg1: TF_SessionOptions) -> TF_Session: ...
def TF_NewSessionRef(arg0: PyGraph, arg1: TF_SessionOptions) -> TF_Session: ...
def TF_NewStatus() -> TF_Status: ...

# Per-operation accessors.
def TF_OperationDevice(arg0: TF_Operation) -> str: ...
def TF_OperationGetAttrBool(arg0: TF_Operation, arg1: str) -> object: ...
def TF_OperationGetAttrInt(arg0: TF_Operation, arg1: str) -> object: ...
def TF_OperationGetAttrType(arg0: TF_Operation, arg1: str) -> TF_DataType: ...
def TF_OperationGetAttrValueProto(arg0: TF_Operation, arg1: str, arg2: TF_Buffer) -> None: ...
def TF_OperationGetControlOutputs_wrapper(arg0: TF_Operation) -> list[TF_Operation]: ...
def TF_OperationGetStackTrace(arg0: TF_Operation) -> object: ...
def TF_OperationInputType(arg0: TF_Input) -> TF_DataType: ...
def TF_OperationName(arg0: TF_Operation) -> str: ...
def TF_OperationNumInputs(arg0: TF_Operation) -> int: ...
def TF_OperationNumOutputs(arg0: TF_Operation) -> int: ...
def TF_OperationOpType(arg0: TF_Operation) -> str: ...
def TF_OperationOutputType(arg0: TF_Output) -> TF_DataType: ...
def TF_OperationToNodeDef(arg0: TF_Operation, arg1: TF_Buffer) -> None: ...

# Plugin / filesystem registration and container reset.
def TF_PluggableDeviceLibraryHandle(arg0: TF_Library) -> None: ...
def TF_RegisterFilesystemPlugin(arg0: str) -> None: ...
def TF_Reset_wrapper(arg0: TF_SessionOptions, arg1: list[bytes]) -> None: ...

# Distributed-runtime server lifecycle.
def TF_ServerJoin(arg0: TF_Server) -> None: ...
def TF_ServerStart(arg0: TF_Server) -> None: ...
def TF_ServerStop(arg0: TF_Server) -> None: ...
def TF_ServerTarget(arg0: TF_Server) -> str: ...

# Session execution: device listing, callables, (partial) runs.
def TF_SessionListDevices(arg0: TF_Session) -> TF_DeviceList: ...
def TF_SessionMakeCallable(arg0: TF_Session, arg1: TF_Buffer) -> int: ...
def TF_SessionPRunSetup_wrapper(arg0: TF_Session, arg1: list[TF_Output], arg2: list[TF_Output], arg3: list[TF_Operation]) -> str: ...
def TF_SessionPRun_wrapper(arg0: TF_Session, arg1: str, arg2: object, arg3: list[TF_Output]) -> object: ...
def TF_SessionReleaseCallable(arg0: TF_Session, arg1: int) -> None: ...
def TF_SessionRunCallable(arg0: TF_Session, arg1: int, arg2: object, arg3: TF_Buffer) -> list: ...
def TF_SessionRun_wrapper(arg0: TF_Session, arg1: TF_Buffer, arg2: object, arg3: list[TF_Output], arg4: list[TF_Operation], arg5: TF_Buffer) -> object: ...

# Attribute / device setters and XLA JIT configuration.
def TF_SetAttrValueProto(arg0: TF_OperationDescription, arg1: str, arg2: bytes) -> None: ...
def TF_SetDevice(arg0: TF_OperationDescription, arg1: str) -> None: ...
def TF_SetOpStackTrace(arg0: TF_Operation, arg1) -> None: ...
def TF_SetTfXlaCpuGlobalJit(arg0: int) -> int: ...
def TF_SetXlaAutoJitMode(arg0: str) -> None: ...
def TF_SetXlaConstantFoldingDisabled(arg0: int) -> None: ...
def TF_SetXlaEnableLazyCompilation(arg0: int) -> int: ...
def TF_SetXlaMinClusterSize(arg0: int) -> None: ...
# Constant folding, edge rewiring, and session-options setters. The leading
# underscore names are private pybind exports re-exported explicitly by
# pywrap_tf_session.py (star-import skips underscore names).
def TF_TryEvaluateConstant_wrapper(arg0: PyGraph, arg1: TF_Output) -> object: ...
def UpdateEdge(arg0: PyGraph, arg1: TF_Output, arg2: TF_Input) -> None: ...
def _TF_NewSessionOptions() -> TF_SessionOptions: ...
def _TF_SetConfig(arg0: TF_SessionOptions, arg1: bytes) -> None: ...
def _TF_SetTarget(arg0: TF_SessionOptions, arg1: str) -> None: ...

# Build/version metadata getters (wrapped into module attributes by
# pywrap_tf_session.py).
def get_compiler_version() -> str: ...
def get_cxx11_abi_flag() -> int: ...
def get_cxx_version() -> int: ...
def get_eigen_max_align_bytes() -> int: ...
def get_git_version() -> str: ...
def get_graph_def_version() -> int: ...
def get_graph_def_version_min_consumer() -> int: ...
def get_graph_def_version_min_producer() -> int: ...
def get_monolithic_build() -> int: ...
def get_tensor_handle_key() -> str: ...
def get_version() -> str: ...
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/_pywrap_tf_session.so b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/_pywrap_tf_session.so
new file mode 100644
index 0000000000000000000000000000000000000000..287f3b04ed3332a09ce79223c2230ac43bee44ce
--- /dev/null
+++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/_pywrap_tf_session.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b4e4ed02c661591313ed8b19cd2ea32042c169995e4952c6762a2b96c44395b
+size 2883720
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/client_lib.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/client_lib.py
new file mode 100644
index 0000000000000000000000000000000000000000..83bfbd6436d3524b43fae9ba48eca43a8544bc57
--- /dev/null
+++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/client_lib.py
@@ -0,0 +1,28 @@
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Support for launching graphs and executing operations. + +See the [Client](https://www.tensorflow.org/guide/graphs) guide. +""" + +# pylint: disable=unused-import +from tensorflow.python.client.session import InteractiveSession +from tensorflow.python.client.session import Session + +from tensorflow.python.framework import errors +from tensorflow.python.framework.errors import OpError + +from tensorflow.python.framework.ops import get_default_session diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/device_lib.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/device_lib.py new file mode 100644 index 0000000000000000000000000000000000000000..06f610330cae1cf937ee4c07ba342c6a8458dd08 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/device_lib.py @@ -0,0 +1,42 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""A Python interface for creating TensorFlow servers.""" + +from tensorflow.core.framework import device_attributes_pb2 +# pylint: disable=invalid-import-order, g-bad-import-order, wildcard-import, unused-import, undefined-variable +from tensorflow.python import pywrap_tensorflow +from tensorflow.python.client import _pywrap_device_lib + + +def list_local_devices(session_config=None): + """List the available devices available in the local process. + + Args: + session_config: a session config proto or None to use the default config. + + Returns: + A list of `DeviceAttribute` protocol buffers. + """ + def _convert(pb_str): + m = device_attributes_pb2.DeviceAttributes() + m.ParseFromString(pb_str) + return m + + serialized_config = None + if session_config is not None: + serialized_config = session_config.SerializeToString() + return [ + _convert(s) for s in _pywrap_device_lib.list_devices(serialized_config) + ] diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/pywrap_tf_session.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/pywrap_tf_session.py new file mode 100644 index 0000000000000000000000000000000000000000..b4586c7d62398248b3322a89c32e0110deea14ee --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/pywrap_tf_session.py @@ -0,0 +1,70 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Python module for Session ops, vars, and functions exported by pybind11.""" + +# pylint: disable=invalid-import-order,g-bad-import-order, wildcard-import, unused-import +from tensorflow.python import pywrap_tensorflow +from tensorflow.python.client._pywrap_tf_session import * +from tensorflow.python.client._pywrap_tf_session import _TF_SetTarget +from tensorflow.python.client._pywrap_tf_session import _TF_SetConfig +from tensorflow.python.client._pywrap_tf_session import _TF_NewSessionOptions + +# Register pybind11 type caster for StackTraceWrapper/AbstractStackTrace +from tensorflow.python.util import tf_stack + +# Convert versions to strings for Python2 and keep api_compatibility_test green. +# We can remove this hack once we remove Python2 presubmits. pybind11 can only +# return unicode for Python2 even with py::str. +# https://pybind11.readthedocs.io/en/stable/advanced/cast/strings.html#returning-c-strings-to-python +# pylint: disable=undefined-variable +__version__ = str(get_version()) +__git_version__ = str(get_git_version()) +__compiler_version__ = str(get_compiler_version()) +__cxx11_abi_flag__ = get_cxx11_abi_flag() +__cxx_version__ = get_cxx_version() +__monolithic_build__ = get_monolithic_build() + +# User getters to hold attributes rather than pybind11's m.attr due to +# b/145559202. 
+GRAPH_DEF_VERSION = get_graph_def_version() +GRAPH_DEF_VERSION_MIN_CONSUMER = get_graph_def_version_min_consumer() +GRAPH_DEF_VERSION_MIN_PRODUCER = get_graph_def_version_min_producer() +TENSOR_HANDLE_KEY = get_tensor_handle_key() + +# pylint: enable=undefined-variable + + +# Disable pylint invalid name warnings for legacy functions. +# pylint: disable=invalid-name +def TF_NewSessionOptions(target=None, config=None): + # NOTE: target and config are validated in the session constructor. + opts = _TF_NewSessionOptions() + if target is not None: + _TF_SetTarget(opts, target) + if config is not None: + config_str = config.SerializeToString() + _TF_SetConfig(opts, config_str) + return opts + + +# Disable pylind undefined-variable as the variable is exported in the shared +# object via pybind11. +# pylint: disable=undefined-variable +def TF_Reset(target, containers=None, config=None): + opts = TF_NewSessionOptions(target=target, config=config) + try: + TF_Reset_wrapper(opts, containers) + finally: + TF_DeleteSessionOptions(opts) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/session.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/session.py new file mode 100644 index 0000000000000000000000000000000000000000..87b794fe094156e1b501c8b2bf1e172f6b087951 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/session.py @@ -0,0 +1,1835 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""A client interface for TensorFlow.""" + +import collections +import functools +import re +import threading + +import numpy as np +import wrapt + +from tensorflow.core.protobuf import config_pb2 +from tensorflow.core.protobuf import rewriter_config_pb2 +from tensorflow.python.client import pywrap_tf_session as tf_session +from tensorflow.python.eager import context +from tensorflow.python.eager import monitoring +from tensorflow.python.framework import device +from tensorflow.python.framework import error_interpolation +from tensorflow.python.framework import errors +from tensorflow.python.framework import indexed_slices +from tensorflow.python.framework import ops +from tensorflow.python.framework import sparse_tensor +from tensorflow.python.framework import stack +from tensorflow.python.framework import tensor +from tensorflow.python.ops import session_ops +from tensorflow.python.platform import tf_logging as logging +from tensorflow.python.training.experimental import mixed_precision_global_state +from tensorflow.python.util import compat +from tensorflow.python.util import deprecation +from tensorflow.python.util import nest +from tensorflow.python.util import numpy_compat +from tensorflow.python.util.compat import collections_abc +from tensorflow.python.util.tf_export import tf_export + + +_python_session_create_counter = monitoring.Counter( + '/tensorflow/api/python/session_create_counter', + 'Counter for number of sessions created in Python.') + + +class 
SessionInterface(object): + """Base class for implementations of TensorFlow client sessions.""" + + @property + def graph(self): + """The underlying TensorFlow graph, to be used in building Operations.""" + raise NotImplementedError('graph') + + @property + def sess_str(self): + """The TensorFlow process to which this session will connect.""" + raise NotImplementedError('sess_str') + + def run(self, fetches, feed_dict=None, options=None, run_metadata=None): + """Runs operations in the session. See `BaseSession.run()` for details.""" + raise NotImplementedError('run') + + def partial_run_setup(self, fetches, feeds=None): + """Sets up the feeds and fetches for partial runs in the session.""" + raise NotImplementedError('partial_run_setup') + + def partial_run(self, handle, fetches, feed_dict=None): + """Continues the execution with additional feeds and fetches.""" + raise NotImplementedError('partial_run') + + +def _get_indexed_slices_value_from_fetches(fetched_vals): + return indexed_slices.IndexedSlicesValue( + fetched_vals[0], fetched_vals[1], + fetched_vals[2] if len(fetched_vals) == 3 else None) + + +def _get_feeds_for_indexed_slices(feed, feed_val): + return list( + zip([feed.values, feed.indices] if feed.dense_shape is None else + [feed.values, feed.indices, feed.dense_shape], feed_val)) + + +# List of extensions supported to convert run arguments into actual fetches and +# feeds. +# +# Each element in the list is a tuple of (Type, fetch_fn, feed_fn1, feed_fn2), +# where the function signatures are: +# fetch_fn : Type -> (list of Tensors, +# lambda: list of fetched np.ndarray -> TypeVal) +# feed_fn1 : Type, TypeVal -> list of (Tensor, value) +# feed_fn2 : Type -> list of Tensors +# +# `fetch_fn` describes how to expand fetch into its +# component Tensors and how to contract the fetched results back into +# a single return value. 
+# +# Each feed function describes how to unpack a single fed value and map it to +# feeds of one or more tensors and their corresponding values: `feed_fn1` is +# used to feed a run, `feed_fn2` to set up a partial run. +# +# TODO(touts): We could reimplement these as specialized _FeedMapper +# implementations after we refactor the feed handling code to use them. +# +# Eventually, this registration could be opened up to support custom Tensor +# expansions. +# pylint: disable=g-long-lambda +_REGISTERED_EXPANSIONS = [ + # SparseTensors are fetched as SparseTensorValues. They can be fed + # SparseTensorValues or normal tuples. + (sparse_tensor.SparseTensor, lambda fetch: ([ + fetch.indices, fetch.values, fetch.dense_shape + ], lambda fetched_vals: sparse_tensor.SparseTensorValue(*fetched_vals)), + lambda feed, feed_val: list( + zip([feed.indices, feed.values, feed.dense_shape], feed_val)), + lambda feed: [feed.indices, feed.values, feed.dense_shape]), + # IndexedSlices are fetched as IndexedSlicesValues. They can be fed + # IndexedSlicesValues or normal tuples. + (indexed_slices.IndexedSlices, + lambda fetch: ([fetch.values, fetch.indices] if fetch.dense_shape is None + else [fetch.values, fetch.indices, fetch.dense_shape + ], _get_indexed_slices_value_from_fetches), + _get_feeds_for_indexed_slices, + lambda feed: [feed.values, feed.indices] if feed.dense_shape is None else + [feed.values, feed.indices, feed.dense_shape]), + # The default catches all other types and performs no expansions. 
+ (object, lambda fetch: ([fetch], lambda fetched_vals: fetched_vals[0]), + lambda feed, feed_val: [(feed, feed_val)], lambda feed: [feed]) +] + +# pylint: enable=g-long-lambda + + +def _convert_to_numpy_obj(numpy_dtype, obj): + """Explicitly convert obj based on numpy type except for string type.""" + return ( + numpy_dtype(np.array(obj).astype(numpy_dtype)) + if numpy_dtype is not object + else str(obj) + ) + + +def register_session_run_conversion_functions( + tensor_type, + fetch_function, + feed_function=None, + feed_function_for_partial_run=None, +): + """Register fetch and feed conversion functions for `tf.Session.run()`. + + This function registers a triple of conversion functions for fetching and/or + feeding values of user-defined types in a call to tf.Session.run(). + + An example + + ```python + class SquaredTensor(object): + def __init__(self, tensor): + self.sq = tf.square(tensor) + #you can define conversion functions as follows: + fetch_function = lambda squared_tensor:([squared_tensor.sq], + lambda val: val[0]) + feed_function = lambda feed, feed_val: [(feed.sq, feed_val)] + feed_function_for_partial_run = lambda feed: [feed.sq] + #then after invoking this register function, you can use as follows: + session.run(squared_tensor1, + feed_dict = {squared_tensor2 : some_numpy_array}) + ``` + + Args: + tensor_type: The type for which you want to register a conversion function. + fetch_function: A callable that takes an object of type `tensor_type` and + returns a tuple, where the first element is a list of `tf.Tensor` objects, + and the second element is a callable that takes a list of ndarrays and + returns an object of some value type that corresponds to `tensor_type`. + fetch_function describes how to expand fetch into its component Tensors + and how to contract the fetched results back into a single return value. 
+ feed_function: A callable that takes feed_key and feed_value as input, and + returns a list of tuples (feed_tensor, feed_val), feed_key must have type + `tensor_type`, and feed_tensor must have type `tf.Tensor`. Each feed + function describes how to unpack a single fed value and map it to feeds of + one or more tensors and their corresponding values. + feed_function_for_partial_run: A callable for specifying tensor values to + feed when setting up a partial run, which takes a `tensor_type` type + object as input, and returns a list of Tensors. + + Raises: + ValueError: If `tensor_type` has already been registered. + """ + for conversion_function in _REGISTERED_EXPANSIONS: + if issubclass(conversion_function[0], tensor_type): + raise ValueError(f'{tensor_type} has already been registered so ignore ' + 'it.') + + _REGISTERED_EXPANSIONS.insert(0, (tensor_type, fetch_function, feed_function, + feed_function_for_partial_run)) + + +def _is_attrs_instance(obj): + """Returns True if the given obj is an instance of attrs-decorated class.""" + return getattr(obj.__class__, '__attrs_attrs__', None) is not None + + +def _get_attrs_values(obj): + """Returns the list of values from an attrs instance.""" + attrs = getattr(obj.__class__, '__attrs_attrs__') + return [getattr(obj, a.name) for a in attrs] + + +class _FetchMapper(object): + """Definition of the interface provided by fetch mappers. + + Fetch mappers are utility classes used by the _FetchHandler to handle + arbitrary structures for the `fetch` argument to `Session.run()`. + + The `fetch` argument can be of various shapes: single tensor or op, list of + fetches, tuple of fetches, namedtuple of fetches, or dict of fetches. The + structures can be arbitrarily nested. + + The low level run() API only wants a list of tensor or op names. The various + `_FetchMapper` subclasses below take care of handling the different shapes: + uniquifying the fetches, and constructing results with the original shape. 
+ """ + + def unique_fetches(self): + """Return the list of unique tensors or ops needed by this fetch mapper. + + Returns: + A list of tensors or ops. + """ + raise NotImplementedError( + 'unique_fetches must be implemented by subclasses') + + def build_results(self, values): + """Build results that match the original shape of the fetch. + + Args: + values: List of values returned by run(). The values correspond exactly to + the list tensors or ops returned by unique_fetches(). + + Returns: + A struct of the same shape as the original fetch object handled by + this fetch mapper. In the returned struct, the original fetches are + replaced by their fetched values. + """ + raise NotImplementedError('build_results must be implemented by subclasses') + + @staticmethod + def for_fetch(fetch): + """Creates fetch mapper that handles the structure of `fetch`. + + The default graph must be the one from which we want to fetch values when + this function is called. + + Args: + fetch: An arbitrary fetch structure: singleton, list, tuple, namedtuple, + or dict. + + Returns: + An instance of a subclass of `_FetchMapper` that handles the shape. + """ + if fetch is None: + raise TypeError(f'Argument `fetch` = {fetch} has invalid type ' + f'"{type(fetch).__name__}". Cannot be None') + elif isinstance(fetch, (list, tuple)): + # NOTE(touts): This is also the code path for namedtuples. + return _ListFetchMapper(fetch) + elif isinstance(fetch, collections_abc.Mapping): + return _DictFetchMapper(fetch) + elif _is_attrs_instance(fetch): + return _AttrsFetchMapper(fetch) + else: + # Look for a handler in the registered expansions. + for tensor_type, fetch_fn, _, _ in _REGISTERED_EXPANSIONS: + if isinstance(fetch, tensor_type): + fetches, contraction_fn = fetch_fn(fetch) + return _ElementFetchMapper(fetches, contraction_fn) + # Did not find anything. 
+ raise TypeError(f'Argument `fetch` = {fetch} has invalid type ' + f'"{type(fetch).__name__}"') + + +class _ElementFetchMapper(_FetchMapper): + """Fetch mapper for singleton tensors and ops.""" + + def __init__(self, fetches, contraction_fn): + """Creates an _ElementFetchMapper. + + This is the fetch mapper used for leaves in the fetch struct. Because of + the expansions mechanism, a leaf can actually fetch more than one tensor. + + Also note that the fetches here can be just strings (tensor or op names) or + any other object that the graph knows how to convert to a tensor, such as a + Variable. So we have to run each fetch through `as_graph_element()` to get + the corresponding tensor or op. + + Args: + fetches: List of objects, as returned by a fetch_fn defined in + _REGISTERED_EXPANSIONS. + contraction_fn: Callable as returned by a fetch_fn. + """ + self._unique_fetches = [] + for fetch in fetches: + try: + self._unique_fetches.append(ops.get_default_graph().as_graph_element( + fetch, allow_tensor=True, allow_operation=True)) + except TypeError as e: + raise TypeError(f'Argument `fetch` = {fetch} has invalid type ' + f'"{type(fetch).__name__}" must be a string or Tensor. ' + f'({str(e)})') + except ValueError as e: + raise ValueError(f'Argument `fetch` = {fetch} cannot be interpreted as ' + f'a Tensor. ({str(e)})') + except KeyError as e: + raise ValueError(f'Argument `fetch` = {fetch} cannot be interpreted as ' + f'a Tensor. ({str(e)})') + self._contraction_fn = contraction_fn + + def unique_fetches(self): + return self._unique_fetches + + def build_results(self, values): + if not values: + # 'Operation' case + return None + else: + return self._contraction_fn(values) + + +def _uniquify_fetches(fetch_mappers): + """Uniquifies fetches from a list of fetch_mappers. + + This is a utility function used by _ListFetchMapper and _DictFetchMapper. 
It + gathers all the unique fetches from a list of mappers and builds a list + containing all of them but without duplicates (unique_fetches). + + It also returns a 2-D list of integers (values_indices) indicating at which + index in unique_fetches the fetches of the mappers are located. + + This list is as follows: + values_indices[mapper_index][mapper_fetch_index] = unique_fetches_index + + Args: + fetch_mappers: list of fetch mappers. + + Returns: + A list of fetches. + A 2-D list of integers. + """ + unique_fetches = [] + value_indices = [] + seen_fetches = {} + for m in fetch_mappers: + m_value_indices = [] + for f in m.unique_fetches(): + j = seen_fetches.get(id(f)) + if j is None: + j = len(seen_fetches) + seen_fetches[id(f)] = j + unique_fetches.append(f) + m_value_indices.append(j) + value_indices.append(m_value_indices) + return unique_fetches, value_indices + + +class _ListFetchMapper(_FetchMapper): + """Fetch mapper for lists, tuples, and namedtuples.""" + + def __init__(self, fetches): + """Creates a _ListFetchMapper. + + Args: + fetches: List, tuple, or namedtuple of fetches. + """ + if isinstance(fetches, wrapt.ObjectProxy): + self._fetch_type = type(fetches.__wrapped__) + else: + self._fetch_type = type(fetches) + self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in fetches] + self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers) + + def unique_fetches(self): + return self._unique_fetches + + def build_results(self, values): + # Create the list of results for each mapper. + results = [] + for m, vi in zip(self._mappers, self._value_indices): + results.append(m.build_results([values[j] for j in vi])) + # Return a value of the original type of the fetches. + if issubclass(self._fetch_type, list): + return results + elif self._fetch_type == tuple: + return tuple(results) + else: + # This is the code path for namedtuple. 
+ return self._fetch_type(*results) + + +class _DictFetchMapper(_FetchMapper): + """Fetch mapper for dicts.""" + + def __init__(self, fetches): + """Creates a _DictFetchMapper. + + Args: + fetches: Dict of fetches. + """ + self._fetch_type = type(fetches) + if isinstance(fetches, collections.defaultdict): + self._type_ctor = functools.partial(collections.defaultdict, + fetches.default_factory) + else: + self._type_ctor = self._fetch_type + + self._keys = fetches.keys() + self._mappers = [ + _FetchMapper.for_fetch(fetch) for fetch in fetches.values() + ] + self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers) + + def unique_fetches(self): + return self._unique_fetches + + def build_results(self, values): + + def _generator(): + for k, m, vi in zip(self._keys, self._mappers, self._value_indices): + yield k, m.build_results([values[j] for j in vi]) + + return self._type_ctor(_generator()) + + +class _AttrsFetchMapper(_FetchMapper): + """Fetch mapper for attrs decorated classes.""" + + def __init__(self, fetches): + """Creates a _AttrsFetchMapper. + + Args: + fetches: An instance of an attrs decorated class. + """ + values = _get_attrs_values(fetches) + self._fetch_type = type(fetches) + self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in values] + self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers) + + def unique_fetches(self): + return self._unique_fetches + + def build_results(self, values): + results = [] + for m, vi in zip(self._mappers, self._value_indices): + results.append(m.build_results([values[j] for j in vi])) + return self._fetch_type(*results) + + +class _FetchHandler(object): + """Handler for structured fetches. + + Given a graph, a user-provided structure for fetches, and a feed dict, this + class takes care of generating a list of tensor names to fetch and op names + to run for a low level `run()` call. 
+ + Given the results of the low level run call, this class can also rebuild a + result structure matching the user-provided structure for fetches, but + containing the corresponding results. + """ + + # TODO(touts): Make this class also take care of destructuring the feed + # dict instead of doing it in the callers. + + def __init__(self, graph, fetches, feeds, feed_handles=None): + """Creates a fetch handler. + + Args: + graph: Graph of the fetches. Used to check for fetchability and to + convert all fetches to tensors or ops as needed. + fetches: An arbitrary fetch structure: singleton, list, tuple, namedtuple, + or dict. + feeds: A feed dict where keys are Tensors. + feed_handles: A dict from feed Tensors to TensorHandle objects used as + direct feeds. + """ + with graph.as_default(): + self._fetch_mapper = _FetchMapper.for_fetch(fetches) + self._fetches = [] + self._targets = [] + self._feeds = feeds + self._feed_handles = feed_handles or {} + self._ops = [] + self._fetch_handles = {} + for fetch in self._fetch_mapper.unique_fetches(): + if isinstance(fetch, ops.Operation): + self._assert_fetchable(graph, fetch) + self._targets.append(fetch) + self._ops.append(True) + else: + self._assert_fetchable(graph, fetch.op) + self._fetches.append(fetch) + self._ops.append(False) + # Remember the fetch if it is for a tensor handle. + if (isinstance(fetch, tensor.Tensor) and + (fetch.op.type == 'GetSessionHandle' or + fetch.op.type == 'GetSessionHandleV2')): + self._fetch_handles[fetch.ref()] = fetch.op.inputs[0].dtype + self._final_fetches = [x for x in self._fetches if x.ref() not in feeds] + + def _assert_fetchable(self, graph, op): + if not graph.is_fetchable(op): + raise errors.InaccessibleTensorError( + f'Operation {op.name} has been marked as not fetchable. Typically ' + 'this happens when it is defined in another function or code block. 
' + 'Use return values, explicit Python locals or TensorFlow collections ' + 'to access it.') + + def fetches(self): + """Return the unique names of tensors to fetch. + + Returns: + A list of strings. + """ + return self._final_fetches + + def targets(self): + """Return the unique names of ops to run. + + Returns: + A list of strings. + """ + return self._targets + + def build_results(self, session, tensor_values): + """Build results matching the original fetch shape. + + `tensor_values` must be a list of the same length as + the one returned by `fetches()`, and holding the requested + fetch values. + + This method builds a struct with the same shape as the original `fetches` + passed to the constructor, in which the fetches are replaced by their + fetched value. + + Args: + session: The enclosing session. Used for tensor handles. + tensor_values: List of values matching the list returned by fetches(). + + Returns: + A structure of the same shape as the original `fetches` argument but + containing tensors or None (for fetched ops). + """ + full_values = [] + assert len(self._final_fetches) == len(tensor_values) + i = 0 + j = 0 + for is_op in self._ops: + if is_op: + full_values.append(None) + else: + # If the fetch was in the feeds, use the fed value, otherwise + # use the returned value. + if self._fetches[i].ref() in self._feed_handles: + # A fetch had a corresponding direct TensorHandle feed. Call eval() + # to obtain the Tensor value from the TensorHandle. 
+ value = self._feed_handles[self._fetches[i].ref()].eval() + else: + value = self._feeds.get(self._fetches[i].ref()) + if value is None: + value = tensor_values[j] + j += 1 + dtype = self._fetch_handles.get(self._fetches[i].ref()) + if dtype: + full_values.append(session_ops.TensorHandle(value, dtype, session)) + else: + full_values.append(value) + i += 1 + assert j == len(tensor_values) + return self._fetch_mapper.build_results(full_values) + + +def _name_list(tensor_list): + """Utility function for transitioning to the new session API. + + Args: + tensor_list: a list of `Tensor`s. + + Returns: + A list of each `Tensor`s name (as byte arrays). + """ + return [compat.as_bytes(t.name) for t in tensor_list] + + +class _DeviceAttributes(object): + """Struct-like object describing a device's attributes. + + Each device has 3 key properties: + - name: the fully-qualified TensorFlow path to the device. For + example: /job:worker/replica:0/task:3/device:CPU:0 + - device_type: the type of the device (e.g. CPU, GPU, TPU, etc.) + - memory_limit_bytes: the maximum amount of memory available on the device + (in bytes). + """ + + def __init__(self, name, device_type, memory_limit_bytes, incarnation): + self._name = device.canonical_name(name) + self._device_type = device_type + self._memory_limit_bytes = memory_limit_bytes + self._incarnation = incarnation + + @property + def name(self): + return self._name + + @property + def device_type(self): + return self._device_type + + @property + def memory_limit_bytes(self): + return self._memory_limit_bytes + + @property + def incarnation(self): + return self._incarnation + + def __repr__(self): + return '_DeviceAttributes(%s, %s, %d, %d)' % ( + self.name, + self.device_type, + self.memory_limit_bytes, + self.incarnation, + ) + + +class BaseSession(SessionInterface): + """A class for interacting with a TensorFlow computation. 
+ + The BaseSession enables incremental graph building with inline + execution of Operations and evaluation of Tensors. + """ + + def __init__(self, target='', graph=None, config=None): + """Constructs a new TensorFlow session. + + Args: + target: (Optional) The TensorFlow execution engine to connect to. + graph: (Optional) The graph to be used. If this argument is None, the + default graph will be used. + config: (Optional) ConfigProto proto used to configure the session. If no + config is specified, the global default will be used. The global default + can be configured via the tf.config APIs. + + Raises: + tf.errors.OpError: Or one of its subclasses if an error occurs while + creating the TensorFlow session. + TypeError: If one of the arguments has the wrong type. + """ + _python_session_create_counter.get_cell().increase_by(1) + if graph is None: + self._graph = ops.get_default_graph() + else: + if not isinstance(graph, ops.Graph): + raise TypeError('Argument `graph` must be a tf.Graph, but got ' + f'"{type(graph).__name__}"') + self._graph = graph + + self._closed = False + + if target is not None: + try: + self._target = compat.as_bytes(target) + except TypeError: + if isinstance(target, config_pb2.ConfigProto): + raise TypeError('Argument `target` must be a string, but got ' + f'"{type(target).__name__}". 
Did you do ' + '"Session(config)" instead of ' + '"Session(config=config)"?') + raise TypeError('Argument `target` must be a string, but got ' + f'"{type(target).__name__}"') + else: + self._target = None + + self._delete_lock = threading.Lock() + self._dead_handles = [] + + if config is None: + config = context.context().config + + if not isinstance(config, config_pb2.ConfigProto): + raise TypeError('Argument `config` must be a tf.ConfigProto, but got ' + f'"{type(config).__name__}"') + + if (mixed_precision_global_state.is_mixed_precision_graph_rewrite_enabled() + and config.graph_options.rewrite_options.auto_mixed_precision != + rewriter_config_pb2.RewriterConfig.OFF): + new_config = config_pb2.ConfigProto() + new_config.CopyFrom(config) + new_config.graph_options.rewrite_options.auto_mixed_precision = ( + rewriter_config_pb2.RewriterConfig.ON) + config = new_config + elif (config.graph_options.rewrite_options.auto_mixed_precision != + rewriter_config_pb2.RewriterConfig.ON): + mixed_precision_global_state.set_non_mixed_precision_session_created(True) + + self._config = config + self._add_shapes = config.graph_options.infer_shapes + + self._session = None + opts = tf_session.TF_NewSessionOptions(target=self._target, config=config) + try: + # pylint: disable=protected-access + with self._graph._c_graph.get() as c_graph: + self._session = tf_session.TF_NewSessionRef(c_graph, opts) + # pylint: enable=protected-access + finally: + tf_session.TF_DeleteSessionOptions(opts) + + def list_devices(self): + """Lists available devices in this session. + + ```python + devices = sess.list_devices() + for d in devices: + print(d.name) + ``` + + Where: + Each element in the list has the following properties + name: A string with the full name of the device. ex: + `/job:worker/replica:0/task:3/device:CPU:0` + device_type: The type of the device (e.g. `CPU`, `GPU`, `TPU`.) + memory_limit: The maximum amount of memory available on the device. 

        Note: depending on the device, it is possible the usable memory could
        be substantially less.

    Raises:
      tf.errors.OpError: If it encounters an error (e.g. session is in an
        invalid state, or network errors occur).

    Returns:
      A list of devices in the session.
    """
    raw_device_list = tf_session.TF_SessionListDevices(self._session)
    device_list = []
    size = tf_session.TF_DeviceListCount(raw_device_list)
    for i in range(size):
      name = tf_session.TF_DeviceListName(raw_device_list, i)
      device_type = tf_session.TF_DeviceListType(raw_device_list, i)
      memory = tf_session.TF_DeviceListMemoryBytes(raw_device_list, i)
      incarnation = tf_session.TF_DeviceListIncarnation(raw_device_list, i)
      device_list.append(
          _DeviceAttributes(name, device_type, memory, incarnation))
    # The C device list is owned by this method; free it before returning.
    tf_session.TF_DeleteDeviceList(raw_device_list)
    return device_list

  def close(self):
    """Closes this session.

    Calling this method frees all resources associated with the session.

    Raises:
      tf.errors.OpError: Or one of its subclasses if an error occurs while
        closing the TensorFlow session.
    """
    # Guard so the C session is closed at most once, even if close() is
    # called repeatedly (e.g. explicitly and then again from __del__).
    if self._session and not self._closed:
      self._closed = True
      tf_session.TF_CloseSession(self._session)

  def __del__(self):
    # cleanly ignore all exceptions
    try:
      self.close()
    except Exception:  # pylint: disable=broad-except
      pass
    if self._session is not None:
      try:
        tf_session.TF_DeleteSession(self._session)
      except (AttributeError, TypeError):
        # At shutdown, `c_api_util`, `tf_session`, or
        # `tf_session.TF_DeleteSession` may have been garbage collected, causing
        # the above method calls to fail. In this case, silently leak since the
        # program is about to terminate anyway.
        pass
      self._session = None

  @property
  def graph(self):
    """The graph that was launched in this session."""
    return self._graph

  @property
  def graph_def(self):
    """A serializable version of the underlying TensorFlow graph.
+ + Returns: + A graph_pb2.GraphDef proto containing nodes for all of the Operations in + the underlying TensorFlow graph. + """ + return self._graph.as_graph_def(add_shapes=self._add_shapes) + + @property + def sess_str(self): + return self._target + + def as_default(self): + """Returns a context manager that makes this object the default session. + + Use with the `with` keyword to specify that calls to + `tf.Operation.run` or `tf.Tensor.eval` should be executed in + this session. + + ```python + c = tf.constant(..) + sess = tf.compat.v1.Session() + + with sess.as_default(): + assert tf.compat.v1.get_default_session() is sess + print(c.eval()) + ``` + + To get the current default session, use `tf.compat.v1.get_default_session`. + + *N.B.* The `as_default` context manager *does not* close the + session when you exit the context, and you must close the session + explicitly. + + ```python + c = tf.constant(...) + sess = tf.compat.v1.Session() + with sess.as_default(): + print(c.eval()) + # ... + with sess.as_default(): + print(c.eval()) + + sess.close() + ``` + + Alternatively, you can use `with tf.compat.v1.Session():` to create a + session that is automatically closed on exiting the context, + including when an uncaught exception is raised. + + *N.B.* The default session is a property of the current thread. If you + create a new thread, and wish to use the default session in that + thread, you must explicitly add a `with sess.as_default():` in that + thread's function. + + *N.B.* Entering a `with sess.as_default():` block does not affect + the current default graph. If you are using multiple graphs, and + `sess.graph` is different from the value of + `tf.compat.v1.get_default_graph`, you must explicitly enter a + `with sess.graph.as_default():` block to make `sess.graph` the default + graph. + + Returns: + A context manager using this session as the default session. 
+ """ + return stack.default_session(self) + + def run(self, fetches, feed_dict=None, options=None, run_metadata=None): + """Runs operations and evaluates tensors in `fetches`. + + This method runs one "step" of TensorFlow computation, by + running the necessary graph fragment to execute every `Operation` + and evaluate every `Tensor` in `fetches`, substituting the values in + `feed_dict` for the corresponding input values. + + The `fetches` argument may be a single graph element, or an arbitrarily + nested list, tuple, namedtuple, dict, or OrderedDict containing graph + elements at its leaves. A graph element can be one of the following types: + + * A `tf.Operation`. + The corresponding fetched value will be `None`. + * A `tf.Tensor`. + The corresponding fetched value will be a numpy ndarray containing the + value of that tensor. + * A `tf.sparse.SparseTensor`. + The corresponding fetched value will be a + `tf.compat.v1.SparseTensorValue` + containing the value of that sparse tensor. + * A `get_tensor_handle` op. The corresponding fetched value will be a + numpy ndarray containing the handle of that tensor. + * A `string` which is the name of a tensor or operation in the graph. + + The value returned by `run()` has the same shape as the `fetches` argument, + where the leaves are replaced by the corresponding values returned by + TensorFlow. + + Example: + + ```python + a = tf.constant([10, 20]) + b = tf.constant([1.0, 2.0]) + # 'fetches' can be a singleton + v = session.run(a) + # v is the numpy array [10, 20] + # 'fetches' can be a list. 
+ v = session.run([a, b]) + # v is a Python list with 2 numpy arrays: the 1-D array [10, 20] and the + # 1-D array [1.0, 2.0] + # 'fetches' can be arbitrary lists, tuples, namedtuple, dicts: + MyData = collections.namedtuple('MyData', ['a', 'b']) + v = session.run({'k1': MyData(a, b), 'k2': [b, a]}) + # v is a dict with + # v['k1'] is a MyData namedtuple with 'a' (the numpy array [10, 20]) and + # 'b' (the numpy array [1.0, 2.0]) + # v['k2'] is a list with the numpy array [1.0, 2.0] and the numpy array + # [10, 20]. + ``` + + The optional `feed_dict` argument allows the caller to override + the value of tensors in the graph. Each key in `feed_dict` can be + one of the following types: + + * If the key is a `tf.Tensor`, the + value may be a Python scalar, string, list, or numpy ndarray + that can be converted to the same `dtype` as that + tensor. Additionally, if the key is a + `tf.compat.v1.placeholder`, the shape of + the value will be checked for compatibility with the placeholder. + * If the key is a + `tf.sparse.SparseTensor`, + the value should be a + `tf.compat.v1.SparseTensorValue`. + * If the key is a nested tuple of `Tensor`s or `SparseTensor`s, the value + should be a nested tuple with the same structure that maps to their + corresponding values as above. + + Each value in `feed_dict` must be convertible to a numpy array of the dtype + of the corresponding key. + + The optional `options` argument expects a [`RunOptions`] proto. The options + allow controlling the behavior of this particular step (e.g. turning tracing + on). + + The optional `run_metadata` argument expects a [`RunMetadata`] proto. When + appropriate, the non-Tensor output of this step will be collected there. For + example, when users turn on tracing in `options`, the profiled info will be + collected into this argument and passed back. 
+ + Args: + fetches: A single graph element, a list of graph elements, or a dictionary + whose values are graph elements or lists of graph elements (described + above). + feed_dict: A dictionary that maps graph elements to values (described + above). + options: A [`RunOptions`] protocol buffer + run_metadata: A [`RunMetadata`] protocol buffer + + Returns: + Either a single value if `fetches` is a single graph element, or + a list of values if `fetches` is a list, or a dictionary with the + same keys as `fetches` if that is a dictionary (described above). + Order in which `fetches` operations are evaluated inside the call + is undefined. + + Raises: + RuntimeError: If this `Session` is in an invalid state (e.g. has been + closed). + TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type. + ValueError: If `fetches` or `feed_dict` keys are invalid or refer to a + `Tensor` that doesn't exist. + """ + options_ptr = tf_session.TF_NewBufferFromString( + compat.as_bytes(options.SerializeToString())) if options else None + run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None + + try: + result = self._run(None, fetches, feed_dict, options_ptr, + run_metadata_ptr) + if run_metadata: + proto_data = tf_session.TF_GetBuffer(run_metadata_ptr) + run_metadata.ParseFromString(compat.as_bytes(proto_data)) + finally: + if run_metadata_ptr: + tf_session.TF_DeleteBuffer(run_metadata_ptr) + if options: + tf_session.TF_DeleteBuffer(options_ptr) + return result + + @deprecation.deprecated( + '2023-06-01', + 'This function is deprecated and we do not expect adding new' + 'functionality to it. Please do not have your code depending' + 'on this function.', + ) + def partial_run(self, handle, fetches, feed_dict=None): + """Continues the execution with more feeds and fetches. + + NOTE: This function is deprecated and we do not expect adding new + functionality to it. Please do not have your code depending on this + function. 
+ + This is EXPERIMENTAL and subject to change. + + To use partial execution, a user first calls `partial_run_setup()` and + then a sequence of `partial_run()`. `partial_run_setup` specifies the + list of feeds and fetches that will be used in the subsequent + `partial_run` calls. + + The optional `feed_dict` argument allows the caller to override + the value of tensors in the graph. See run() for more information. + + Below is a simple example: + + ```python + a = array_ops.placeholder(dtypes.float32, shape=[]) + b = array_ops.placeholder(dtypes.float32, shape=[]) + c = array_ops.placeholder(dtypes.float32, shape=[]) + r1 = math_ops.add(a, b) + r2 = math_ops.multiply(r1, c) + + h = sess.partial_run_setup([r1, r2], [a, b, c]) + res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2}) + res = sess.partial_run(h, r2, feed_dict={c: res}) + ``` + + Args: + handle: A handle for a sequence of partial runs. + fetches: A single graph element, a list of graph elements, or a dictionary + whose values are graph elements or lists of graph elements (see + documentation for `run`). + feed_dict: A dictionary that maps graph elements to values (described + above). + + Returns: + Either a single value if `fetches` is a single graph element, or + a list of values if `fetches` is a list, or a dictionary with the + same keys as `fetches` if that is a dictionary + (see documentation for `run`). + + Raises: + tf.errors.OpError: Or one of its subclasses on error. + """ + # TODO(touts): Support feeding and fetching the same tensor. + return self._run(handle, fetches, feed_dict, None, None) + + @deprecation.deprecated( + '2023-06-01', + 'This function is deprecated and we do not expect adding new' + 'functionality to it. Please do not have your code depending' + 'on this function.', + ) + def partial_run_setup(self, fetches, feeds=None): + """Sets up a graph with feeds and fetches for partial run. + + NOTE: This function is deprecated and we do not expect adding new + functionality to it. 
Please do not have your code depending on this + function. + + This is EXPERIMENTAL and subject to change. + + Note that contrary to `run`, `feeds` only specifies the graph elements. + The tensors will be supplied by the subsequent `partial_run` calls. + + Args: + fetches: A single graph element, or a list of graph elements. + feeds: A single graph element, or a list of graph elements. + + Returns: + A handle for partial run. + + Raises: + RuntimeError: If this `Session` is in an invalid state (e.g. has been + closed). + TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type. + tf.errors.OpError: Or one of its subclasses if a TensorFlow error happens. + """ + + def _feed_fn(feed): + for tensor_type, _, _, feed_fn in _REGISTERED_EXPANSIONS: + if isinstance(feed, tensor_type): + return feed_fn(feed) + raise TypeError(f'Feed argument {feed} has invalid type ' + f'"{type(feed).__name__}"') + + # Check session. + if self._closed: + raise RuntimeError('Attempted to use a closed Session.') + if self.graph.version == 0: + raise RuntimeError('The Session graph is empty. Add operations to the ' + 'graph before calling run().') + + if feeds is None: + feeds = [] + # Create request. + feed_list = [] + + # Validate and process feed_list. + is_list_feed = isinstance(feeds, (list, tuple)) + if not is_list_feed: + feeds = [feeds] + for feed in feeds: + for subfeed in _feed_fn(feed): + try: + subfeed_t = self.graph.as_graph_element( + subfeed, allow_tensor=True, allow_operation=False) + # pylint: disable=protected-access + feed_list.append(subfeed_t._as_tf_output()) + # pylint: enable=protected-access + except Exception as e: + e.message = ('Cannot interpret argument `feed` key as Tensor: ' + f'{e.message}') + e.args = (e.message,) + raise e + + # Validate and process fetches. + # TODO(touts): Support feeding and fetching the same tensor. + fetch_handler = _FetchHandler(self._graph, fetches, {}) + + # Set up a graph with feeds and fetches for partial run. 
+ def _setup_fn(session, feed_list, fetch_list, target_list): + self._extend_graph() + return tf_session.TF_SessionPRunSetup_wrapper(session, feed_list, + fetch_list, target_list) + + # pylint: disable=protected-access + final_fetches = [t._as_tf_output() for t in fetch_handler.fetches()] + final_targets = [op._c_op for op in fetch_handler.targets()] + # pylint: enable=protected-access + + return self._do_call(_setup_fn, self._session, feed_list, final_fetches, + final_targets) + + def _run(self, handle, fetches, feed_dict, options, run_metadata): + """Perform either run or partial_run, depending the presence of `handle`.""" + + def _feed_fn(feed, feed_val): + for tensor_type, _, feed_fn, _ in _REGISTERED_EXPANSIONS: + if isinstance(feed, tensor_type): + return feed_fn(feed, feed_val) + raise TypeError(f'{feed} in argument `feed_dict` has invalid type ' + f'"{type(feed).__name__}"') + + # Check session. + if self._closed: + raise RuntimeError('Attempted to use a closed Session.') + if self.graph.version == 0: + raise RuntimeError('The Session graph is empty. Add operations to the ' + 'graph before calling run().') + + # Create request. + feed_dict_tensor = {} + feed_map = {} + + # Validate and process feed_dict. + feed_handles = {} + if feed_dict: + feed_dict = nest.flatten_dict_items(feed_dict) + for feed, feed_val in feed_dict.items(): + for subfeed, subfeed_val in _feed_fn(feed, feed_val): + try: + subfeed_t = self.graph.as_graph_element( + subfeed, allow_tensor=True, allow_operation=False) + except Exception as e: + raise TypeError( + f'Cannot interpret feed_dict key as Tensor: {e.args[0]}') + + if isinstance(subfeed_val, tensor.Tensor): + raise TypeError( + 'The value of a feed cannot be a tf.Tensor object. Acceptable ' + 'feed values include Python scalars, strings, lists, numpy ' + 'ndarrays, or TensorHandles. 
For reference, the tensor object ' + f'was {str(feed_val)} which was passed to the argument ' + f'`feed_dict` with key {str(feed)}.') + + subfeed_dtype = subfeed_t.dtype.as_numpy_dtype + if isinstance(subfeed_val, int) and _convert_to_numpy_obj( + subfeed_dtype, subfeed_val) != subfeed_val: + raise TypeError( + f'Type of feed value {str(subfeed_val)} with type ' + + f'{str(type(subfeed_val))} is not compatible with Tensor type ' + f'{str(subfeed_dtype)}. Try explicitly setting the type of the ' + 'feed tensor to a larger type (e.g. int64).') + + is_tensor_handle_feed = isinstance(subfeed_val, + session_ops.TensorHandle) + if is_tensor_handle_feed: + np_val = subfeed_val.to_numpy_array() + feed_handles[subfeed_t.ref()] = subfeed_val + else: + np_val = numpy_compat.np_asarray(subfeed_val, subfeed_dtype) + + if (not is_tensor_handle_feed and + not subfeed_t.get_shape().is_compatible_with(np_val.shape)): + raise ValueError( + f'Cannot feed value of shape {str(np_val.shape)} for Tensor ' + f'{subfeed_t.name}, which has shape ' + f'{str(subfeed_t.get_shape())}') + if not self.graph.is_feedable(subfeed_t): + raise ValueError(f'Tensor {subfeed_t.name} may not be fed.') + + feed_dict_tensor[subfeed_t.ref()] = np_val + feed_map[compat.as_bytes(subfeed_t.name)] = (subfeed_t, subfeed_val) + + # Create a fetch handler to take care of the structure of fetches. + fetch_handler = _FetchHandler( + self._graph, fetches, feed_dict_tensor, feed_handles=feed_handles) + + # Run request and get response. + # We need to keep the returned movers alive for the following _do_run(). + # These movers are no longer needed when _do_run() completes, and + # are deleted when `movers` goes out of scope when this _run() ends. + # TODO(yuanbyu, keveman): Revisit whether we should just treat feeding + # of a handle from a different device as an error. 
+ _ = self._update_with_movers(feed_dict_tensor, feed_map) + final_fetches = fetch_handler.fetches() + final_targets = fetch_handler.targets() + # We only want to really perform the run if fetches or targets are provided, + # or if the call is a partial run that specifies feeds. + if final_fetches or final_targets or (handle and feed_dict_tensor): + results = self._do_run(handle, final_targets, final_fetches, + feed_dict_tensor, options, run_metadata) + else: + results = [] + return fetch_handler.build_results(self, results) + + def make_callable(self, fetches, feed_list=None, accept_options=False): + """Returns a Python callable that runs a particular step. + + The returned callable will take `len(feed_list)` arguments whose types + must be compatible feed values for the respective elements of `feed_list`. + For example, if element `i` of `feed_list` is a `tf.Tensor`, the `i`th + argument to the returned callable must be a numpy ndarray (or something + convertible to an ndarray) with matching element type and shape. See + `tf.Session.run` for details of the allowable feed key and value types. + + The returned callable will have the same return type as + `tf.Session.run(fetches, ...)`. For example, if `fetches` is a `tf.Tensor`, + the callable will return a numpy ndarray; if `fetches` is a `tf.Operation`, + it will return `None`. + + Args: + fetches: A value or list of values to fetch. See `tf.Session.run` for + details of the allowable fetch types. + feed_list: (Optional.) A list of `feed_dict` keys. See `tf.Session.run` + for details of the allowable feed key types. + accept_options: (Optional.) 
If `True`, the returned `Callable` will be + able to accept `tf.compat.v1.RunOptions` and `tf.compat.v1.RunMetadata` + as optional keyword arguments `options` and `run_metadata`, + respectively, with the same syntax and semantics as `tf.Session.run`, + which is useful for certain use cases (profiling and debugging) but will + result in measurable slowdown of the `Callable`'s + performance. Default: `False`. + + Returns: + A function that when called will execute the step defined by + `feed_list` and `fetches` in this session. + + Raises: + TypeError: If `fetches` or `feed_list` cannot be interpreted + as arguments to `tf.Session.run`. + """ + if feed_list is not None: + if not isinstance(feed_list, (list, tuple)): + raise TypeError('Argument `feed_list` must be a list or tuple. ' + f'Received: feed_list={feed_list}') + # Delegate any non-empty feed lists to the existing `run()` logic. + # TODO(mrry): Refactor the feed handling logic from + # `Session._run()` so that we can convert the feeds to a list of + # strings here. + def _generic_run(*feed_args, **kwargs): + feed_dict = { + feed: feed_val for feed, feed_val in zip(feed_list, feed_args) + } + return self.run(fetches, feed_dict=feed_dict, **kwargs) + + return _generic_run + + # Ensure any changes to the graph are reflected in the runtime. + # Note that we don't need to do this on subsequent calls to the + # returned object, because the arguments to `fetches` must already be + # in the graph. + self._extend_graph() + + # Create a fetch handler to take care of the structure of fetches. 
+ fetch_handler = _FetchHandler(self._graph, fetches, {}) + # pylint: disable=protected-access + fetch_list = [t._as_tf_output() for t in fetch_handler.fetches()] + target_list = [op._c_op for op in fetch_handler.targets()] + + # pylint: enable=protected-access + + def _callable_template_with_options_and_metadata(fetch_list, + target_list, + fetch_handler, + options=None, + run_metadata=None): + """Template callable that accepts RunOptions and RunMetadata.""" + options_ptr = tf_session.TF_NewBufferFromString( + compat.as_bytes(options.SerializeToString())) if options else None + run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None + try: + results = self._call_tf_sessionrun(options_ptr, {}, fetch_list, + target_list, run_metadata_ptr) + if fetch_handler: + results = fetch_handler.build_results(self, results) + else: + results = results[0] if results else None + if run_metadata: + proto_data = tf_session.TF_GetBuffer(run_metadata_ptr) + run_metadata.ParseFromString(compat.as_bytes(proto_data)) + finally: + if run_metadata_ptr: + tf_session.TF_DeleteBuffer(run_metadata_ptr) + if options: + tf_session.TF_DeleteBuffer(options_ptr) + return results + + if accept_options: + return functools.partial(_callable_template_with_options_and_metadata, + fetch_list, target_list, fetch_handler) + elif isinstance(fetches, ops.Operation): + # Special case for fetching a single operation, because the + # function will have no return value. + assert not fetch_list + assert len(target_list) == 1 + + def _single_operation_run(): + self._call_tf_sessionrun(None, {}, [], target_list, None) + + return _single_operation_run + elif isinstance(fetches, tensor.Tensor): + # Special case for fetching a single tensor, because the + # function can return the result of `TF_Run()` directly. 
+ assert len(fetch_list) == 1 + assert not target_list + + def _single_tensor_run(): + results = self._call_tf_sessionrun(None, {}, fetch_list, [], None) + return results[0] + + return _single_tensor_run + else: + # In all other cases, we must use `fetch_handler` to build the + # results for us. + def _fetch_handler_run(): + results = self._call_tf_sessionrun(None, {}, fetch_list, target_list, + None) + return fetch_handler.build_results(self, results) + + return _fetch_handler_run + + # Captures the name of a node in an error status. The regex below matches + # both the old and the new formats: + # Old format: [[Node: = ...]] + # New format: [[{{node }} = ...]] + _NODEDEF_NAME_RE = re.compile( + r'\[\[(Node: )?(\{\{node )?([^\} ]*)(\}\})?\s*=*') + + def _do_run(self, handle, target_list, fetch_list, feed_dict, options, + run_metadata): + """Runs a step based on the given fetches and feeds. + + Args: + handle: a handle for partial_run. None if this is just a call to run(). + target_list: A list of operations to be run, but not fetched. + fetch_list: A list of tensors to be fetched. + feed_dict: A dictionary that maps tensors to numpy ndarrays. + options: A (pointer to a) [`RunOptions`] protocol buffer, or None + run_metadata: A (pointer to a) [`RunMetadata`] protocol buffer, or None + + Returns: + A list of numpy ndarrays, corresponding to the elements of + `fetch_list`. If the ith element of `fetch_list` contains the + name of an operation, the first Tensor output of that operation + will be returned for that element. + + Raises: + tf.errors.OpError: Or one of its subclasses on error. 
    """
    # Convert Python-level tensors/ops to the C-API handles the run wrappers
    # expect; feed keys arrive as Tensor references (see _run()).
    # pylint: disable=protected-access
    feeds = dict((t.deref()._as_tf_output(), v) for t, v in feed_dict.items())
    fetches = [t._as_tf_output() for t in fetch_list]
    targets = [op._c_op for op in target_list]

    # pylint: enable=protected-access

    def _run_fn(feed_dict, fetch_list, target_list, options, run_metadata):
      # Ensure any changes to the graph are reflected in the runtime.
      self._extend_graph()
      return self._call_tf_sessionrun(options, feed_dict, fetch_list,
                                      target_list, run_metadata)

    def _prun_fn(handle, feed_dict, fetch_list):
      if target_list:
        raise RuntimeError('partial_run() requires empty `target_list`. '
                           f'Received: target_list={target_list} (non-empty)')
      return self._call_tf_sessionprun(handle, feed_dict, fetch_list)

    if handle is None:
      return self._do_call(_run_fn, feeds, fetches, targets, options,
                           run_metadata)
    else:
      return self._do_call(_prun_fn, handle, feeds, fetches)

  def _do_call(self, fn, *args):
    """Calls `fn(*args)`, enriching any raised `OpError` before re-raising.

    On `OpError`, attempts to locate the failing node in this session's graph
    (parsed out of the error message) so the re-raised exception carries the
    `node_def`/`op`, and interpolates graph source locations into the message.
    """
    try:
      return fn(*args)
    except errors.OpError as e:
      message = compat.as_text(e.message)
      m = BaseSession._NODEDEF_NAME_RE.search(message)
      node_def = None
      op = None
      if m is not None:
        node_name = m.group(3)
        try:
          op = self._graph.get_operation_by_name(node_name)
          node_def = op.node_def
        except KeyError:
          # The named node is not in this graph (e.g. optimized away);
          # re-raise without node information.
          pass
      message = error_interpolation.interpolate_graph(message, self._graph)
      if 'only supports NHWC tensor format' in message:
        message += ('\nA possible workaround: Try disabling Grappler optimizer'
                    '\nby modifying the config for creating the session eg.'
                    '\nsession_config.graph_options.rewrite_options.'
                    'disable_meta_optimizer = True')
      # Re-raise as the same exception type, now carrying node_def/op and the
      # interpolated message.
      raise type(e)(node_def, op, message)  # pylint: disable=no-value-for-parameter

  def _extend_graph(self):
    # Push any graph changes made since the last run into the C runtime,
    # serialized against concurrent session.run() calls on this graph.
    with self._graph._session_run_lock():  # pylint: disable=protected-access
      tf_session.ExtendSession(self._session)

  # The threshold to run garbage collection to delete dead tensors.
  _DEAD_HANDLES_THRESHOLD = 10

  def _register_dead_handle(self, handle):
    # Register a dead handle in the session. Delete the dead tensors when
    # the number of dead tensors exceeds certain threshold.
    # Deletions are batched to amortize the cost of the extra session.run()
    # that performs them.
    tensors_to_delete = None
    with self._delete_lock:
      self._dead_handles.append(handle)
      if len(self._dead_handles) == BaseSession._DEAD_HANDLES_THRESHOLD:
        # Swap the list out under the lock; run the deletion graph below,
        # outside the lock, so we never hold it across a session.run().
        tensors_to_delete = self._dead_handles
        self._dead_handles = []
    # Delete the dead tensors.
    if tensors_to_delete:
      feeds = {}
      fetches = []
      for deleter_key, tensor_handle in enumerate(tensors_to_delete):
        holder, deleter = session_ops._get_handle_deleter(
            self.graph, deleter_key, tensor_handle)
        feeds[holder] = tensor_handle
        fetches.append(deleter)
      self.run(fetches, feed_dict=feeds)

  def _update_with_movers(self, feed_dict, feed_map):
    # If a tensor handle that is fed to a device incompatible placeholder,
    # we move the tensor to the right device, generate a new tensor handle,
    # and update `feed_dict` to use the new handle.
    handle_movers = []
    for feed_name, val in feed_map.items():
      mover = session_ops._get_handle_mover(self.graph, *val)
      if mover:
        handle_movers.append((feed_name, val[1], mover))
    # Transfer a tensor to the right device if needed.
+ if not handle_movers: + return [] + else: + feeds = {} + fetches = [] + for _, handle, mover in handle_movers: + feeds[mover[0]] = handle + fetches.append(mover[1]) + handles = self.run(fetches, feed_dict=feeds) + for handle_mover, handle in zip(handle_movers, handles): + np_val = np.array(handle.handle, dtype=np.object_) + feed_name = handle_mover[0] + feed_tensor = feed_map[feed_name][0] + feed_dict[feed_tensor.ref()] = np_val + return handles + + def _call_tf_sessionrun(self, options, feed_dict, fetch_list, target_list, + run_metadata): + return tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict, + fetch_list, target_list, + run_metadata) + + def _call_tf_sessionprun(self, handle, feed_dict, fetch_list): + return tf_session.TF_SessionPRun_wrapper(self._session, handle, feed_dict, + fetch_list) + + # pylint: disable=protected-access + class _Callable(object): + """Experimental wrapper for the C++ `Session::MakeCallable()` API.""" + + def __init__(self, session, callable_options): + self._session = session + self._handle = None + options_ptr = tf_session.TF_NewBufferFromString( + compat.as_bytes(callable_options.SerializeToString())) + try: + self._handle = tf_session.TF_SessionMakeCallable( + session._session, options_ptr) + finally: + tf_session.TF_DeleteBuffer(options_ptr) + + def __call__(self, *args, **kwargs): + run_metadata = kwargs.get('run_metadata', None) + try: + run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None + ret = tf_session.TF_SessionRunCallable(self._session._session, + self._handle, args, + run_metadata_ptr) + if run_metadata: + proto_data = tf_session.TF_GetBuffer(run_metadata_ptr) + run_metadata.ParseFromString(compat.as_bytes(proto_data)) + finally: + if run_metadata_ptr: + tf_session.TF_DeleteBuffer(run_metadata_ptr) + return ret + + def __del__(self): + # NOTE(mrry): It is possible that `self._session.__del__()` could be + # called before this destructor, in which case `self._session._session` + # 
will be `None`. + if (self._handle is not None and self._session._session is not None and + not self._session._closed): + tf_session.TF_SessionReleaseCallable(self._session._session, + self._handle) + + # pylint: enable=protected-access + + def _make_callable_from_options(self, callable_options): + """Returns a handle to a "callable" with the given options. + + Args: + callable_options: A `CallableOptions` protocol buffer message describing + the computation that will be performed by the callable. + + Returns: + A handle to the new callable. + """ + self._extend_graph() + return BaseSession._Callable(self, callable_options) + + +@tf_export(v1=['Session']) +class Session(BaseSession): + """A class for running TensorFlow operations. + + A `Session` object encapsulates the environment in which `Operation` + objects are executed, and `Tensor` objects are evaluated. For + example: + + ```python + tf.compat.v1.disable_eager_execution() # need to disable eager in TF2.x + # Build a graph. + a = tf.constant(5.0) + b = tf.constant(6.0) + c = a * b + + # Launch the graph in a session. + sess = tf.compat.v1.Session() + + # Evaluate the tensor `c`. + print(sess.run(c)) # prints 30.0 + ``` + + A session may own resources, such as + `tf.Variable`, `tf.queue.QueueBase`, + and `tf.compat.v1.ReaderBase`. It is important to release + these resources when they are no longer required. To do this, either + invoke the `tf.Session.close` method on the session, or use + the session as a context manager. The following two examples are + equivalent: + + ```python + # Using the `close()` method. + sess = tf.compat.v1.Session() + sess.run(...) + sess.close() + + # Using the context manager. + with tf.compat.v1.Session() as sess: + sess.run(...) + ``` + + The + [`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto) + protocol buffer exposes various configuration options for a + session. 
For example, to create a session that uses soft constraints + for device placement, and log the resulting placement decisions, + create a session as follows: + + ```python + # Launch the graph in a session that allows soft device placement and + # logs the placement decisions. + sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto( + allow_soft_placement=True, + log_device_placement=True)) + ``` + + @compatibility(TF2) + `Session` does not work with either eager execution or `tf.function`, and you + should not invoke it directly. To migrate code that uses sessions to TF2, + rewrite the code without it. See the + [migration + guide](https://www.tensorflow.org/guide/migrate#1_replace_v1sessionrun_calls) + on replacing `Session.run` calls. + @end_compatibility + """ + + def __init__(self, target='', graph=None, config=None): + """Creates a new TensorFlow session. + + If no `graph` argument is specified when constructing the session, + the default graph will be launched in the session. If you are + using more than one graph (created with `tf.Graph()`) in the same + process, you will have to use different sessions for each graph, + but each graph can be used in multiple sessions. In this case, it + is often clearer to pass the graph to be launched explicitly to + the session constructor. + + Args: + target: (Optional.) The execution engine to connect to. Defaults to using + an in-process engine. See + [Distributed TensorFlow](https://tensorflow.org/deploy/distributed) for + more examples. + graph: (Optional.) The `Graph` to be launched (described above). + config: (Optional.) A + [`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto) + protocol buffer with configuration options for the session. + """ + super(Session, self).__init__(target, graph, config=config) + # NOTE(mrry): Create these on first `__enter__` to avoid a reference cycle. 
+ self._default_graph_context_manager = None + self._default_session_context_manager = None + + def __enter__(self) -> 'Session': + if self._default_graph_context_manager is None: + self._default_graph_context_manager = self.graph.as_default() + else: + raise RuntimeError('Session context managers are not re-entrant. ' + 'Use `Session.as_default()` if you want to enter ' + 'a session multiple times.') + if self._default_session_context_manager is None: + self._default_session_context_manager = self.as_default() + self._default_graph_context_manager.__enter__() + return self._default_session_context_manager.__enter__() + + def __exit__(self, exec_type, exec_value, exec_tb): + if exec_type is errors.OpError: + logging.error('Session closing due to OpError: %s', (exec_value,)) + try: + self._default_session_context_manager.__exit__(exec_type, exec_value, + exec_tb) + except RuntimeError as error: + if error == exec_value: + # NOTE(skyewm): for some reason, in Python3, + # _default_session_context_manager.__exit__ will re-raise the "not + # re-entrant" exception raised in __enter__ above (note that if we're + # here, we're in the outer session context manager, since __exit__ is + # not called when __enter__ raises an exception). We still want to + # continue cleaning up this context manager before the exception is + # further propagated, so we ignore it here (note that it'll continue + # being propagated after this method completes). + pass + else: + raise + self._default_graph_context_manager.__exit__(exec_type, exec_value, exec_tb) + + self._default_session_context_manager = None + self._default_graph_context_manager = None + + # If we are closing due to an exception, set a time limit on our Close() to + # avoid blocking forever. + # TODO(b/120204635) remove this when deadlock is fixed. 
+ if exec_type: + close_thread = threading.Thread( + name='SessionCloseThread', target=self.close) + close_thread.daemon = True + close_thread.start() + close_thread.join(30.0) + if close_thread.is_alive(): + logging.error( + 'Session failed to close after 30 seconds. Continuing after this ' + 'point may leave your program in an undefined state.') + else: + self.close() + + @staticmethod + def reset(target, containers=None, config=None): + """Resets resource containers on `target`, and close all connected sessions. + + A resource container is distributed across all workers in the + same cluster as `target`. When a resource container on `target` + is reset, resources associated with that container will be cleared. + In particular, all Variables in the container will become undefined: + they lose their values and shapes. + + NOTE: + (i) reset() is currently only implemented for distributed sessions. + (ii) Any sessions on the master named by `target` will be closed. + + If no resource containers are provided, all containers are reset. + + Args: + target: The execution engine to connect to. + containers: A list of resource container name strings, or `None` if all of + all the containers are to be reset. + config: (Optional.) Protocol buffer with configuration options. + + Raises: + tf.errors.OpError: Or one of its subclasses if an error occurs while + resetting containers. + """ + if target is not None: + target = compat.as_bytes(target) + if containers is not None: + containers = [compat.as_bytes(c) for c in containers] + else: + containers = [] + tf_session.TF_Reset(target, containers, config) + + +@tf_export(v1=['InteractiveSession']) +class InteractiveSession(BaseSession): + """A TensorFlow `Session` for use in interactive contexts, such as a shell. + + The only difference with a regular `Session` is that an `InteractiveSession` + installs itself as the default session on construction. 
+ The methods `tf.Tensor.eval` + and `tf.Operation.run` + will use that session to run ops. + + This is convenient in interactive shells and [IPython + notebooks](http://ipython.org), as it avoids having to pass an explicit + `Session` object to run ops. + + For example: + + ```python + sess = tf.compat.v1.InteractiveSession() + a = tf.constant(5.0) + b = tf.constant(6.0) + c = a * b + # We can just use 'c.eval()' without passing 'sess' + print(c.eval()) + sess.close() + ``` + + Note that a regular session installs itself as the default session when it + is created in a `with` statement. The common usage in non-interactive + programs is to follow that pattern: + + ```python + a = tf.constant(5.0) + b = tf.constant(6.0) + c = a * b + with tf.compat.v1.Session(): + # We can also use 'c.eval()' here. + print(c.eval()) + ``` + """ + + _count_lock = threading.Lock() + _active_session_count = 0 # GUARDED_BY(_count_lock) + + def __init__(self, target='', graph=None, config=None): + """Creates a new interactive TensorFlow session. + + If no `graph` argument is specified when constructing the session, + the default graph will be launched in the session. If you are + using more than one graph (created with `tf.Graph()`) in the same + process, you will have to use different sessions for each graph, + but each graph can be used in multiple sessions. In this case, it + is often clearer to pass the graph to be launched explicitly to + the session constructor. + + Args: + target: (Optional.) The execution engine to connect to. Defaults to using + an in-process engine. + graph: (Optional.) The `Graph` to be launched (described above). + config: (Optional) `ConfigProto` proto used to configure the session. + """ + if not config: + # If config is not provided, choose some reasonable defaults for + # interactive use: + # + # - Grow GPU memory as needed at the cost of fragmentation. 
+ gpu_options = config_pb2.GPUOptions(allow_growth=True) + config = config_pb2.ConfigProto(gpu_options=gpu_options) + # Interactive sessions always place pruned graphs. + config.graph_options.place_pruned_graph = True + + super(InteractiveSession, self).__init__(target, graph, config) + with InteractiveSession._count_lock: + if InteractiveSession._active_session_count > 0: + logging.error( + 'An interactive session is already active. This can cause' + ' out-of-memory errors or some other unexpected errors (due to' + ' the unpredictable timing of garbage collection) in some cases.' + ' You must explicitly call `InteractiveSession.close()` to release' + ' resources held by the other session(s). Please use `tf.Session()`' + ' if you intend to productionize.' + ) + InteractiveSession._active_session_count += 1 + # NOTE(mrry): We do not use `Session._closed` here because it has unhelpful + # semantics (in particular, it is not set to true if `Session.close()` is + # called on a session that has not been "opened" by running a step) and we + # cannot change those semantics without breaking existing code. 
+ self._explicitly_closed = False + + self._default_session = self.as_default() + self._default_session.enforce_nesting = False + self._default_session.__enter__() + self._explicit_graph = graph + if self._explicit_graph is not None: + self._default_graph = graph.as_default() + self._default_graph.enforce_nesting = False + self._default_graph.__enter__() + + def close(self): + """Closes an `InteractiveSession`.""" + super(InteractiveSession, self).close() + with InteractiveSession._count_lock: + if not self._explicitly_closed: + InteractiveSession._active_session_count -= 1 + self._explicitly_closed = True + else: + return + if self._explicit_graph is not None: + self._default_graph.__exit__(None, None, None) + self._default_graph = None + self._default_session.__exit__(None, None, None) + self._default_session = None diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/timeline.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/timeline.py new file mode 100644 index 0000000000000000000000000000000000000000..9e6d6e773144c49cd9f5ed2cf232680d247b7d50 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/timeline.py @@ -0,0 +1,856 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Timeline visualization for TensorFlow using Chrome Trace Format.""" + +import collections +import copy +import json +import re +from typing import Any, Dict, List, Optional, Tuple, Union + +from tensorflow.core.framework import step_stats_pb2 +# The timeline target is usually imported as part of BUILD target +# "platform_test", which includes also includes the "platform" +# dependency. This is why the logging import here is okay. +from tensorflow.python.platform import build_info +from tensorflow.python.platform import tf_logging as logging + + +class AllocationMaximum( + collections.namedtuple( + 'AllocationMaximum', ('timestamp', 'num_bytes', 'tensors') + ) +): + """Stores the maximum allocation for a given allocator within the timelne. + + Parameters: + timestamp: `tensorflow::Env::NowMicros()` when this maximum was reached. + num_bytes: the total memory used at this time. + tensors: the set of tensors allocated at this time. + """ + + +class StepStatsAnalysis( + collections.namedtuple( + 'StepStatsAnalysis', ('chrome_trace', 'allocator_maximums') + ) +): + """Stores the step stats analysis output. + + Parameters: + chrome_trace: A dict containing the chrome trace analysis. + allocator_maximums: A dict mapping allocator names to AllocationMaximum. + """ + + +class _ChromeTraceFormatter(object): + """A helper class for generating traces in Chrome Trace Format.""" + + def __init__(self, show_memory: bool = False) -> None: + """Constructs a new Chrome Trace formatter.""" + self._show_memory = show_memory + self._events = [] + self._metadata = [] + + def _create_event( + self, + ph: str, + category: str, + name: str, + pid: int, + tid: int, + timestamp: int, + ) -> Dict[str, Union[str, int]]: + """Creates a new Chrome Trace event. 
+ + For details of the file format, see: + https://github.com/catapult-project/catapult/blob/master/tracing/README.md + + Args: + ph: The type of event - usually a single character. + category: The event category as a string. + name: The event name as a string. + pid: Identifier of the process generating this event as an integer. + tid: Identifier of the thread generating this event as an integer. + timestamp: The timestamp of this event as a long integer. + + Returns: + A JSON compatible event object. + """ + event = {} + event['ph'] = ph + event['cat'] = category + event['name'] = name + event['pid'] = pid + event['tid'] = tid + event['ts'] = timestamp + return event + + def emit_pid(self, name: str, pid: int) -> None: + """Adds a process metadata event to the trace. + + Args: + name: The process name as a string. + pid: Identifier of the process as an integer. + """ + event = {} + event['name'] = 'process_name' + event['ph'] = 'M' + event['pid'] = pid + event['args'] = {'name': name} + self._metadata.append(event) + + def emit_tid(self, name, pid, tid): + """Adds a thread metadata event to the trace. + + Args: + name: The thread name as a string. + pid: Identifier of the process as an integer. + tid: Identifier of the thread as an integer. + """ + event = {} + event['name'] = 'thread_name' + event['ph'] = 'M' + event['pid'] = pid + event['tid'] = tid + event['args'] = {'name': name} + self._metadata.append(event) + + def emit_region( + self, + timestamp: int, + duration: int, + pid: int, + tid: int, + category: str, + name: str, + args: Dict[str, Any], + ) -> None: + """Adds a region event to the trace. + + Args: + timestamp: The start timestamp of this region as a long integer. + duration: The duration of this region as a long integer. + pid: Identifier of the process generating this event as an integer. + tid: Identifier of the thread generating this event as an integer. + category: The event category as a string. + name: The event name as a string. 
+ args: A JSON-compatible dictionary of event arguments. + """ + event = self._create_event('X', category, name, pid, tid, timestamp) + event['dur'] = duration + event['args'] = args + self._events.append(event) + + def emit_obj_create( + self, + category: str, + name: str, + timestamp: int, + pid: int, + tid: int, + object_id: int, + ) -> None: + """Adds an object creation event to the trace. + + Args: + category: The event category as a string. + name: The event name as a string. + timestamp: The timestamp of this event as a long integer. + pid: Identifier of the process generating this event as an integer. + tid: Identifier of the thread generating this event as an integer. + object_id: Identifier of the object as an integer. + """ + event = self._create_event('N', category, name, pid, tid, timestamp) + event['id'] = object_id + self._events.append(event) + + def emit_obj_delete( + self, + category: str, + name: str, + timestamp: int, + pid: int, + tid: int, + object_id: int, + ) -> None: + """Adds an object deletion event to the trace. + + Args: + category: The event category as a string. + name: The event name as a string. + timestamp: The timestamp of this event as a long integer. + pid: Identifier of the process generating this event as an integer. + tid: Identifier of the thread generating this event as an integer. + object_id: Identifier of the object as an integer. + """ + event = self._create_event('D', category, name, pid, tid, timestamp) + event['id'] = object_id + self._events.append(event) + + def emit_obj_snapshot( + self, + category: str, + name: str, + timestamp: int, + pid: int, + tid: int, + object_id: int, + snapshot: Dict[str, Any], + ) -> None: + """Adds an object snapshot event to the trace. + + Args: + category: The event category as a string. + name: The event name as a string. + timestamp: The timestamp of this event as a long integer. + pid: Identifier of the process generating this event as an integer. 
+ tid: Identifier of the thread generating this event as an integer. + object_id: Identifier of the object as an integer. + snapshot: A JSON-compatible representation of the object. + """ + event = self._create_event('O', category, name, pid, tid, timestamp) + event['id'] = object_id + event['args'] = {'snapshot': snapshot} + self._events.append(event) + + def emit_flow_start( + self, name: str, timestamp: int, pid: int, tid: int, flow_id: int + ) -> None: + """Adds a flow start event to the trace. + + When matched with a flow end event (with the same 'flow_id') this will + cause the trace viewer to draw an arrow between the start and end events. + + Args: + name: The event name as a string. + timestamp: The timestamp of this event as a long integer. + pid: Identifier of the process generating this event as an integer. + tid: Identifier of the thread generating this event as an integer. + flow_id: Identifier of the flow as an integer. + """ + event = self._create_event('s', 'DataFlow', name, pid, tid, timestamp) + event['id'] = flow_id + self._events.append(event) + + def emit_flow_end( + self, name: str, timestamp: int, pid: int, tid: int, flow_id: int + ) -> None: + """Adds a flow end event to the trace. + + When matched with a flow start event (with the same 'flow_id') this will + cause the trace viewer to draw an arrow between the start and end events. + + Args: + name: The event name as a string. + timestamp: The timestamp of this event as a long integer. + pid: Identifier of the process generating this event as an integer. + tid: Identifier of the thread generating this event as an integer. + flow_id: Identifier of the flow as an integer. + """ + event = self._create_event('t', 'DataFlow', name, pid, tid, timestamp) + event['id'] = flow_id + self._events.append(event) + + def emit_counter( + self, + category: str, + name: str, + pid: int, + timestamp: int, + counter: str, + value: int, + ) -> None: + """Emits a record for a single counter. 
+ + Args: + category: The event category as a string. + name: The event name as a string. + pid: Identifier of the process generating this event as an integer. + timestamp: The timestamp of this event as a long integer. + counter: Name of the counter as a string. + value: Value of the counter as an integer. + """ + event = self._create_event('C', category, name, pid, 0, timestamp) + event['args'] = {counter: value} + self._events.append(event) + + def emit_counters(self, category, name, pid, timestamp, counters): + """Emits a counter record for the dictionary 'counters'. + + Args: + category: The event category as a string. + name: The event name as a string. + pid: Identifier of the process generating this event as an integer. + timestamp: The timestamp of this event as a long integer. + counters: Dictionary of counter values. + """ + event = self._create_event('C', category, name, pid, 0, timestamp) + event['args'] = counters.copy() + self._events.append(event) + + def format_to_string(self, pretty: bool = False) -> str: + """Formats the chrome trace to a string. + + Args: + pretty: (Optional.) If True, produce human-readable JSON output. + + Returns: + A JSON-formatted string in Chrome Trace format. + """ + trace = {} + trace['traceEvents'] = self._metadata + self._events + if pretty: + return json.dumps(trace, indent=4, separators=(',', ': ')) + else: + return json.dumps(trace, separators=(',', ':')) + + +class _TensorTracker(object): + """An internal class to track the lifetime of a Tensor.""" + + def __init__( + self, + name: str, + object_id: int, + timestamp: int, + pid: int, + allocator: str, + num_bytes: int, + ) -> None: + """Creates an object to track tensor references. + + This class is not thread safe and is intended only for internal use by + the 'Timeline' class in this file. + + Args: + name: The name of the Tensor as a string. + object_id: Chrome Trace object identifier assigned for this Tensor. 
+ timestamp: The creation timestamp of this event as a long integer. + pid: Process identifier of the associated device, as an integer. + allocator: Name of the allocator used to create the Tensor. + num_bytes: Number of bytes allocated (long integer). + + Returns: + A 'TensorTracker' object. + """ + self._name = name + self._pid = pid + self._object_id = object_id + self._create_time = timestamp + self._allocator = allocator + self._num_bytes = num_bytes + self._ref_times = [] + self._unref_times = [] + + @property + def name(self) -> str: + """Name of this tensor.""" + return self._name + + @property + def pid(self) -> int: + """ID of the process which created this tensor (an integer).""" + return self._pid + + @property + def create_time(self) -> int: + """Timestamp when this tensor was created (long integer).""" + return self._create_time + + @property + def object_id(self) -> int: + """Returns the object identifier of this tensor (integer).""" + return self._object_id + + @property + def num_bytes(self) -> int: + """Size of this tensor in bytes (long integer).""" + return self._num_bytes + + @property + def allocator(self) -> str: + """Name of the allocator used to create this tensor (string).""" + return self._allocator + + @property + def last_unref(self) -> int: + """Last unreference timestamp of this tensor (long integer).""" + return max(self._unref_times) + + def add_ref(self, timestamp: int) -> None: + """Adds a reference to this tensor with the specified timestamp. + + Args: + timestamp: Timestamp of object reference as an integer. + """ + self._ref_times.append(timestamp) + + def add_unref(self, timestamp: int) -> None: + """Adds an unref to this tensor with the specified timestamp. + + Args: + timestamp: Timestamp of object unreference as an integer. 
+ """ + self._unref_times.append(timestamp) + + +class Timeline(object): + """A class for visualizing execution timelines of TensorFlow steps.""" + + def __init__( + self, step_stats: step_stats_pb2.StepStats, graph: Optional[Any] = None + ) -> None: + """Constructs a new Timeline. + + A 'Timeline' is used for visualizing the execution of a TensorFlow + computation. It shows the timings and concurrency of execution at + the granularity of TensorFlow Ops. + This class is not thread safe. + + Args: + step_stats: The 'step_stats_pb2.StepStats' proto recording execution + times. + graph: (Optional) The 'Graph' that was executed. + """ + + self._origin_step_stats = step_stats + self._step_stats = None + self._graph = graph + self._chrome_trace = _ChromeTraceFormatter() + self._next_pid = 0 + self._device_pids = {} # device name -> pid for compute activity. + self._tensor_pids = {} # device name -> pid for tensors. + self._tensors = {} # tensor_name -> TensorTracker + self._next_flow_id = 0 + self._flow_starts = {} # tensor_name -> (timestamp, pid, tid) + self._alloc_times = {} # tensor_name -> ( time, allocator, size ) + self._allocator_maximums = {} # allocator name => maximum bytes long + + def _alloc_pid(self) -> int: + """Allocate a process Id.""" + pid = self._next_pid + self._next_pid += 1 + return pid + + def _alloc_flow_id(self) -> int: + """Allocate a flow Id.""" + flow_id = self._next_flow_id + self._next_flow_id += 1 + return flow_id + + def _parse_op_label( + self, label: str + ) -> Tuple[str, str, List[str]]: + """Parses the fields in a node timeline label.""" + # Expects labels of the form: name = op(arg, arg, ...). 
+ match = re.match(r'(.*) = (.*)\((.*)\)', label) + if match is None: + return 'unknown', 'unknown', [] + nn, op, inputs = match.groups() + if not inputs: + inputs = [] + else: + inputs = inputs.split(', ') + return nn, op, inputs + + def _parse_kernel_label(self, label, node_name): + """Parses the fields in a node timeline label.""" + # Expects labels of the form: retval (arg) detail @@annotation + start = label.find('@@') + end = label.find('#') + if start >= 0 and end >= 0 and start + 2 < end: + node_name = label[start + 2 : end] + # Node names should always have the form 'name:op'. + fields = node_name.split(':') + ['unknown'] + name, op = fields[:2] + return name, op + + def _assign_lanes(self) -> None: + """Assigns non-overlapping lanes for the activities on each device.""" + for device_stats in self._step_stats.dev_stats: + # TODO(pbar): Genuine thread IDs in step_stats_pb2.NodeExecStats + # might be helpful. + lanes = [0] + for ns in device_stats.node_stats: + l = -1 + for i, lts in enumerate(lanes): + if ns.all_start_micros > lts: + l = i + lanes[l] = ns.all_start_micros + ns.all_end_rel_micros + break + if l < 0: + l = len(lanes) + lanes.append(ns.all_start_micros + ns.all_end_rel_micros) + ns.thread_id = l + + def _emit_op( + self, nodestats: step_stats_pb2.NodeExecStats, pid: int, is_gputrace: bool + ) -> None: + """Generates a Chrome Trace event to show Op execution. + + Args: + nodestats: The 'step_stats_pb2.NodeExecStats' proto recording op + execution. + pid: The pid assigned for the device where this op ran. + is_gputrace: If True then this op came from the GPUTracer. + """ + node_name = nodestats.node_name + start = nodestats.all_start_micros + duration = nodestats.all_end_rel_micros + tid = nodestats.thread_id + inputs = [] + if is_gputrace: + node_name, op = self._parse_kernel_label( + nodestats.timeline_label, node_name + ) + elif node_name == 'RecvTensor': + # RPC tracing does not use the standard timeline_label format. 
+ op = 'RecvTensor' + else: + _, op, inputs = self._parse_op_label(nodestats.timeline_label) + args = {'name': node_name, 'op': op} + if build_info.build_info['is_rocm_build']: + args['kernel'] = nodestats.timeline_label.split('@@')[0] + for i, iname in enumerate(inputs): + args['input%d' % i] = iname + self._chrome_trace.emit_region(start, duration, pid, tid, 'Op', op, args) + + def _emit_tensor_snapshot( + self, + tensor: _TensorTracker, + timestamp: int, + pid: int, + tid: int, + value: step_stats_pb2.NodeOutput, + ) -> None: + """Generate Chrome Trace snapshot event for a computed Tensor. + + Args: + tensor: A 'TensorTracker' object. + timestamp: The timestamp of this snapshot as a long integer. + pid: The pid assigned for showing the device where this op ran. + tid: The tid of the thread computing the tensor snapshot. + value: A JSON-compliant snapshot of the object. + """ + desc = str(value.tensor_description).replace('"', '') + snapshot = {'tensor_description': desc} + self._chrome_trace.emit_obj_snapshot( + 'Tensor', tensor.name, timestamp, pid, tid, tensor.object_id, snapshot + ) + + def _produce_tensor( + self, + name: str, + timestamp: int, + tensors_pid: int, + allocator: str, + num_bytes: int, + ) -> _TensorTracker: + """Creates a new tensor tracker.""" + object_id = len(self._tensors) + tensor = _TensorTracker( + name, object_id, timestamp, tensors_pid, allocator, num_bytes + ) + self._tensors[name] = tensor + return tensor + + def _is_gputrace_device(self, device_name: str) -> bool: + """Returns true if this device is part of the GPUTracer logging.""" + return '/stream:' in device_name or '/memcpy' in device_name + + def _allocate_pids(self) -> None: + """Allocate fake process ids for each device in the step_stats_pb2.StepStats.""" + self._allocators_pid = self._alloc_pid() + self._chrome_trace.emit_pid('Allocators', self._allocators_pid) + + # Add processes in the Chrome trace to show compute and data activity. 
+ for dev_stats in self._step_stats.dev_stats: + device_pid = self._alloc_pid() + self._device_pids[dev_stats.device] = device_pid + tensors_pid = self._alloc_pid() + self._tensor_pids[dev_stats.device] = tensors_pid + self._chrome_trace.emit_pid(dev_stats.device + ' Compute', device_pid) + self._chrome_trace.emit_pid(dev_stats.device + ' Tensors', tensors_pid) + + def _analyze_tensors(self, show_memory: bool) -> None: + """Analyze tensor references to track dataflow.""" + for dev_stats in self._step_stats.dev_stats: + device_pid = self._device_pids[dev_stats.device] + tensors_pid = self._tensor_pids[dev_stats.device] + for node_stats in dev_stats.node_stats: + tid = node_stats.thread_id + node_name = node_stats.node_name + start_time = node_stats.all_start_micros + end_time = node_stats.all_start_micros + node_stats.all_end_rel_micros + for index, output in enumerate(node_stats.output): + if index: + output_name = '%s:%d' % (node_name, index) + else: + output_name = node_name + + allocation = output.tensor_description.allocation_description + num_bytes = allocation.requested_bytes + allocator_name = allocation.allocator_name + tensor = self._produce_tensor( + output_name, start_time, tensors_pid, allocator_name, num_bytes + ) + tensor.add_ref(start_time) + tensor.add_unref(end_time) + self._flow_starts[output_name] = (end_time, device_pid, tid) + + if show_memory: + self._chrome_trace.emit_obj_create( + 'Tensor', + output_name, + start_time, + tensors_pid, + tid, + tensor.object_id, + ) + self._emit_tensor_snapshot( + tensor, end_time - 1, tensors_pid, tid, output + ) + + def _show_compute(self, show_dataflow: bool) -> None: + """Visualize the computation activity.""" + for dev_stats in self._step_stats.dev_stats: + device_name = dev_stats.device + device_pid = self._device_pids[device_name] + is_gputrace = self._is_gputrace_device(device_name) + + for node_stats in dev_stats.node_stats: + tid = node_stats.thread_id + start_time = node_stats.all_start_micros + 
end_time = node_stats.all_start_micros + node_stats.all_end_rel_micros + self._emit_op(node_stats, device_pid, is_gputrace) + + if is_gputrace or node_stats.node_name == 'RecvTensor': + continue + + _, _, inputs = self._parse_op_label(node_stats.timeline_label) + for input_name in inputs: + if input_name not in self._tensors: + # This can happen when partitioning has inserted a Send/Recv. + # We remove the numeric suffix so that the dataflow appears to + # come from the original node. Ideally, the StepStats would + # contain logging for the Send and Recv nodes. + index = input_name.rfind('/_') + if index > 0: + input_name = input_name[:index] + + if input_name in self._tensors: + tensor = self._tensors[input_name] + tensor.add_ref(start_time) + tensor.add_unref(end_time - 1) + + if show_dataflow: + # We use a different flow ID for every graph edge. + create_time, create_pid, create_tid = self._flow_starts[ + input_name + ] + # Don't add flows when producer and consumer ops are on the same + # pid/tid since the horizontal arrows clutter the visualization. + if create_pid != device_pid or create_tid != tid: + flow_id = self._alloc_flow_id() + self._chrome_trace.emit_flow_start( + input_name, create_time, create_pid, create_tid, flow_id + ) + self._chrome_trace.emit_flow_end( + input_name, start_time, device_pid, tid, flow_id + ) + else: + logging.vlog( + 1, "Can't find tensor %s - removed by CSE?", input_name + ) + + def _show_memory_counters(self) -> None: + """Produce a counter series for each memory allocator.""" + # Iterate over all tensor trackers to build a list of allocations and + # frees for each allocator. Then sort the lists and emit a cumulative + # counter series for each allocator. 
+ allocations = {} + for name in self._tensors: + tensor = self._tensors[name] + self._chrome_trace.emit_obj_delete( + 'Tensor', name, tensor.last_unref, tensor.pid, 0, tensor.object_id + ) + allocator = tensor.allocator + if allocator not in allocations: + allocations[allocator] = [] + num_bytes = tensor.num_bytes + allocations[allocator].append((tensor.create_time, num_bytes, name)) + allocations[allocator].append((tensor.last_unref, -num_bytes, name)) + + alloc_maxes = {} + + # Generate a counter series showing total allocations for each allocator. + for allocator in allocations: + alloc_list = allocations[allocator] + alloc_list.sort() + total_bytes = 0 + alloc_tensor_set = set() + alloc_maxes[allocator] = AllocationMaximum( + timestamp=0, num_bytes=0, tensors=set() + ) + for time, num_bytes, name in sorted( + alloc_list, key=lambda allocation: allocation[0] + ): + total_bytes += num_bytes + if num_bytes < 0: + alloc_tensor_set.discard(name) + else: + alloc_tensor_set.add(name) + + if total_bytes > alloc_maxes[allocator].num_bytes: + alloc_maxes[allocator] = AllocationMaximum( + timestamp=time, + num_bytes=total_bytes, + tensors=copy.deepcopy(alloc_tensor_set), + ) + + self._chrome_trace.emit_counter( + 'Memory', + allocator, + self._allocators_pid, + time, + allocator, + total_bytes, + ) + self._allocator_maximums = alloc_maxes + + def _preprocess_op_time(self, op_time: str) -> None: + """Update the start and end time of ops in step stats. + + Args: + op_time: How the execution time of op is shown in timeline. Possible + values are "schedule", "gpu" and "all". "schedule" will show op from + the time it is scheduled to the end of the scheduling. Notice by the end + of its scheduling its async kernels may not start yet. It is shown using + the default value from step_stats. "gpu" will show op with the execution + time of its kernels on GPU. "all" will show op from the start of its + scheduling to the end of its last kernel. 
+ """ + if op_time == 'schedule': + self._step_stats = self._origin_step_stats + return + self._step_stats = copy.deepcopy(self._origin_step_stats) + # Separate job task and gpu tracer stream + stream_all_stats = [] + job_stats = [] + for stats in self._step_stats.dev_stats: + if '/stream:all' in stats.device: + stream_all_stats.append(stats) + elif '/job' in stats.device: + job_stats.append(stats) + + # Record the start time of the first kernel and the end time of + # the last gpu kernel for all ops. + op_gpu_start = {} + op_gpu_end = {} + for stats in stream_all_stats: + for kernel in stats.node_stats: + name, _ = self._parse_kernel_label( + kernel.timeline_label, kernel.node_name + ) + start = kernel.all_start_micros + end = kernel.all_start_micros + kernel.all_end_rel_micros + if name in op_gpu_start: + op_gpu_start[name] = min(op_gpu_start[name], start) + op_gpu_end[name] = max(op_gpu_end[name], end) + else: + op_gpu_start[name] = start + op_gpu_end[name] = end + + # Update the start and end time of each op according to the op_time + for stats in job_stats: + for op in stats.node_stats: + if op.node_name in op_gpu_start: + end = max( + op_gpu_end[op.node_name], + op.all_start_micros + op.all_end_rel_micros, + ) + if op_time == 'gpu': + op.all_start_micros = op_gpu_start[op.node_name] + op.all_end_rel_micros = end - op.all_start_micros + + def analyze_step_stats( + self, + show_dataflow: bool = True, + show_memory: bool = True, + op_time: str = 'schedule', + ) -> StepStatsAnalysis: + """Analyze the step stats and format it into Chrome Trace Format. + + Args: + show_dataflow: (Optional.) If True, add flow events to the trace + connecting producers and consumers of tensors. + show_memory: (Optional.) If True, add object snapshot events to the trace + showing the sizes and lifetimes of tensors. + op_time: (Optional.) How the execution time of op is shown in timeline. + Possible values are "schedule", "gpu" and "all". 
"schedule" will show op + from the time it is scheduled to the end of the scheduling. Notice by + the end of its scheduling its async kernels may not start yet. It is + shown using the default value from step_stats. "gpu" will show op with + the execution time of its kernels on GPU. "all" will show op from the + start of its scheduling to the end of its last kernel. + + Returns: + A 'StepStatsAnalysis' object. + """ + self._preprocess_op_time(op_time) + self._allocate_pids() + self._assign_lanes() + self._analyze_tensors(show_memory) + self._show_compute(show_dataflow) + if show_memory: + self._show_memory_counters() + return StepStatsAnalysis( + chrome_trace=self._chrome_trace, + allocator_maximums=self._allocator_maximums, + ) + + def generate_chrome_trace_format( + self, + show_dataflow: bool = True, + show_memory: bool = False, + op_time: str = 'schedule', + ) -> str: + # pyformat: disable + """Produces a trace in Chrome Trace Format. + + Args: + show_dataflow: (Optional.) If True, add flow events to the trace + connecting producers and consumers of tensors. + show_memory: (Optional.) If True, add object snapshot events to the trace + showing the sizes and lifetimes of tensors. + op_time: (Optional.) How the execution time of op is shown in timeline. + Possible values are "schedule", "gpu" and "all". + "schedule" will show op from the time it is scheduled to the end of + the scheduling. + Notice by the end of its scheduling its async kernels may not start + yet. It is shown using the default value from step_stats. + "gpu" will show op with the execution time of its kernels on GPU. + "all" will show op from the start of its scheduling to the end of + its last kernel. + Returns: + A JSON formatted string in Chrome Trace format. 
+ """ + # pyformat: enable + step_stats_analysis = self.analyze_step_stats( + show_dataflow=show_dataflow, show_memory=show_memory, op_time=op_time + ) + + return step_stats_analysis.chrome_trace.format_to_string(pretty=True) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6544d115bf6267779d7a72850f9619416a94b7a3 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/all_util.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/all_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff675ce8e27ab68eead55e1e2de4a8059f6b2a34 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/all_util.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/compat.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/compat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b526b385a44ff0f6707146be22015013ad5082f Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/compat.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/custom_nest_protocol.cpython-310.pyc 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/custom_nest_protocol.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e908f5e195a0341dbdb30cdf932d208601c9e77 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/custom_nest_protocol.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/decorator_utils.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/decorator_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..22da59b30e28ea56429620ad5cbdf917feb57a22 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/decorator_utils.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/deprecated_module.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/deprecated_module.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..adb30a6470a0e2da81fc6724d9228ca7a128ab6a Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/deprecated_module.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/deprecated_module_new.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/deprecated_module_new.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c2536d5b96aafa036299c35edd1310174e0b297b Binary files /dev/null and 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/deprecated_module_new.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/deprecation.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/deprecation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..95d6911b647957546a20990d6b66408765fd4331 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/deprecation.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/dispatch.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/dispatch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83e9e69c2d13f8a4c2877b97fdc1b7364f4f7b9c Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/dispatch.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/protobuf/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/protobuf/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/protobuf/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/protobuf/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1bf54870432afd8717018bcafff0e41baa345037 Binary files /dev/null and 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/protobuf/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/protobuf/__pycache__/compare.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/protobuf/__pycache__/compare.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1f0a78bc624a8ccfe3ae70cc09c9553d1b1e262d Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/protobuf/__pycache__/compare.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/protobuf/compare.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/protobuf/compare.py new file mode 100644 index 0000000000000000000000000000000000000000..dbc61ae28f4674c553d0de96fc31f5a0ada94e85 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/protobuf/compare.py @@ -0,0 +1,377 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Utility functions for comparing proto2 messages in Python. + +ProtoEq() compares two proto2 messages for equality. 

ClearDefaultValuedFields() recursively clears the fields that are set to their
default values. This is useful for comparing protocol buffers where the
semantics of unset fields and default valued fields are the same.

assertProtoEqual() is useful for unit tests. It produces much more helpful
output than assertEqual() for proto2 messages, e.g. this:

  outer {
    inner {
-     strings: "x"
?              ^
+     strings: "y"
?              ^
    }
  }

...compared to the default output from assertEqual() that looks like this:

AssertionError: !=

Call it inside your unit test's googletest.TestCase subclasses like this:

  from tensorflow.python.util.protobuf import compare

  class MyTest(googletest.TestCase):
    ...
    def testXXX(self):
      ...
      compare.assertProtoEqual(self, a, b)

Alternatively:

  from tensorflow.python.util.protobuf import compare

  class MyTest(compare.ProtoAssertions, googletest.TestCase):
    ...
    def testXXX(self):
      ...
      self.assertProtoEqual(a, b)
"""

import collections.abc as collections_abc
import difflib
import math

from google.protobuf import descriptor
from google.protobuf import descriptor_pool
from google.protobuf import message
from google.protobuf import text_format


# TODO(alankelly): Distinguish between signalling and quiet NaNs.
def isClose(x, y, relative_tolerance):  # pylint: disable=invalid-name
  """Returns True if x is close to y given the relative tolerance or if x and y are both inf, both -inf, or both NaNs.

  This function does not distinguish between signalling and non-signalling
  NaN.

  Args:
    x: float value to be compared
    y: float value to be compared
    relative_tolerance: float. The allowable difference between the two
      values being compared is determined by multiplying the relative
      tolerance by the maximum of the two values.

  Returns:
    True iff x and y are approximately equal, where NaN matches NaN and
    infinities must compare equal exactly.
  """
  # NaNs are considered equal.
  if math.isnan(x) or math.isnan(y):
    return math.isnan(x) == math.isnan(y)

  # Infinities must match exactly (same sign); relative tolerance is
  # meaningless for inf.
  if math.isinf(x) or math.isinf(y):
    return x == y

  return abs(x - y) <= relative_tolerance * max(abs(x), abs(y))


def checkFloatEqAndReplace(self, expected, actual, relative_tolerance):  # pylint: disable=invalid-name
  """Recursively replaces the floats in actual with those in expected iff they are approximately equal.

  This is done because string equality will consider values such as
  5.0999999999 and 5.1 as not being equal, despite being extremely close.

  Args:
    self: googletest.TestCase
    expected: expected values
    actual: actual values
    relative_tolerance: float, relative tolerance.
  """

  for expected_fields, actual_fields in zip(
      expected.ListFields(), actual.ListFields()
  ):
    is_repeated = True
    expected_desc, expected_values = expected_fields
    actual_values = actual_fields[1]
    # Treat scalar fields as one-element lists so both cases share one path.
    if expected_desc.label != descriptor.FieldDescriptor.LABEL_REPEATED:
      is_repeated = False
      expected_values = [expected_values]
      actual_values = [actual_values]

    if (
        expected_desc.type == descriptor.FieldDescriptor.TYPE_FLOAT
        or expected_desc.type == descriptor.FieldDescriptor.TYPE_DOUBLE
    ):
      for i, (x, y) in enumerate(zip(expected_values, actual_values)):
        # Replace the actual value with the expected value if the test
        # passes, otherwise leave it and let it fail in the next test so
        # that the error message is nicely formatted
        if isClose(x, y, relative_tolerance):
          if is_repeated:
            getattr(actual, actual_fields[0].name)[i] = x
          else:
            setattr(actual, actual_fields[0].name, x)

    if (
        expected_desc.type == descriptor.FieldDescriptor.TYPE_MESSAGE
        or expected_desc.type == descriptor.FieldDescriptor.TYPE_GROUP
    ):
      if (
          expected_desc.type == descriptor.FieldDescriptor.TYPE_MESSAGE
          and expected_desc.message_type.has_options
          and expected_desc.message_type.GetOptions().map_entry
      ):
        # This is a map, only recurse if it has type message type.
        # (Map entry field number 2 is the value field by protobuf
        # convention.)
        if (
            expected_desc.message_type.fields_by_number[2].type
            == descriptor.FieldDescriptor.TYPE_MESSAGE
        ):
          for e_v, a_v in zip(
              iter(expected_values.values()), iter(actual_values.values())
          ):
            checkFloatEqAndReplace(
                self,
                expected=e_v,
                actual=a_v,
                relative_tolerance=relative_tolerance,
            )
      else:
        for v, a in zip(expected_values, actual_values):
          # recursive step
          checkFloatEqAndReplace(
              self, expected=v, actual=a, relative_tolerance=relative_tolerance
          )


def assertProtoEqual(
    self,
    a,
    b,
    check_initialized=True,
    normalize_numbers=False,
    msg=None,
    relative_tolerance=None,
):  # pylint: disable=invalid-name
  """Fails with a useful error if a and b aren't equal.

  Comparison of repeated fields matches the semantics of
  unittest.TestCase.assertEqual(), ie order and extra duplicates fields
  matter.

  Args:
    self: googletest.TestCase
    a: proto2 PB instance, or text string representing one.
    b: proto2 PB instance -- message.Message or subclass thereof.
    check_initialized: boolean, whether to fail if either a or b isn't
      initialized.
    normalize_numbers: boolean, whether to normalize types and precision of
      numbers before comparison.
    msg: if specified, is used as the error message on failure.
    relative_tolerance: float, relative tolerance. If this is not provided,
      then all floats are compared using string comparison otherwise,
      floating point comparisons are done using the relative tolerance
      provided.
+ """ + pool = descriptor_pool.Default() + if isinstance(a, str): + a = text_format.Parse(a, b.__class__(), descriptor_pool=pool) + + for pb in a, b: + if check_initialized: + errors = pb.FindInitializationErrors() + if errors: + self.fail('Initialization errors: %s\n%s' % (errors, pb)) + if normalize_numbers: + NormalizeNumberFields(pb) + + if relative_tolerance is not None: + checkFloatEqAndReplace( + self, expected=b, actual=a, relative_tolerance=relative_tolerance + ) + + a_str = text_format.MessageToString(a, descriptor_pool=pool) + b_str = text_format.MessageToString(b, descriptor_pool=pool) + + # Some Python versions would perform regular diff instead of multi-line + # diff if string is longer than 2**16. We substitute this behavior + # with a call to unified_diff instead to have easier-to-read diffs. + # For context, see: https://bugs.python.org/issue11763. + if len(a_str) < 2**16 and len(b_str) < 2**16: + self.assertMultiLineEqual(a_str, b_str, msg=msg) + else: + diff = ''.join( + difflib.unified_diff(a_str.splitlines(True), b_str.splitlines(True))) + if diff: + self.fail('%s :\n%s' % (msg, diff)) + + +def NormalizeNumberFields(pb): + """Normalizes types and precisions of number fields in a protocol buffer. + + Due to subtleties in the python protocol buffer implementation, it is possible + for values to have different types and precision depending on whether they + were set and retrieved directly or deserialized from a protobuf. This function + normalizes integer values to ints and longs based on width, 32-bit floats to + five digits of precision to account for python always storing them as 64-bit, + and ensures doubles are floating point for when they're set to integers. + + Modifies pb in place. Recurses into nested objects. + + Args: + pb: proto2 message. + + Returns: + the given pb, modified in place. 
+ """ + for desc, values in pb.ListFields(): + is_repeated = True + if desc.label != descriptor.FieldDescriptor.LABEL_REPEATED: + is_repeated = False + values = [values] + + normalized_values = None + + # We force 32-bit values to int and 64-bit values to long to make + # alternate implementations where the distinction is more significant + # (e.g. the C++ implementation) simpler. + if desc.type in (descriptor.FieldDescriptor.TYPE_INT64, + descriptor.FieldDescriptor.TYPE_UINT64, + descriptor.FieldDescriptor.TYPE_SINT64): + normalized_values = [int(x) for x in values] + elif desc.type in (descriptor.FieldDescriptor.TYPE_INT32, + descriptor.FieldDescriptor.TYPE_UINT32, + descriptor.FieldDescriptor.TYPE_SINT32, + descriptor.FieldDescriptor.TYPE_ENUM): + normalized_values = [int(x) for x in values] + elif desc.type == descriptor.FieldDescriptor.TYPE_FLOAT: + normalized_values = [round(x, 6) for x in values] + elif desc.type == descriptor.FieldDescriptor.TYPE_DOUBLE: + normalized_values = [round(float(x), 7) for x in values] + + if normalized_values is not None: + if is_repeated: + pb.ClearField(desc.name) + getattr(pb, desc.name).extend(normalized_values) + else: + setattr(pb, desc.name, normalized_values[0]) + + if (desc.type == descriptor.FieldDescriptor.TYPE_MESSAGE or + desc.type == descriptor.FieldDescriptor.TYPE_GROUP): + if (desc.type == descriptor.FieldDescriptor.TYPE_MESSAGE and + desc.message_type.has_options and + desc.message_type.GetOptions().map_entry): + # This is a map, only recurse if the values have a message type. 
+ if (desc.message_type.fields_by_number[2].type == + descriptor.FieldDescriptor.TYPE_MESSAGE): + for v in iter(values.values()): + NormalizeNumberFields(v) + else: + for v in values: + # recursive step + NormalizeNumberFields(v) + + return pb + + +def _IsMap(value): + return isinstance(value, collections_abc.Mapping) + + +def _IsRepeatedContainer(value): + if isinstance(value, str): + return False + try: + iter(value) + return True + except TypeError: + return False + + +def ProtoEq(a, b): + """Compares two proto2 objects for equality. + + Recurses into nested messages. Uses list (not set) semantics for comparing + repeated fields, ie duplicates and order matter. + + Args: + a: A proto2 message or a primitive. + b: A proto2 message or a primitive. + + Returns: + `True` if the messages are equal. + """ + def Format(pb): + """Returns a dictionary or unchanged pb bases on its type. + + Specifically, this function returns a dictionary that maps tag + number (for messages) or element index (for repeated fields) to + value, or just pb unchanged if it's neither. + + Args: + pb: A proto2 message or a primitive. + Returns: + A dict or unchanged pb. + """ + if isinstance(pb, message.Message): + return dict((desc.number, value) for desc, value in pb.ListFields()) + elif _IsMap(pb): + return dict(pb.items()) + elif _IsRepeatedContainer(pb): + return dict(enumerate(list(pb))) + else: + return pb + + a, b = Format(a), Format(b) + + # Base case + if not isinstance(a, dict) or not isinstance(b, dict): + return a == b + + # This list performs double duty: it compares two messages by tag value *or* + # two repeated fields by element, in order. the magic is in the format() + # function, which converts them both to the same easily comparable format. 
+ for tag in sorted(set(a.keys()) | set(b.keys())): + if tag not in a or tag not in b: + return False + else: + # Recursive step + if not ProtoEq(a[tag], b[tag]): + return False + + # Didn't find any values that differed, so they're equal! + return True + + +class ProtoAssertions(object): + """Mix this into a googletest.TestCase class to get proto2 assertions. + + Usage: + + class SomeTestCase(compare.ProtoAssertions, googletest.TestCase): + ... + def testSomething(self): + ... + self.assertProtoEqual(a, b) + + See module-level definitions for method documentation. + """ + + # pylint: disable=invalid-name + def assertProtoEqual(self, *args, **kwargs): + return assertProtoEqual(self, *args, **kwargs)