python_code
stringlengths 0
679k
| repo_name
stringlengths 9
41
| file_path
stringlengths 6
149
|
|---|---|---|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This pip smoke test verifies dependency files exist in the pip package.
This script runs bazel queries to see what python files are required by the
tests and ensures they are in the pip package superset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import subprocess
# Run everything from the repository root so the relative Bazel paths below
# resolve (this file lives three levels down, in tensorflow/tools/pip_package/).
os.chdir(os.path.abspath(os.path.join(os.path.dirname(__file__), "../../..")))

# Bazel query expression for everything the pip package build depends on.
PIP_PACKAGE_QUERY_EXPRESSION = (
    "deps(//tensorflow/tools/pip_package:build_pip_package)")

# List of file paths containing BUILD files that should not be included for the
# pip smoke test.
BUILD_BLACKLIST = [
    "tensorflow/lite/delegates/gpu",
    "tensorflow/lite/delegates/gpu/metal",
    "tensorflow/lite/delegates/gpu/metal/kernels",
    "tensorflow/lite/experimental/objc",
    "tensorflow/lite/experimental/swift",
]
def GetBuild(dir_base):
  """Returns ':all' target patterns for BUILD files found under dir_base.

  Recursively walks the tree rooted at `dir_base` and, for every directory
  that contains a BUILD file and is not blacklisted, emits a Bazel target
  pattern covering all targets in that package.

  Args:
    dir_base: String path (relative to the repo root) where the walk starts.

  Returns:
    List of Bazel target patterns, e.g. ['//tensorflow/python:all', ...].
  """
  items = []
  for root, _, files in os.walk(dir_base):
    for name in files:
      # Skip packages explicitly excluded from the smoke test.
      if name == "BUILD" and root not in BUILD_BLACKLIST:
        items.append("//" + root + ":all")
  return items
def BuildPyTestDependencies():
  """Builds the Bazel query expression for all py_test dependencies.

  Returns:
    Tuple (python_targets, query_deps) where python_targets is the list of
    '//tensorflow/python/...:all' patterns and query_deps is the Bazel query
    string producing the direct dependencies of all non-benchmark py_tests.
  """
  python_targets = GetBuild("tensorflow/python")
  contrib_targets = GetBuild("tensorflow/contrib")
  tensorboard_targets = GetBuild("tensorflow/contrib/tensorboard")
  tensorflow_targets = GetBuild("tensorflow")
  # Build list of test targets,
  # python + contrib - tensorboard - attr(manual|no_pip)
  targets = " + ".join(python_targets)
  for t in contrib_targets:
    targets += " + " + t
  for t in tensorboard_targets:
    targets += " - " + t
  targets += ' - attr(tags, "manual|no_pip", %s)' % " + ".join(
      tensorflow_targets)
  # Restrict to py_test rules only.
  query_kind = "kind(py_test, %s)" % targets
  # Skip benchmarks etc.
  query_filter = 'filter("^((?!benchmark).)*$", %s)' % query_kind
  # Get the dependencies (depth 1 only).
  query_deps = "deps(%s, 1)" % query_filter
  return python_targets, query_deps
# Computed once at import time; both are consumed by main() below.
PYTHON_TARGETS, PY_TEST_QUERY_EXPRESSION = BuildPyTestDependencies()

# TODO(amitpatankar): Clean up blacklist.
# List of dependencies that should not be included in the pip package.
DEPENDENCY_BLACKLIST = [
    "//tensorflow/python:extra_py_tests_deps",
    "//tensorflow/cc/saved_model:saved_model_half_plus_two",
    "//tensorflow:no_tensorflow_py_deps",
    "//tensorflow/tools/pip_package:win_pip_package_marker",
    "//tensorflow/python:test_ops_2",
    "//tensorflow/python:tf_optimizer",
    "//tensorflow/python:compare_test_proto_py",
    "//tensorflow/core:image_testdata",
    "//tensorflow/core:lmdb_testdata",
    "//tensorflow/core/kernels/cloud:bigquery_reader_ops",
    "//tensorflow/python/debug:grpc_tensorflow_server.par",
    "//tensorflow/python/feature_column:vocabulary_testdata",
    "//tensorflow/python:framework/test_file_system.so",
    "//tensorflow/python:util_nest_test_main_lib",
    # lite
    "//tensorflow/lite/experimental/examples/lstm:rnn_cell",
    "//tensorflow/lite/experimental/examples/lstm:rnn_cell.py",
    "//tensorflow/lite/experimental/examples/lstm:unidirectional_sequence_lstm_test",  # pylint:disable=line-too-long
    "//tensorflow/lite/experimental/examples/lstm:unidirectional_sequence_lstm_test.py",  # pylint:disable=line-too-long
    "//tensorflow/lite/python:interpreter",
    "//tensorflow/lite/python:interpreter_test",
    "//tensorflow/lite/python:interpreter.py",
    "//tensorflow/lite/python:interpreter_test.py",
    # contrib
    "//tensorflow/contrib/eager/python/examples/revnet:blocks_test_main_lib",
    "//tensorflow/contrib/session_bundle:session_bundle_half_plus_two",
    "//tensorflow/contrib/keras:testing_utils",
    "//tensorflow/contrib/ffmpeg:test_data",
    "//tensorflow/contrib/fused_conv:fused_conv2d_bias_activation_op_test_base",
    "//tensorflow/contrib/hadoop:test_data",
    "//tensorflow/contrib/factorization/examples:mnist",
    "//tensorflow/contrib/factorization/examples:mnist.py",
    "//tensorflow/contrib/factorization:factorization_py_CYCLIC_DEPENDENCIES_THAT_NEED_TO_GO",  # pylint:disable=line-too-long
    "//tensorflow/contrib/framework:checkpoint_ops_testdata",
    "//tensorflow/contrib/bayesflow:reinforce_simple_example",
    "//tensorflow/contrib/bayesflow:examples/reinforce_simple/reinforce_simple_example.py",  # pylint:disable=line-too-long
    "//tensorflow/contrib/saved_model:reader",  # Not present in v2
    "//tensorflow/contrib/timeseries/examples:predict",
    "//tensorflow/contrib/timeseries/examples:multivariate",
    "//tensorflow/contrib/timeseries/examples:known_anomaly",
    "//tensorflow/contrib/timeseries/examples:data/period_trend.csv",  # pylint:disable=line-too-long
    "//tensorflow/contrib/timeseries/python/timeseries:test_utils",
    "//tensorflow/contrib/timeseries/python/timeseries/state_space_models:test_utils",  # pylint:disable=line-too-long
    "//tensorflow/contrib/image:sparse_image_warp_test_data",
]
def main():
  """Runs the pip smoke test.

  Queries Bazel for the pip package superset and the py_test dependency
  subset, then reports any test dependency that is neither packaged,
  blacklisted, nor ignored by filename suffix.

  Raises:
    RuntimeError: If any dependencies for py_tests are missing from the
      pip package superset.

  Prerequisites:
    1. Bazel is installed.
    2. Running in github repo of tensorflow.
    3. Configure has been run.
  """
  # pip_package_dependencies_list is the list of included files in pip packages
  pip_package_dependencies = subprocess.check_output(
      ["bazel", "cquery", PIP_PACKAGE_QUERY_EXPRESSION])
  if isinstance(pip_package_dependencies, bytes):
    pip_package_dependencies = pip_package_dependencies.decode("utf-8")
  pip_package_dependencies_list = pip_package_dependencies.strip().split("\n")
  # cquery lines look like "<label> (<config-hash>)"; keep only the label.
  pip_package_dependencies_list = [
      x.split()[0] for x in pip_package_dependencies_list
  ]
  print("Pip package superset size: %d" % len(pip_package_dependencies_list))

  # tf_py_test_dependencies is the list of dependencies for all python
  # tests in tensorflow
  tf_py_test_dependencies = subprocess.check_output(
      ["bazel", "cquery", PY_TEST_QUERY_EXPRESSION])
  if isinstance(tf_py_test_dependencies, bytes):
    tf_py_test_dependencies = tf_py_test_dependencies.decode("utf-8")
  # FIX: compute the label list in one pass instead of splitting the raw
  # output twice as before.
  tf_py_test_dependencies_list = [
      x.split()[0] for x in tf_py_test_dependencies.strip().split("\n")
  ]
  print("Pytest dependency subset size: %d" % len(tf_py_test_dependencies_list))

  missing_dependencies = []
  # File extensions and endings to ignore
  ignore_extensions = [
      "_test", "_test.py", "_test_gpu", "_test_gpu.py", "_test_lib"
  ]
  ignored_files_count = 0
  blacklisted_dependencies_count = len(DEPENDENCY_BLACKLIST)
  # Compare dependencies
  for dependency in tf_py_test_dependencies_list:
    if dependency and dependency.startswith("//tensorflow"):
      ignore = False
      # Ignore test-only targets by filename suffix.
      if any(dependency.endswith(ext) for ext in ignore_extensions):
        ignore = True
        ignored_files_count += 1
      # A dependency is missing unless it is packaged, blacklisted, or
      # ignored because of its file extension.
      if not (ignore or dependency in pip_package_dependencies_list or
              dependency in DEPENDENCY_BLACKLIST):
        missing_dependencies.append(dependency)

  print("Ignored files count: %d" % ignored_files_count)
  print("Blacklisted dependencies count: %d" % blacklisted_dependencies_count)
  if missing_dependencies:
    print("Missing the following dependencies from pip_packages:")
    for missing_dependency in missing_dependencies:
      print("\nMissing dependency: %s " % missing_dependency)
      print("Affected Tests:")
      rdep_query = ("rdeps(kind(py_test, %s), %s)" %
                    (" + ".join(PYTHON_TARGETS), missing_dependency))
      affected_tests = subprocess.check_output(["bazel", "cquery", rdep_query])
      # BUG FIX: decode bytes (Python 3) before splitting; previously this
      # call was the only check_output whose result was not decoded, so
      # `affected_tests.split("\n")` raised TypeError under Python 3.
      if isinstance(affected_tests, bytes):
        affected_tests = affected_tests.decode("utf-8")
      affected_tests_list = affected_tests.split("\n")[:-2]
      print("\n".join(affected_tests_list))

    raise RuntimeError("""
One or more added test dependencies are not in the pip package.
If these test dependencies need to be in TensorFlow pip package, please add them to //tensorflow/tools/pip_package/BUILD.
Else either blacklist the dependencies in //tensorflow/tools/pip_package/pip_smoke_test.py
or add no_pip tag to the test.""")

  else:
    print("TEST PASSED")


if __name__ == "__main__":
  main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/pip_package/pip_smoke_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Start a simple interactive console with TensorFlow available."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import code
import sys
def main(_):
  """Run an interactive console.

  Args:
    _: Unused command-line argument vector.

  Returns:
    Zero exit status once the interactive session ends.
  """
  # Drop the user into the standard Python REPL; TensorFlow is importable
  # here per the module docstring.
  code.interact()
  return 0


# Script entry point: propagate main's return value as the process exit code.
if __name__ == '__main__':
  sys.exit(main(sys.argv))
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/pip_package/simple_console.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
"""Auto-detects machine configurations and outputs the results to shell or file.
Supports linux only currently.
Usage:
python config_detector.py [--save_output] [--filename] [--debug]
Example command:
python config_detector.py --save_output=True --filename=configs.json
--debug=False
Flag option(s):
save_output (True | False) Save output to a file.
(Default: True)
filename <file_name>.json Filename(.json) for storing configs.
(Default: `configs.json`)
debug (True | False) View debug and stderr messages.
(Default: False)
The following machine configuration will be detected:
Platform Operating system (linux | macos | windows)
CPU CPU type (e.g. `GenuineIntel`)
CPU architecture Processor type (32-bit | 64-bit)
CPU ISA CPU instruction set (e.g. `sse4`, `sse4_1`, `avx`)
Distribution Operating system distribution (e.g. Ubuntu)
Distribution version Operating system distribution version (e.g. 14.04)
GPU GPU type (e.g. `Tesla K80`)
GPU count Number of GPU's available
CUDA version CUDA version by default (e.g. `10.1`)
CUDA version all CUDA version(s) all available
cuDNN version cuDNN version (e.g. `7.5.0`)
GCC version GCC version (e.g. `7.3.0`)
GLIBC version GLIBC version (e.g. `2.24`)
libstdc++ version libstdc++ version (e.g. `3.4.25`)
Output:
Shell output (print)
A table containing status and info on all configurations will be
printed out to shell.
Configuration file (.json):
Depending on `--save_output` option, this script outputs a .json file
(in the same directory) containing all user machine configurations
that were detected.
"""
# pylint: disable=broad-except
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import re
import subprocess
import sys
from absl import app
from absl import flags
from tensorflow.tools.tensorflow_builder.config_detector.data import cuda_compute_capability
FLAGS = flags.FLAGS

# Define all flags
flags.DEFINE_boolean("save_output", True, "Save output to a file. [True/False]")
flags.DEFINE_string("filename", "configs.json", "Output filename.")
flags.DEFINE_boolean("debug", False, "View debug messages. [True/False]")

# For linux: commands for retrieving user machine configs.
# Values are shell pipelines run via `run_shell_cmd`; list-valued entries are
# fallback commands tried in order.
cmds_linux = {
    "cpu_type": (
        "cat /proc/cpuinfo 2>&1 | grep 'vendor' | uniq"),
    "cpu_arch": (
        "uname -m"),
    "distrib": (
        "cat /etc/*-release | grep DISTRIB_ID* | sed 's/^.*=//'"),
    "distrib_ver": (
        "cat /etc/*-release | grep DISTRIB_RELEASE* | sed 's/^.*=//'"),
    "gpu_type": (
        "sudo lshw -C display | grep product:* | sed 's/^.*: //'"),
    "gpu_type_no_sudo":
        r"lspci | grep 'VGA compatible\|3D controller' | cut -d' ' -f 1 | "
        r"xargs -i lspci -v -s {} | head -n 2 | tail -1 | "
        r"awk '{print $(NF-2), $(NF-1), $NF}'",
    "gpu_count": (
        "sudo lshw -C display | grep *-display:* | wc -l"),
    "gpu_count_no_sudo": (
        r"lspci | grep 'VGA compatible\|3D controller' | wc -l"),
    "cuda_ver_all": (
        "ls -d /usr/local/cuda* 2> /dev/null"),
    "cuda_ver_dflt": (
        ["nvcc --version 2> /dev/null",
         "cat /usr/local/cuda/version.txt 2> /dev/null | awk '{print $NF}'"]),
    "cudnn_ver": (
        ["whereis cudnn.h",
         "cat `awk '{print $2}'` | grep CUDNN_MAJOR -A 2 | echo "
         "`awk '{print $NF}'` | awk '{print $1, $2, $3}' | sed 's/ /./g'"]),
    "gcc_ver": (
        "gcc --version | awk '{print $NF}' | head -n 1"),
    "glibc_ver": (
        "ldd --version | tail -n+1 | head -n 1 | awk '{print $NF}'"),
    "libstdcpp_ver":
        "strings $(/sbin/ldconfig -p | grep libstdc++ | head -n 1 | "
        "awk '{print $NF}') | grep LIBCXX | tail -2 | head -n 1",
    "cpu_isa": (
        "cat /proc/cpuinfo | grep flags | head -n 1"),
}

# Platform name -> command table dispatch (linux only for now).
cmds_all = {
    "linux": cmds_linux,
}

# Global variable(s).
PLATFORM = None  # set by get_platform()
GPU_TYPE = None  # set by get_gpu_type()
PATH_TO_DIR = "tensorflow/tools/tensorflow_builder/config_detector"
def run_shell_cmd(args):
  """Runs shell command(s), merging stderr into stdout.

  Args:
    args: String of shell commands to run.

  Returns:
    Tuple (stdoutdata, stderrdata) as returned by Popen.communicate();
    stderrdata is always None because stderr is redirected into stdout.
  """
  child = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT)
  return child.communicate()
def get_platform():
  """Retrieves platform information.

  Currently the script only supports linux. If another platform such as
  Windows or MacOS is detected, it prints an error and terminates the
  process with exit status 1.

  Returns:
    String that is the platform type. e.g. 'linux'
  """
  global PLATFORM
  cmd = "uname"
  out, err = run_shell_cmd(cmd)
  # FIX: Popen returns bytes under Python 3; decode so the comparison with
  # the "linux" string works on both Python 2 and 3 (matching how other
  # TensorFlow tooling handles check_output results).
  if isinstance(out, bytes):
    out = out.decode("utf-8")
  platform_detected = out.strip().lower()
  if platform_detected != "linux":
    if err and FLAGS.debug:
      print("Error in detecting platform:\n %s" % str(err))
    print("Error: Detected unsupported operating system.\nStopping...")
    sys.exit(1)
  else:
    PLATFORM = platform_detected

  return PLATFORM
def get_cpu_type():
  """Retrieves CPU (type) information.

  Returns:
    String that is name of the CPU. e.g. 'GenuineIntel'
  """
  key = "cpu_type"
  out, err = run_shell_cmd(cmds_all[PLATFORM][key])
  # The vendor line of /proc/cpuinfo looks like `vendor_id : GenuineIntel`;
  # keep the value after the colon.
  cpu_detected = out.split(":")[1].strip()
  if err and FLAGS.debug:
    print("Error in detecting CPU type:\n %s" % str(err))

  return cpu_detected
def get_cpu_arch():
  """Retrieves processor architecture type (32-bit or 64-bit).

  Returns:
    String that is CPU architecture. e.g. 'x86_64'
  """
  output, error = run_shell_cmd(cmds_all[PLATFORM]["cpu_arch"])
  if error and FLAGS.debug:
    print("Error in detecting CPU arch:\n %s" % str(error))
  return output.strip("\n")
def get_distrib():
  """Retrieves distribution name of the operating system.

  Returns:
    String that is the name of distribution. e.g. 'Ubuntu'
  """
  key = "distrib"
  out, err = run_shell_cmd(cmds_all[PLATFORM][key])
  if err and FLAGS.debug:
    print("Error in detecting distribution:\n %s" % str(err))

  return out.strip("\n")
def get_distrib_version():
  """Retrieves distribution version of the operating system.

  Returns:
    String that is the distribution version. e.g. '14.04'
  """
  key = "distrib_ver"
  out, err = run_shell_cmd(cmds_all[PLATFORM][key])
  if err and FLAGS.debug:
    print(
        "Error in detecting distribution version:\n %s" % str(err)
    )

  return out.strip("\n")
def get_gpu_type():
  """Retrieves GPU type.

  Returns:
    Tuple (gpu_id, GPU_TYPE):
      gpu_id: First whitespace token of the `lspci`-derived output.
      GPU_TYPE: String name of the detected NVIDIA GPU, or 'unknown' if the
        detected GPU type is not in the golden list. Unknown name refers to
        any GPU name that is not specified in this page:
        https://developer.nvidia.com/cuda-gpus
  """
  global GPU_TYPE
  key = "gpu_type_no_sudo"
  gpu_dict = cuda_compute_capability.retrieve_from_golden()
  out, err = run_shell_cmd(cmds_all[PLATFORM][key])
  ret_val = out.split(" ")
  gpu_id = ret_val[0]
  if err and FLAGS.debug:
    print("Error in detecting GPU type:\n %s" % str(err))

  # NOTE(review): `out.split(" ")` always yields a list, so this branch looks
  # unreachable — confirm whether a scalar return was ever possible here.
  if not isinstance(ret_val, list):
    GPU_TYPE = "unknown"
    return gpu_id, GPU_TYPE
  else:
    # NOTE(review): `"[" or "]" in ret_val[1]` is always true because the
    # literal "[" is truthy. The always-taken branch happens to rebuild
    # two-word names like "Tesla K80" correctly from tokens 1 and 2, so the
    # condition is left untouched — verify against real lspci output before
    # "fixing" it.
    if "[" or "]" in ret_val[1]:
      gpu_release = ret_val[1].replace("[", "") + " "
      gpu_release += ret_val[2].replace("]", "").strip("\n")
    else:
      gpu_release = ret_val[1].replace("\n", " ")

    if gpu_release not in gpu_dict:
      GPU_TYPE = "unknown"
    else:
      GPU_TYPE = gpu_release

    return gpu_id, GPU_TYPE
def get_gpu_count():
  """Retrieves total number of GPU's available in the system.

  Returns:
    Integer that is the total # of GPU's found.
  """
  output, error = run_shell_cmd(cmds_all[PLATFORM]["gpu_count_no_sudo"])
  if error and FLAGS.debug:
    print("Error in detecting GPU count:\n %s" % str(error))
  return output.strip("\n")
def get_cuda_version_all():
  """Retrieves all additional CUDA versions available (other than default).

  For retrieving default CUDA version, use `get_cuda_version` function.

  stderr is silenced by default. Setting FLAGS.debug mode will not enable it.
  Remove `2> /dev/null` command from `cmds_linux['cuda_ver_dflt']` to enable
  stderr.

  Returns:
    List of all CUDA versions found (except default version).
      e.g. ['10.1', '10.2']
  """
  key = "cuda_ver_all"
  out, err = run_shell_cmd(cmds_all[PLATFORM.lower()][key])
  ret_val = out.split("\n")
  # Drop blank entries from the `ls` output.
  filtered = []
  for item in ret_val:
    if item not in ["\n", ""]:
      filtered.append(item)

  all_vers = []
  for item in filtered:
    # Install dirs look like `/usr/local/cuda-10.1`; the unversioned
    # `/usr/local/cuda` symlink yields group(1) == None and is skipped.
    ver_re = re.search(r".*/cuda(\-[\d]+\.[\d]+)?", item)
    if ver_re.group(1):
      all_vers.append(ver_re.group(1).strip("-"))

  if err and FLAGS.debug:
    print("Error in detecting CUDA version:\n %s" % str(err))

  return all_vers
def get_cuda_version_default():
  """Retrieves default CUDA version.

  Default version is the version found in `/usr/local/cuda/` installation.

  stderr is silenced by default. Setting FLAGS.debug mode will not enable it.
  Remove `2> /dev/null` command from `cmds_linux['cuda_ver_dflt']` to enable
  stderr.

  It iterates through two types of version retrieval method:
    1) Using `nvcc`: If `nvcc` is not available, then it uses next method.
    2) Read version file (`version.txt`) found in CUDA install directory.

  Returns:
    String that is the default CUDA version. e.g. '10.1'
  """
  key = "cuda_ver_dflt"
  out = ""
  cmd_list = cmds_all[PLATFORM.lower()][key]
  # NOTE(review): there is no `break` on success, so every command in
  # cmd_list runs and the *last* command's output (possibly empty on
  # failure) is what gets returned — confirm this is intended.
  for i, cmd in enumerate(cmd_list):
    try:
      out, err = run_shell_cmd(cmd)
      if not out:
        # Empty stdout means this method failed; surface err via the
        # exception path below.
        raise Exception(err)

    except Exception as e:
      if FLAGS.debug:
        print("\nWarning: Encountered issue while retrieving default CUDA "
              "version. (%s) Trying a different method...\n" % e)

      if i == len(cmd_list) - 1:
        if FLAGS.debug:
          print("Error: Cannot retrieve CUDA default version.\nStopping...")

      else:
        pass

  return out.strip("\n")
def get_cuda_compute_capability(source_from_url=False):
  """Retrieves CUDA compute capability based on the detected GPU type.

  This function uses the `cuda_compute_capability` module to retrieve the
  corresponding CUDA compute capability for the given GPU type. It relies on
  the module-level GPU_TYPE global having been set by `get_gpu_type()`.

  Args:
    source_from_url: Boolean deciding whether to source compute capability
                     from NVIDIA website or from a local golden file.

  Returns:
    List of all supported CUDA compute capabilities for the given GPU type
    (e.g. ['3.5', '3.7']), or None if GPU_TYPE is unset or 'unknown'.
  """
  if not GPU_TYPE:
    if FLAGS.debug:
      print("Warning: GPU_TYPE is empty. "
            "Make sure to call `get_gpu_type()` first.")

  elif GPU_TYPE == "unknown":
    if FLAGS.debug:
      print("Warning: Unknown GPU is detected. "
            "Skipping CUDA compute capability retrieval.")

  else:
    if source_from_url:
      cuda_compute_capa = cuda_compute_capability.retrieve_from_web()
    else:
      cuda_compute_capa = cuda_compute_capability.retrieve_from_golden()

    return cuda_compute_capa[GPU_TYPE]
  # Implicit None when GPU type is missing or unknown.
  return
def get_cudnn_version():
  """Retrieves the version of cuDNN library detected.

  Returns:
    String that is the version of cuDNN library detected (e.g. '7.5.0'),
    or None if `cudnn.h` is not found.
  """
  key = "cudnn_ver"
  cmds = cmds_all[PLATFORM.lower()][key]
  out, err = run_shell_cmd(cmds[0])
  if err and FLAGS.debug:
    print("Error in finding `cudnn.h`:\n %s" % str(err))

  # `whereis cudnn.h` prints just `cudnn:` when the header is not found;
  # more than one token means a path was located.
  if len(out.split(" ")) > 1:
    cmd = cmds[0] + " | " + cmds[1]
    out_re, err_re = run_shell_cmd(cmd)
    if err_re and FLAGS.debug:
      print("Error in detecting cuDNN version:\n %s" % str(err_re))

    return out_re.strip("\n")
  else:
    return
def get_gcc_version():
  """Retrieves version of GCC detected.

  Returns:
    String that is the version of GCC. e.g. '7.3.0'
  """
  key = "gcc_ver"
  out, err = run_shell_cmd(cmds_all[PLATFORM.lower()][key])
  if err and FLAGS.debug:
    print("Error in detecting GCC version:\n %s" % str(err))

  return out.strip("\n")
def get_glibc_version():
  """Retrieves version of GLIBC detected.

  Returns:
    String that is the version of GLIBC. e.g. '2.24'
  """
  key = "glibc_ver"
  out, err = run_shell_cmd(cmds_all[PLATFORM.lower()][key])
  if err and FLAGS.debug:
    # BUG FIX: the message previously said "GCC version" — a copy-paste
    # from get_gcc_version(); report GLIBC instead.
    print("Error in detecting GLIBC version:\n %s" % str(err))

  return out.strip("\n")
def get_libstdcpp_version():
  """Retrieves version of libstdc++ detected.

  Returns:
    String that is the version of libstdc++. e.g. '3.4.25'
  """
  output, error = run_shell_cmd(cmds_all[PLATFORM.lower()]["libstdcpp_ver"])
  if error and FLAGS.debug:
    print("Error in detecting libstdc++ version:\n %s" % str(error))
  # Output looks like `GLIBCXX_3.4.25`; keep the component after the
  # underscore and drop the trailing newline.
  return output.split("_")[-1].replace("\n", "")
def get_cpu_isa_version():
  """Retrieves all Instruction Set Architecture(ISA) available.

  Required ISA(s): 'avx', 'avx2', 'avx512f', 'sse4', 'sse4_1'

  Returns:
    Tuple (list of available ISA, list of missing ISA)
  """
  output, error = run_shell_cmd(cmds_all[PLATFORM.lower()]["cpu_isa"])
  if error and FLAGS.debug:
    print("Error in detecting supported ISA:\n %s" % str(error))

  required_isa = ["avx", "avx2", "avx512f", "sse4", "sse4_1"]
  # The flags line of /proc/cpuinfo is space-separated; keep required ISAs
  # in their required order, without duplicates.
  detected_tokens = output.split(" ")
  found = [isa for isa in required_isa if isa in detected_tokens]
  missing = list(set(required_isa) - set(found))
  return found, missing
def get_python_version():
  """Retrieves default Python version.

  Returns:
    String that is the version of default Python. e.g. '2.7.4'
  """
  # BUG FIX: the previous implementation regex-parsed str(sys.version_info)
  # with single-digit `[\d]` groups for major/minor, which fails (AttributeError
  # on a non-match) for double-digit versions such as Python 3.10. Read the
  # components directly instead.
  return "%d.%d.%d" % (sys.version_info.major, sys.version_info.minor,
                       sys.version_info.micro)
def get_all_configs():
  """Runs all functions for detecting user machine configurations.

  Returns:
    Tuple
      (List of all configurations found,
       List of all missing configurations,
       List of all configurations found with warnings,
       Dict of all configurations)
  """
  # NOTE: the dict values are the *results* of calling each detector right
  # here, not the function objects; the loop below only classifies results.
  all_functions = collections.OrderedDict(
      [("Platform", get_platform()),
       ("CPU", get_cpu_type()),
       ("CPU arch", get_cpu_arch()),
       ("Distribution", get_distrib()),
       ("Distribution version", get_distrib_version()),
       ("GPU", get_gpu_type()[1]),
       ("GPU count", get_gpu_count()),
       ("CUDA version (default)", get_cuda_version_default()),
       ("CUDA versions (all)", get_cuda_version_all()),
       # BUG FIX: previously the GPU-type string was passed as the boolean
       # `source_from_url` argument, silently switching retrieval to the
       # NVIDIA website for any recognized GPU. GPU_TYPE was already set by
       # get_gpu_type() above, so call with the default (golden-file) source.
       ("CUDA compute capability", get_cuda_compute_capability()),
       ("cuDNN version", get_cudnn_version()),
       ("GCC version", get_gcc_version()),
       ("Python version (default)", get_python_version()),
       ("GNU C Lib (glibc) version", get_glibc_version()),
       ("libstdc++ version", get_libstdcpp_version()),
       ("CPU ISA (min requirement)", get_cpu_isa_version())]
  )
  configs_found = []
  json_data = {}
  missing = []
  warning = []
  # BUG FIX: items() instead of Python-2-only iteritems() so the script runs
  # under both Python 2 and 3.
  for config, call_func in all_functions.items():
    ret_val = call_func
    if not ret_val:
      # Nothing was detected for this config; mark it missing (red/bold).
      configs_found.append([config, "\033[91m\033[1mMissing\033[0m"])
      missing.append([config])
      json_data[config] = ""
    elif ret_val == "unknown":
      # Detected but unrecognized (yellow/bold).
      configs_found.append([config, "\033[93m\033[1mUnknown type\033[0m"])
      warning.append([config, ret_val])
      json_data[config] = "unknown"
    else:
      if "ISA" in config:
        # ISA detector returns (found, missing) rather than a scalar.
        if not ret_val[1]:
          # Not missing any required ISA
          configs_found.append([config, ret_val[0]])
          json_data[config] = ret_val[0]
        else:
          configs_found.append(
              [config,
               "\033[91m\033[1mMissing " + str(ret_val[1])[1:-1] + "\033[0m"]
          )
          missing.append(
              [config,
               "\n\t=> Found %s but missing %s"
               % (str(ret_val[0]), str(ret_val[1]))]
          )
          json_data[config] = ret_val[0]

      else:
        configs_found.append([config, ret_val])
        json_data[config] = ret_val

  return (configs_found, missing, warning, json_data)
def print_all_configs(configs, missing, warning):
  """Prints the status and info on all configurations in a table format.

  Args:
    configs: List of all configurations found.
    missing: List of all configurations that are missing.
    warning: List of all configurations found with warnings.
  """
  llen = 65  # line length
  body_rows = []
  for idx, entry in enumerate(configs):
    if idx != 0:
      body_rows.append("-" * llen + "\n")
    # List values (e.g. the ISA list) are rendered comma-separated.
    if isinstance(entry[1], list):
      rendered = ", ".join(entry[1])
    else:
      rendered = entry[1]
    body_rows.append(
        " {: <28}".format(entry[0]) + " {: <25}".format(rendered) + "\n")
  body_rows.append("=" * llen)

  print("\n\n {: ^32} {: ^25}".format("Configuration(s)",
                                      "Detected value(s)"))
  print("=" * llen)
  print("".join(body_rows))

  if missing:
    print("\n * ERROR: The following configurations are missing:")
    for m in missing:
      print(" ", *m)

  if warning:
    print("\n * WARNING: The following configurations could cause issues:")
    for w in warning:
      print(" ", *w)

  if not missing and not warning:
    print("\n * INFO: Successfully found all configurations.")

  print("\n")
def save_to_file(json_data, filename):
  """Saves all detected configuration(s) into a JSON file.

  Args:
    json_data: Dict of all configurations found.
    filename: String that is the name of the output JSON file.
  """
  if filename[-5:] != ".json":
    # Append the `.json` extension if the user omitted it.
    print("filename: %s" % filename)
    filename += ".json"

  # Output is written next to this script, under PATH_TO_DIR.
  with open(PATH_TO_DIR + "/" + filename, "w") as f:
    json.dump(json_data, f, sort_keys=True, indent=4)

  print(" Successfully wrote configs to file `%s`.\n" % (filename))
def manage_all_configs(save_results, filename):
  """Manages configuration detection and retrieval based on user input.

  Args:
    save_results: Boolean indicating whether to save the results to a file.
    filename: String that is the name of the output JSON file.
  """
  # Get all configs
  all_configs = get_all_configs()

  # Print all configs based on user input
  print_all_configs(all_configs[0], all_configs[1], all_configs[2])

  # Save all configs to a file based on user request
  if save_results:
    save_to_file(all_configs[3], filename)
def main(argv):
  """Detects all configs and prints/saves them per the command-line flags."""
  if len(argv) > 3:
    raise app.UsageError("Too many command-line arguments.")

  manage_all_configs(
      save_results=FLAGS.save_output,
      filename=FLAGS.filename,
  )


# Script entry point (absl parses the flags defined above).
if __name__ == "__main__":
  app.run(main)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/tensorflow_builder/config_detector/config_detector.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
"""Retrieves CUDA compute capability from NVIDIA webpage and creates a `.csv`.
This module is mainly written to supplement for `../config_detector.py`
which retrieves CUDA compute capability from existing golden file.
The golden file resides inside `./golden` directory.
Usage:
python cuda_compute_capability.py
Output:
Creates `compute_capability.csv` file in the same directory by default. If
the file already exists, then it overwrites the file.
In order to use the new `.csv` as the golden, then it should replace the
original golden file (`./golden/compute_capability_golden.csv`) with the
same file name and path.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import difflib
import os
import re
from absl import app
from absl import flags
import six.moves.urllib.request as urllib
FLAGS = flags.FLAGS

# Path to the config_detector directory (relative to the repo root).
PATH_TO_DIR = "tensorflow/tools/tensorflow_builder/config_detector"
# Golden file holding the known GPU -> compute capability mapping.
CUDA_CC_GOLDEN_DIR = PATH_TO_DIR + "/data/golden/compute_capability_golden.csv"
def retrieve_from_web(generate_csv=False):
  """Retrieves list of all CUDA compute capability from NVIDIA webpage.

  Args:
    generate_csv: Boolean for generating an output file containing
                  the results.

  Returns:
    OrderedDict that is a list of all CUDA compute capability listed on the
    NVIDIA page. Order goes from top to bottom of the webpage content (.html).
  """
  url = "https://developer.nvidia.com/cuda-gpus"
  # NOTE(review): written for Python 2 — under Python 3 `readline()` returns
  # bytes and the `in` string checks below would raise TypeError; confirm
  # the intended runtime before porting.
  source = urllib.urlopen(url)
  matches = []
  while True:
    line = source.readline()
    # The scrape stops at the closing html tag.
    if "</html>" in line:
      break
    else:
      # GPU names are anchor texts; capability cells look like `7.5`
      # or `3.5/3.7`.
      gpu = re.search(
          r"<a href=.*>([\w\S\s\d\[\]\,]+[^*])</a>(<a href=.*)?.*",
          line
      )
      capability = re.search(
          r"([\d]+).([\d]+)(/)?([\d]+)?(.)?([\d]+)?.*</td>.*",
          line
      )
      if gpu:
        matches.append(gpu.group(1))
      elif capability:
        if capability.group(3):
          # Slash form (e.g. `3.5/3.7`): keep the second version.
          capability_str = capability.group(4) + "." + capability.group(6)
        else:
          capability_str = capability.group(1) + "." + capability.group(2)
        matches.append(capability_str)

  return create_gpu_capa_map(matches, generate_csv)
def retrieve_from_golden():
  """Retrieves list of all CUDA compute capability from a golden file.

  The following file is set as default:
    `./golden/compute_capability_golden.csv`

  Returns:
    Dictionary that lists of all CUDA compute capability in the following
    format:
      {'<GPU name>': ['<version major>.<version minor>', ...], ...}

    If there are multiple versions available for a given GPU, then it
    appends all supported versions in the value list (in the key-value
    pair.)
  """
  capability_map = dict()
  with open(CUDA_CC_GOLDEN_DIR) as golden_file:
    for row in golden_file:
      # First CSV column is the GPU name; the rest are supported versions.
      fields = row.split(",")
      capability_map[fields[0]] = [fld.strip("\n") for fld in fields[1:]]
  return capability_map
def create_gpu_capa_map(match_list,
                        generate_csv=False,
                        filename="compute_capability"):
  """Generates a map between GPU types and corresponding compute capability.

  This method is used for retrieving CUDA compute capability from the web only.

  Args:
    match_list: List of all CUDA compute capability detected from the webpage.
    generate_csv: Boolean for creating csv file to store results.
    filename: String that is the name of the csv file (without `.csv` ending).

  Returns:
    OrderedDict that lists in the incoming order of all CUDA compute capability
    provided as `match_list`.
  """
  capa_map = collections.OrderedDict()
  in_products_section = False
  pending_gpu = ""
  pair_count = 0
  mismatch_count = 0
  for token in match_list:
    # Section markers: "Products" opens the GPU listing, "www" closes it.
    if "Products" in token:
      if not in_products_section:
        in_products_section = True
      continue
    elif "www" in token:
      in_products_section = False
      break

    if not in_products_section:
      continue

    if not pending_gpu:
      # First token of a pair: remember the GPU name.
      pending_gpu = token
    else:
      # Second token of a pair: record the capability under that GPU.
      if pending_gpu in capa_map:
        capa_map[pending_gpu].append(token)
      else:
        capa_map[pending_gpu] = [token]
      pending_gpu = ""
      pair_count += 1
      # Duplicate GPU names collapse into one key; resync the counter.
      if len(capa_map.keys()) < pair_count:
        mismatch_count += 1
        pair_count = len(capa_map.keys())

  if generate_csv:
    write_csv_from_dict(filename + ".csv", capa_map)
  return capa_map
def write_csv_from_dict(filename, input_dict):
  """Writes out a `.csv` file from an input dictionary.

  After writing out the file, it checks the new list against the golden
  to make sure golden file is up-to-date.

  Args:
    filename: String that is the output file name.
    input_dict: Dictionary that is to be written out to a `.csv` file.
  """
  # FIX: use a context manager so the handle is closed (the previous version
  # only flushed, leaking the file object), and items() instead of the
  # Python-2-only iteritems().
  with open(PATH_TO_DIR + "/data/" + filename, "w") as f:
    for gpu_name, versions in input_dict.items():
      line = gpu_name
      for item in versions:
        line += "," + item
      f.write(line + "\n")

  print("Wrote to file %s" % filename)
  check_with_golden(filename)
def check_with_golden(filename):
  """Checks the newly created CUDA compute capability file with the golden.

  If differences are found, then it prints a list of all mismatches as
  a `WARNING`.

  Golden file must reside in `golden/` directory.

  Args:
    filename: String that is the name of the newly created file.
  """
  path_to_file = PATH_TO_DIR + "/data/" + filename
  # Only compare when both the new file and the golden file exist.
  if os.path.isfile(path_to_file) and os.path.isfile(CUDA_CC_GOLDEN_DIR):
    with open(path_to_file, "r") as f_new:
      with open(CUDA_CC_GOLDEN_DIR, "r") as f_golden:
        diff = difflib.unified_diff(
            f_new.readlines(),
            f_golden.readlines(),
            fromfile=path_to_file,
            tofile=CUDA_CC_GOLDEN_DIR
        )
        # Materialize the diff generator while the files are open.
        diff_list = list(diff)
    if diff_list:
      print("WARNING: difference(s) found between new csv and golden csv.")
      print(diff_list)
    else:
      # Typo fix: message previously read "golen csv".
      print("No difference found between new csv and golden csv.")
def print_dict(py_dict):
  """Prints dictionary with formatting (2 column table).

  Args:
    py_dict: Dictionary that is to be printed out in a table format.
  """
  # Each row: left-justified 25-char columns for key and value.
  row_template = "{:<25}{:<25}"
  for name, capability in py_dict.items():
    print(row_template.format(name, capability))
def main(argv):
  """Entry point: runs `retrieve_from_web` with csv generation enabled.

  Args:
    argv: List of command-line arguments passed through by `app.run`.

  Raises:
    app.UsageError: If more than one positional argument is provided.
  """
  if len(argv) > 2:
    raise app.UsageError("Too many command-line arguments.")
  retrieve_from_web(generate_csv=True)
if __name__ == "__main__":
  app.run(main)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/tensorflow_builder/config_detector/data/cuda_compute_capability.py
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/tensorflow_builder/config_detector/data/__init__.py
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
"""Checks if a set of configuration(s) is version and dependency compatible."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import sys
import six
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_inspect
# pylint: disable=g-import-not-at-top
if six.PY2:
import ConfigParser
else:
import configparser as ConfigParser
# pylint: enable=g-import-not-at-top
PATH_TO_DIR = "tensorflow/tools/tensorflow_builder/compat_checker"
def _compare_versions(v1, v2):
"""Compare two versions and return information on which is smaller vs. larger.
Args:
v1: String that is a version to be compared against `v2`.
v2: String that is a version to be compared against `v1`.
Returns:
Dict that stores larger version with key `larger` and smaller version with
key `smaller`.
e.g. {`larger`: `1.5.0`, `smaller`: `1.2.0`}
Raises:
RuntimeError: If asked to compare `inf` to `inf`.
"""
# Throw error is asked to compare `inf` to `inf`.
if v1 == "inf" and v2 == "inf":
raise RuntimeError("Cannot compare `inf` to `inf`.")
rtn_dict = {"smaller": None, "larger": None}
v1_list = v1.split(".")
v2_list = v2.split(".")
# Take care of cases with infinity (arg=`inf`).
if v1_list[0] == "inf":
v1_list[0] = str(int(v2_list[0]) + 1)
if v2_list[0] == "inf":
v2_list[0] = str(int(v1_list[0]) + 1)
# Determine which of the two lists are longer vs. shorter.
v_long = v1_list if len(v1_list) >= len(v2_list) else v2_list
v_short = v1_list if len(v1_list) < len(v2_list) else v2_list
larger, smaller = None, None
for i, ver in enumerate(v_short, start=0):
if int(ver) > int(v_long[i]):
larger = _list_to_string(v_short, ".")
smaller = _list_to_string(v_long, ".")
elif int(ver) < int(v_long[i]):
larger = _list_to_string(v_long, ".")
smaller = _list_to_string(v_short, ".")
else:
if i == len(v_short) - 1:
if v_long[i + 1:] == ["0"]*(len(v_long) - 1 - i):
larger = "equal"
smaller = "equal"
else:
larger = _list_to_string(v_long, ".")
smaller = _list_to_string(v_short, ".")
else:
# Go to next round.
pass
if larger:
break
rtn_dict["smaller"] = smaller
rtn_dict["larger"] = larger
return rtn_dict
def _list_to_string(l, s):
"""Concatenates list items into a single string separated by `s`.
Args:
l: List with items to be concatenated into a single string.
s: String or char that will be concatenated in between each item.
Returns:
String that has all items in list `l` concatenated with `s` separator.
"""
return s.join(l)
def _get_func_name():
  """Get the name of current function.

  Returns:
    String that is the name of current function.
  """
  # Stack frame 1 is the caller; index 3 of a stack record holds the
  # function name.
  caller_record = tf_inspect.stack()[1]
  return caller_record[3]
class ConfigCompatChecker(object):
  """Class that checks configuration versions and dependency compatibilities.

  `ConfigCompatChecker` checks a given set of configurations and their versions
  against supported versions and dependency rules defined in `.ini` config file.
  For project `TensorFlow Builder`, it functions as a sub-module for the builder
  service that validates requested build configurations from a client prior to
  initiating a TensorFlow build.
  """

  class _Reqs(object):
    """Class that stores specifications related to a single requirement.

    `_Reqs` represents a single version or dependency requirement specified in
    the `.ini` config file. It is meant to be used inside `ConfigCompatChecker`
    to help organize and identify version and dependency compatibility for a
    given configuration (e.g. gcc version) required by the client.
    """

    def __init__(self, req, config, section):
      """Initializes a version or dependency requirement object.

      Args:
        req: List that contains individual supported versions or a single string
               that contains `range` definition.
               e.g. [`range(1.0, 2.0) include(3.0) exclude(1.5)`]
               e.g. [`1.0`, `3.0`, `7.1`]
        config: String that is the configuration name.
                  e.g. `platform`
        section: String that is the section name from the `.ini` config file
                   under which the requirement is defined.
                   e.g. `Required`, `Optional`, `Unsupported`, `Dependency`
      """
      # Req class variables.
      self.req = req
      self.exclude = None
      self.include = None
      self.range = [None, None]  # for [min, max]
      self.config = config

      self._req_type = ""  # e.g. `range` or `no_range`
      self._section = section
      self._initialized = None
      self._error_message = []

      # Parse and store requirement specifications.
      self.parse_single_req()

    @property
    def get_status(self):
      """Get status of `_Reqs` initialization.

      Returns:
        Tuple
          (Boolean indicating initialization status,
           List of error messages, if any)
      """
      return self._initialized, self._error_message

    def __str__(self):
      """Prints a requirement and its components.

      Returns:
        String that has concatenated information about a requirement.
      """
      info = {
          "section": self._section,
          "config": self.config,
          "req_type": self._req_type,
          "req": str(self.req),
          "range": str(self.range),
          "exclude": str(self.exclude),
          "include": str(self.include),
          "init": str(self._initialized)
      }
      req_str = "\n >>> _Reqs Instance <<<\n"
      req_str += "Section: {section}\n"
      req_str += "Configuration name: {config}\n"
      req_str += "Requirement type: {req_type}\n"
      req_str += "Requirement: {req}\n"
      req_str += "Range: {range}\n"
      req_str += "Exclude: {exclude}\n"
      req_str += "Include: {include}\n"
      # Typo fix: label previously read `Initilalized`.
      req_str += "Initialized: {init}\n\n"
      return req_str.format(**info)

    def parse_single_req(self):
      """Parses a requirement and stores information.

      `self.req` _initialized in `__init__` is called for retrieving the
      requirement.

      A requirement can come in two forms:
        [1] String that includes `range` indicating range syntax for defining
            a requirement.
            e.g. `range(1.0, 2.0) include(3.0) exclude(1.5)`
        [2] List that includes individual supported versions or items.
            e.g. [`1.0`, `3.0`, `7.1`]

      For a list type requirement, it directly stores the list to
      `self.include`.

      Call `get_status` for checking the status of the parsing. This function
      sets `self._initialized` to `False` and immediately returns with an error
      message upon encountering a failure. It sets `self._initialized` to `True`
      and returns without an error message upon success.
      """
      # Regex expression for filtering requirement line. Please refer
      # to docstring above for more information.
      expr = r"(range\()?([\d\.\,\s]+)(\))?( )?(include\()?"
      expr += r"([\d\.\,\s]+)?(\))?( )?(exclude\()?([\d\.\,\s]+)?(\))?"

      # Check that arg `req` is not empty.
      if not self.req:
        err_msg = "[Error] Requirement is missing. "
        err_msg += "(section = %s, " % str(self._section)
        err_msg += "config = %s, req = %s)" % (str(self.config), str(self.req))
        logging.error(err_msg)
        self._initialized = False
        self._error_message.append(err_msg)
        return

      # For requirement given in format with `range`. For example:
      #   python = [range(3.3, 3.7) include(2.7)] as opposed to
      #   python = [2.7, 3.3, 3.4, 3.5, 3.6, 3.7]
      if "range" in self.req[0]:
        self._req_type = "range"
        match = re.match(expr, self.req[0])
        if not match:
          err_msg = "[Error] Encountered issue when parsing the requirement."
          err_msg += " (req = %s, match = %s)" % (str(self.req), str(match))
          logging.error(err_msg)
          self._initialized = False
          self._error_message.append(err_msg)
          return
        else:
          match_grp = match.groups()
          match_size = len(match_grp)
          for i, m in enumerate(match_grp[0:match_size-1], start=0):
            # Get next index. For example:
            # | idx        | next_idx   |
            # +------------+------------+
            # | `range(`   | `1.1, 1.5` |
            # | `exclude(` | `1.1, 1.5` |
            # | `include(` | `1.1, 1.5` |
            next_match = match_grp[i + 1]
            if m not in ["", None, " ", ")"]:
              if "range" in m:
                # Check that the range definition contains only one comma.
                # If more than one comma, then there is format error with the
                # requirement config file.
                comma_count = next_match.count(",")
                if comma_count > 1 or comma_count == 0:
                  err_msg = "[Error] Found zero or more than one comma in range"
                  err_msg += " definition. (req = %s, " % str(self.req)
                  err_msg += "match = %s)" % str(next_match)
                  logging.error(err_msg)
                  self._initialized = False
                  self._error_message.append(err_msg)
                  return
                # Remove empty space in range and separate min, max by
                # comma. (e.g. `1.0, 2.0` => `1.0,2.0` => [`1.0`, `2.0`])
                min_max = next_match.replace(" ", "").split(",")
                # Explicitly define min and max values.
                # If min_max = ['', ''], then `range(, )` was provided as
                # req, which is equivalent to `include all versions`.
                if not min_max[0]:
                  min_max[0] = "0"
                if not min_max[1]:
                  min_max[1] = "inf"
                self.range = min_max
              if "exclude" in m:
                self.exclude = next_match.replace(" ", "").split(",")
              if "include" in m:
                self.include = next_match.replace(" ", "").split(",")
              self._initialized = True
      # For requirement given in format without a `range`. For example:
      #   python = [2.7, 3.3, 3.4, 3.5, 3.6, 3.7] as opposed to
      #   python = [range(3.3, 3.7) include(2.7)]
      else:
        self._req_type = "no_range"
        # Requirement (self.req) should be a list.
        if not isinstance(self.req, list):
          err_msg = "[Error] Requirement is not a list."
          err_msg += "(req = %s, " % str(self.req)
          err_msg += "type(req) = %s)" % str(type(self.req))
          logging.error(err_msg)
          self._initialized = False
          self._error_message.append(err_msg)
        else:
          self.include = self.req
          self._initialized = True
      return

  def __init__(self, usr_config, req_file):
    """Initializes a configuration compatibility checker.

    Args:
      usr_config: Dict of all configuration(s) whose version compatibilities
                    are to be checked against the rules defined in the `.ini`
                    config file.
      req_file: String that is the full name of the `.ini` config file.
                  e.g. `config.ini`
    """
    # ConfigCompatChecker class variables.
    self.usr_config = usr_config
    self.req_file = req_file
    self.warning_msg = []
    self.error_msg = []

    # Get and store requirements.
    reqs_all = self.get_all_reqs()
    self.required = reqs_all["required"]
    self.optional = reqs_all["optional"]
    self.unsupported = reqs_all["unsupported"]
    self.dependency = reqs_all["dependency"]

    self.successes = []
    self.failures = []

  def get_all_reqs(self):
    """Parses all compatibility specifications listed in the `.ini` config file.

    Reads and parses each and all compatibility specifications from the `.ini`
    config file by sections. It then populates appropriate dicts that represent
    each section (e.g. `self.required`) and returns a tuple of the populated
    dicts.

    Returns:
      Dict of dict
        { `required`: Dict of `Required` configs and supported versions,
          `optional`: Dict of `Optional` configs and supported versions,
          `unsupported`: Dict of `Unsupported` configs and supported versions,
          `dependency`: Dict of `Dependency` configs and supported versions }
    """
    # First check if file exists. Exit on failure. The context manager
    # guarantees the probe handle is closed again (a bare `open` call here
    # would leak the handle).
    try:
      with open(self.req_file, "rb"):
        pass
    except IOError:
      msg = "[Error] Cannot read file '%s'." % self.req_file
      logging.error(msg)
      sys.exit(1)

    # Store status of parsing requirements. For local usage only.
    curr_status = True

    # Initialize config parser for parsing version requirements file.
    parser = ConfigParser.ConfigParser()
    parser.read(self.req_file)

    if not parser.sections():
      # Typo fixes: message previously read "Empty confie file" and
      # "parser sectons".
      err_msg = "[Error] Empty config file. "
      err_msg += "(file = %s, " % str(self.req_file)
      err_msg += "parser sections = %s)" % str(parser.sections())
      self.error_msg.append(err_msg)
      logging.error(err_msg)
      curr_status = False

    # Each dependency dict will have the following format.
    # _dict = {
    #     `<config_name>` : [_Reqs()],
    #     `<config_name>` : [_Reqs()]
    # }
    required_dict = {}
    optional_dict = {}
    unsupported_dict = {}
    dependency_dict = {}

    # Parse every config under each section defined in config file
    # and populate requirement dict(s).
    for section in parser.sections():
      all_configs = parser.options(section)
      for config in all_configs:
        spec = parser.get(section, config)

        # Separately manage each section:
        #   `Required`,
        #   `Optional`,
        #   `Unsupported`,
        #   `Dependency`
        # One of the sections is required.
        if section == "Dependency":
          dependency_dict[config] = []
          spec_split = spec.split(",\n")

          # First dependency item may only or not have `[` depending
          # on the indentation style in the config (.ini) file.
          # If it has `[`, then either skip or remove from string.
          if spec_split[0] == "[":
            spec_split = spec_split[1:]
          elif "[" in spec_split[0]:
            spec_split[0] = spec_split[0].replace("[", "")
          else:
            warn_msg = "[Warning] Config file format error: Missing `[`."
            warn_msg += "(section = %s, " % str(section)
            warn_msg += "config = %s)" % str(config)
            logging.warning(warn_msg)
            self.warning_msg.append(warn_msg)

          # Last dependency item may only or not have `]` depending
          # on the indentation style in the config (.ini) file.
          # If it has `]`, then either skip or remove from string.
          if spec_split[-1] == "]":
            spec_split = spec_split[:-1]
          elif "]" in spec_split[-1]:
            spec_split[-1] = spec_split[-1].replace("]", "")
          else:
            warn_msg = "[Warning] Config file format error: Missing `]`."
            warn_msg += "(section = %s, " % str(section)
            warn_msg += "config = %s)" % str(config)
            logging.warning(warn_msg)
            self.warning_msg.append(warn_msg)

          # Parse `spec_split` which is a list of all dependency rules
          # retrieved from the config file.
          # Create a _Reqs() instance for each rule and store it under
          # appropriate class dict (e.g. dependency_dict) with a proper
          # key.
          #
          # For dependency definition, it creates one _Reqs() instance each
          # for requirement and dependency. For example, it would create
          # a list in the following indexing sequence:
          #
          #   [`config', <`config` _Reqs()>, `dep', <`dep` _Reqs()>]
          #
          # For example:
          #   [`python`, _Reqs(), `tensorflow`, _Reqs()] for
          #   `python 3.7 requires tensorflow 1.13`
          for rule in spec_split:
            # Filter out only the necessary information from `rule` string.
            spec_dict = self.filter_dependency(rule)

            # Create _Reqs() instance for each rule.
            cfg_name = spec_dict["cfg"]  # config name
            dep_name = spec_dict["cfgd"]  # dependency name
            cfg_req = self._Reqs(
                self.convert_to_list(spec_dict["cfg_spec"], " "),
                config=cfg_name,
                section=section
            )
            dep_req = self._Reqs(
                self.convert_to_list(spec_dict["cfgd_spec"], " "),
                config=dep_name,
                section=section
            )

            # Check status of _Reqs() initialization. If wrong formats are
            # detected from the config file, it would return `False` for
            # initialization status.
            # `<_Reqs>.get_status` returns [_initialized, _error_message]
            cfg_req_status = cfg_req.get_status
            dep_req_status = dep_req.get_status
            if not cfg_req_status[0] or not dep_req_status[0]:
              # `<_Reqs>.get_status()[1]` returns empty upon successful init.
              msg = "[Error] Failed to create _Reqs() instance for a "
              msg += "dependency item. (config = %s, " % str(cfg_name)
              msg += "dep = %s)" % str(dep_name)
              logging.error(msg)
              self.error_msg.append(cfg_req_status[1])
              self.error_msg.append(dep_req_status[1])
              curr_status = False
              break
            else:
              dependency_dict[config].append(
                  [cfg_name, cfg_req, dep_name, dep_req])

          # Break out of `if section == 'Dependency'` block.
          if not curr_status:
            break

        else:
          if section == "Required":
            add_to = required_dict
          elif section == "Optional":
            add_to = optional_dict
          elif section == "Unsupported":
            add_to = unsupported_dict
          else:
            msg = "[Error] Section name `%s` is not accepted." % str(section)
            msg += "Accepted section names are `Required`, `Optional`, "
            msg += "`Unsupported`, and `Dependency`."
            logging.error(msg)
            self.error_msg.append(msg)
            curr_status = False
            break

          # Need to make sure `req` argument for _Reqs() instance is always
          # a list. If not, convert to list.
          req_list = self.convert_to_list(self.filter_line(spec), " ")
          add_to[config] = self._Reqs(req_list, config=config, section=section)

        # Break out of `for config in all_configs` loop.
        if not curr_status:
          break

      # Break out of `for section in parser.sections()` loop.
      if not curr_status:
        break

    return_dict = {
        "required": required_dict,
        "optional": optional_dict,
        "unsupported": unsupported_dict,
        "dependency": dependency_dict
    }
    return return_dict

  def filter_dependency(self, line):
    """Filters dependency compatibility rules defined in the `.ini` config file.

    Dependency specifications are defined as the following:
      `<config> <config_version> requires <dependency> <dependency_version>`
    e.g.
      `python 3.7 requires tensorflow 1.13`
      `tensorflow range(1.0.0, 1.13.1) requires gcc range(4.8, )`

    Args:
      line: String that is a dependency specification defined under
              `Dependency` section in the `.ini` config file.

    Returns:
      Dict with configuration and its dependency information.
        e.g. {`cfg`: `python`,  # configuration name
              `cfg_spec`: `3.7`,  # configuration version
              `cfgd`: `tensorflow`,  # dependency name
              `cfgd_spec`: `4.8`}  # dependency version
    """
    line = line.strip("\n")
    expr = r"(?P<cfg>[\S]+) (?P<cfg_spec>range\([\d\.\,\s]+\)( )?"
    expr += r"(include\([\d\.\,\s]+\))?( )?(exclude\([\d\.\,\s]+\))?( )?"
    expr += r"|[\d\,\.\s]+) requires (?P<cfgd>[\S]+) (?P<cfgd_spec>range"
    expr += r"\([\d\.\,\s]+\)( )?(include\([\d\.\,\s]+\))?( )?"
    expr += r"(exclude\([\d\.\,\s]+\))?( )?|[\d\,\.\s]+)"
    r = re.match(expr, line.strip("\n"))
    return r.groupdict()

  def convert_to_list(self, item, separator):
    """Converts a string into a list with a separator.

    Args:
      item: String that needs to be separated into a list by a given separator.
              List item is also accepted but will take no effect.
      separator: String with which the `item` will be split.

    Returns:
      List that is a split version of a given input string.
        e.g. Input: `1.0, 2.0, 3.0` with `, ` separator
             Output: [1.0, 2.0, 3.0]
    """
    out = None
    if not isinstance(item, list):
      if "range" in item:
        # If arg `item` is a single string, then create a list with just
        # the item.
        out = [item]
      else:
        # arg `item` can come in as the following:
        #   `1.0, 1.1, 1.2, 1.4`
        # if requirements were defined without the `range()` format.
        # In such a case, create a list separated by `separator` which is
        # an empty string (' ') in this case.
        out = item.split(separator)
        for i in range(len(out)):
          out[i] = out[i].replace(",", "")
    # arg `item` is a list already.
    else:
      # NOTE(review): this wraps the incoming list inside another list
      # (`[item]`, not `item`), which contradicts the docstring's "will take
      # no effect". Left as-is to preserve current behavior — confirm against
      # callers that pass multi-line (list) specs.
      out = [item]
    return out

  def filter_line(self, line):
    """Removes `[` or `]` from the input line.

    Args:
      line: String that is a compatibility specification line from the `.ini`
              config file.

    Returns:
      String that is a compatibility specification line without `[` and `]`.
    """
    filtered = []
    warn_msg = []
    splited = line.split("\n")

    # If arg `line` is empty, then requirement might be missing. Add
    # to warning as this issue will be caught in _Reqs() initialization.
    if not line and len(splited) < 1:
      warn_msg = "[Warning] Empty line detected while filtering lines."
      logging.warning(warn_msg)
      self.warning_msg.append(warn_msg)

    # In general, first line in requirement definition will include `[`
    # in the config file (.ini). Remove it.
    if splited[0] == "[":
      filtered = splited[1:]
    elif "[" in splited[0]:
      # NOTE(review): `splited` (a list) is rebound to a string here, so any
      # lines after the first are dropped. The analogous Dependency handling
      # in `get_all_reqs` mutates element 0 instead. Left unchanged to
      # preserve current behavior for single-line specs — verify against
      # multi-line specs.
      splited = splited[0].replace("[", "")
      filtered = splited
    # If `[` is missing, then it could be a formatting issue with
    # config file (.ini.). Add to warning.
    else:
      warn_msg = "[Warning] Format error. `[` could be missing in "
      warn_msg += "the config (.ini) file. (line = %s)" % str(line)
      logging.warning(warn_msg)
      self.warning_msg.append(warn_msg)

    # In general, last line in requirement definition will include `]`
    # in the config file (.ini). Remove it.
    if filtered[-1] == "]":
      filtered = filtered[:-1]
    elif "]" in filtered[-1]:
      filtered[-1] = filtered[-1].replace("]", "")
    # If `]` is missing, then it could be a formatting issue with
    # config file (.ini.). Add to warning.
    else:
      warn_msg = "[Warning] Format error. `]` could be missing in "
      warn_msg += "the config (.ini) file. (line = %s)" % str(line)
      logging.warning(warn_msg)
      self.warning_msg.append(warn_msg)

    return filtered

  def in_range(self, ver, req):
    """Checks if a version satisfies a version and/or compatibility requirement.

    Args:
      ver: List whose first item is a config version that needs to be checked
             for support status and version compatibility.
             e.g. ver = [`1.0`]
      req: `_Reqs` class instance that represents a configuration version and
             compatibility specifications.

    Returns:
      Boolean output of checking if version `ver` meets the requirement
        stored in `req` (or a `_Reqs` requirements class instance).
    """
    # If `req.exclude` is not empty and `ver` is in `req.exclude`,
    # no need to proceed to next set of checks as it is explicitly
    # NOT supported.
    if req.exclude is not None:
      for v in ver:
        if v in req.exclude:
          return False

    # If `req.include` is not empty and `ver` is in `req.include`,
    # no need to proceed to next set of checks as it is supported and
    # NOT unsupported (`req.exclude`).
    include_checked = False
    if req.include is not None:
      for v in ver:
        if v in req.include:
          return True
      include_checked = True

    # If `req.range` is not empty, then `ver` is defined with a `range`
    # syntax. Check whether `ver` falls under the defined supported
    # range.
    if req.range != [None, None]:
      min_v = req.range[0]  # minimum supported version
      max_v = req.range[1]  # maximum supported version
      ver = ver[0]  # version to compare

      lg = _compare_versions(min_v, ver)["larger"]  # `ver` should be larger
      sm = _compare_versions(ver, max_v)["smaller"]  # `ver` should be smaller

      if lg in [ver, "equal"] and sm in [ver, "equal", "inf"]:
        return True
      else:
        err_msg = "[Error] Version is outside of supported range. "
        err_msg += "(config = %s, " % str(req.config)
        err_msg += "version = %s, " % str(ver)
        err_msg += "supported range = %s)" % str(req.range)
        logging.warning(err_msg)
        self.warning_msg.append(err_msg)
        return False

    else:
      err_msg = ""
      if include_checked:
        # user config is not supported as per exclude, include, range
        # specification.
        err_msg = "[Error] Version is outside of supported range. "
      else:
        # user config is not defined in exclude, include or range. config file
        # error.
        err_msg = "[Error] Missing specification. "
      err_msg += "(config = %s, " % str(req.config)
      err_msg += "version = %s, " % str(ver)
      err_msg += "supported range = %s)" % str(req.range)
      logging.warning(err_msg)
      self.warning_msg.append(err_msg)
      return False

  def _print(self, *args):
    """Prints compatibility check status and failure or warning messages.

    Prints to console without using `logging`.

    Args:
      *args: String(s) that is one of:
               [`failures`,  # all failures
                `successes`,  # all successes
                `failure_msgs`,  # failure message(s) recorded upon failure(s)
                `warning_msgs`]  # warning message(s) recorded upon warning(s)

    Raises:
      Exception: If *args not in:
                   [`failures`, `successes`, `failure_msgs`, `warning_msg`]
    """

    def _format(name, arr):
      """Prints compatibility check results with a format.

      Args:
        name: String that is the title representing list `arr`.
        arr: List of items to be printed in a certain format.
      """
      title = "### All Compatibility %s ###" % str(name)
      tlen = len(title)
      print("-"*tlen)
      print(title)
      print("-"*tlen)
      print(" Total # of %s: %s\n" % (str(name), str(len(arr))))
      if arr:
        for item in arr:
          detail = ""
          if isinstance(item[1], list):
            for itm in item[1]:
              detail += str(itm) + ", "
            detail = detail[:-2]
          else:
            detail = str(item[1])
          print(" %s ('%s')\n" % (str(item[0]), detail))
      else:
        print(" No %s" % name)
      print("\n")

    for p_item in args:
      if p_item == "failures":
        _format("Failures", self.failures)
      elif p_item == "successes":
        _format("Successes", self.successes)
      elif p_item == "failure_msgs":
        _format("Failure Messages", self.error_msg)
      elif p_item == "warning_msgs":
        _format("Warning Messages", self.warning_msg)
      else:
        raise Exception(
            "[Error] Wrong input provided for %s." % _get_func_name())

  def check_compatibility(self):
    """Checks version and dependency compatibility for a given configuration.

    `check_compatibility` immediately returns with `False` (or failure status)
    if any child process or checks fail. For error and warning messages, either
    print `self.(error_msg|warning_msg)` or call `_print` function.

    Returns:
      Boolean that is a status of the compatibility check result.
    """
    # Check if all `Required` configs are found in user configs.
    # Bug fix: this previously iterated over `self.usr_config` and tested
    # membership against its own keys, which could never fail; iterate over
    # the `Required` section instead, as the check intends.
    usr_keys = self.usr_config.keys()
    for k in six.iterkeys(self.required):
      if k not in usr_keys:
        err_msg = "[Error] Required config not found in user config."
        err_msg += "(required = %s, " % str(k)
        err_msg += "user configs = %s)" % str(usr_keys)
        logging.error(err_msg)
        self.error_msg.append(err_msg)
        self.failures.append([k, err_msg])
        return False

    # Parse each user config and validate its compatibility.
    overall_status = True
    for config_name, spec in six.iteritems(self.usr_config):
      temp_status = True

      # Check under which section the user config is defined.
      in_required = config_name in self.required.keys()
      in_optional = config_name in self.optional.keys()
      in_unsupported = config_name in self.unsupported.keys()
      in_dependency = config_name in self.dependency.keys()

      # Add to warning if user config is not specified in the config file.
      if not (in_required or in_optional or in_unsupported or in_dependency):
        warn_msg = "[Error] User config not defined in config file."
        warn_msg += "(user config = %s)" % str(config_name)
        logging.warning(warn_msg)
        self.warning_msg.append(warn_msg)
        self.failures.append([config_name, warn_msg])
        temp_status = False
      else:
        if in_unsupported:
          if self.in_range(spec, self.unsupported[config_name]):
            err_msg = "[Error] User config is unsupported. It is "
            err_msg += "defined under 'Unsupported' section in the config file."
            err_msg += " (config = %s, spec = %s)" % (config_name, str(spec))
            logging.error(err_msg)
            self.error_msg.append(err_msg)
            self.failures.append([config_name, err_msg])
            temp_status = False

        if in_required:
          if not self.in_range(spec, self.required[config_name]):
            err_msg = "[Error] User config cannot be supported. It is not in "
            err_msg += "the supported range as defined in the 'Required' "
            err_msg += "section. (config = %s, " % config_name
            err_msg += "spec = %s)" % str(spec)
            logging.error(err_msg)
            self.error_msg.append(err_msg)
            self.failures.append([config_name, err_msg])
            temp_status = False

        if in_optional:
          if not self.in_range(spec, self.optional[config_name]):
            err_msg = "[Error] User config cannot be supported. It is not in "
            err_msg += "the supported range as defined in the 'Optional' "
            err_msg += "section. (config = %s, " % config_name
            err_msg += "spec = %s)" % str(spec)
            logging.error(err_msg)
            self.error_msg.append(err_msg)
            self.failures.append([config_name, err_msg])
            temp_status = False

        # If user config and version has a dependency, check both user
        # config + version and dependency config + version are supported.
        if in_dependency:
          # Get dependency information. The information gets retrieved in the
          # following format:
          #   [`config`, `config _Reqs()`, `dependency`, `dependency _Reqs()`]
          dep_list = self.dependency[config_name]
          if dep_list:
            for rule in dep_list:
              cfg = rule[0]  # config name
              cfg_req = rule[1]  # _Reqs() instance for config requirement
              dep = rule[2]  # dependency name
              dep_req = rule[3]  # _Reqs() instance for dependency requirement

              # Check if user config has a dependency in the following
              # sequence:
              #   [1] Check user config and the config that has dependency
              #       are the same. (This is defined as `cfg_status`.)
              #   [2] Check if dependency is supported.
              try:
                cfg_name = self.usr_config[cfg]
                dep_name = self.usr_config[dep]
                cfg_status = self.in_range(cfg_name, cfg_req)
                dep_status = self.in_range(dep_name, dep_req)

                # If both status's are `True`, then user config meets
                # dependency spec.
                if cfg_status:
                  if not dep_status:
                    # throw error
                    err_msg = "[Error] User config has a dependency that cannot"
                    err_msg += " be supported. "
                    err_msg += "'%s' has a dependency on " % str(config_name)
                    err_msg += "'%s'." % str(dep)
                    logging.error(err_msg)
                    self.error_msg.append(err_msg)
                    self.failures.append([config_name, err_msg])
                    temp_status = False
              except KeyError:
                err_msg = "[Error] Dependency is missing from `Required`. "
                err_msg += "(config = %s, dep = %s)" % (cfg, dep)
                logging.error(err_msg)
                self.error_msg.append(err_msg)
                self.failures.append([config_name, err_msg])
                temp_status = False

      # At this point, all requirement related to the user config has been
      # checked and passed. Append to `successes` list.
      if temp_status:
        self.successes.append([config_name, spec])
      else:
        overall_status = False

    return overall_status
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/tensorflow_builder/compat_checker/compat_checker.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
"""Tests for version compatibility checker for TensorFlow Builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
from tensorflow.tools.tensorflow_builder.compat_checker import compat_checker
PATH_TO_DIR = "tensorflow/tools/tensorflow_builder/compat_checker"
# Fixture: configs whose versions are all expected to satisfy the ranges
# defined in `test_config.ini`; `check_compatibility` should succeed.
USER_CONFIG_IN_RANGE = {
    "apple": ["1.0"],
    "banana": ["3"],
    "kiwi": ["2.0"],
    "watermelon": ["2.0.0"],
    "orange": ["4.1"],
    "cherry": ["1.5"],
    "cranberry": ["1.0"],
    "raspberry": ["3.0"],
    "tangerine": ["2.0.0"],
    "jackfruit": ["1.0"],
    "grapefruit": ["2.0"],
    "apricot": ["wind", "flower"],
    "grape": ["7.1"],
    "blueberry": ["3.0"]
}
# Fixture: configs whose versions fall outside the supported ranges;
# `check_compatibility` should record one failure per entry.
USER_CONFIG_NOT_IN_RANGE = {
    "apple": ["4.0"],
    "banana": ["5"],
    "kiwi": ["3.5"],
    "watermelon": ["5.0"],
    "orange": ["3.5"],
    "cherry": ["2.0"],
    "raspberry": ["-1"],
    "cranberry": ["4.5"],
    "tangerine": ["0"],
    "jackfruit": ["5.0"],
    "grapefruit": ["2.5"],
    "apricot": ["hello", "world"],
    "blueberry": ["11.0"],
    "grape": ["7.0"],
    "cantaloupe": ["11.0"]
}
# Fixture: configs with empty or missing version specifications;
# `check_compatibility` should always fail on these.
USER_CONFIG_MISSING = {
    "avocado": ["3.0"],
    "apple": [],
    "banana": ""
}
class CompatCheckerTest(unittest.TestCase):
  """Unit tests for `compat_checker.ConfigCompatChecker`."""

  def setUp(self):
    """Builds the path to the `.ini` config file shared by all test cases."""
    super(CompatCheckerTest, self).setUp()
    self.test_file = os.path.join(PATH_TO_DIR, "test_config.ini")

  def testWithUserConfigInRange(self):
    """Test a set of configs that are supported.

    The combination of [1] configurations that are supported and/or
    compatible with [2] a properly formatted `.ini` config file should
    always return `success`.
    """
    self.compat_checker = compat_checker.ConfigCompatChecker(
        USER_CONFIG_IN_RANGE, self.test_file)
    # The overall compatibility check succeeds...
    self.assertTrue(self.compat_checker.check_compatibility())
    # ...without recording any error messages...
    self.assertFalse(len(self.compat_checker.error_msg))
    # ...and every user config is counted exactly once as a success.
    self.assertEqual(
        len(self.compat_checker.successes), len(USER_CONFIG_IN_RANGE.keys()))

  def testWithUserConfigNotInRange(self):
    """Test a set of configs that are NOT supported.

    The combination of [1] configurations that are NOT supported and/or
    compatible with [2] a properly formatted `.ini` config file should
    always return `failure`.
    """
    self.compat_checker = compat_checker.ConfigCompatChecker(
        USER_CONFIG_NOT_IN_RANGE, self.test_file)
    # The overall compatibility check fails.
    self.assertFalse(self.compat_checker.check_compatibility())
    # Failures were recorded — exactly one per user config.
    recorded_failures = self.compat_checker.failures
    self.assertTrue(len(recorded_failures))
    self.assertEqual(
        len(recorded_failures), len(USER_CONFIG_NOT_IN_RANGE.keys()))

  def testWithUserConfigMissing(self):
    """Test a set of configs that are empty or missing specification."""
    self.compat_checker = compat_checker.ConfigCompatChecker(
        USER_CONFIG_MISSING, self.test_file)
    # With missing specification in config file, the check always fails.
    self.assertFalse(self.compat_checker.check_compatibility())
if __name__ == "__main__":
unittest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/tensorflow_builder/compat_checker/compat_checker_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for operating on Python API Guide files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
def md_files_in_dir(py_guide_src_dir):
  """Returns (full_path, base_name) pairs for the `.md` guide files in a dir."""
  results = []
  for base in os.listdir(py_guide_src_dir):
    full = os.path.join(py_guide_src_dir, base)
    # Keep only regular files with a markdown extension.
    if base.endswith('.md') and os.path.isfile(full):
      results.append((full, base))
  return results
class PyGuideParser(object):
  """Simple parsing of a guide .md file.

  Descendants can override the process_*() functions (called by process())
  to either record information from the guide, or call replace_line()
  to affect the return value of process().
  """

  def __init__(self):
    # Current file's lines; only populated while `process()` is running.
    self._lines = None

  def process(self, full_path):
    """Read and process the file at `full_path`.

    Dispatches each line to one of the `process_*` hooks, tracking code
    blockquotes (``` fences) and generating a unique anchor tag for each
    `## ` section heading.

    Args:
      full_path: Path of the markdown file to read.

    Returns:
      The file contents (possibly modified via `replace_line`) as a string.
    """
    with open(full_path, 'rb') as f:
      md_string = f.read().decode('utf-8')
    self._lines = md_string.split('\n')
    seen = set()

    in_blockquote = False
    for i, line in enumerate(self._lines):
      if '```' in line:
        in_blockquote = not in_blockquote

      if not in_blockquote and line.startswith('# '):
        self.process_title(i, line[2:])
      elif not in_blockquote and line.startswith('## '):
        section_title = line.strip()[3:]
        existing_tag = re.search(' {([^}]+)} *$', line)
        if existing_tag:
          tag = existing_tag.group(1)
        else:
          # Create a markdown-anchor-safe tag from the title.
          tag = re.sub('[^a-zA-Z0-9]+', '_', section_title)
        if tag in seen:
          # Disambiguate duplicate tags with a numeric suffix.
          suffix = 0
          while True:
            candidate = '%s_%d' % (tag, suffix)
            if candidate not in seen:
              tag = candidate
              break
            # BUG FIX: `suffix` was never incremented before, so a third
            # occurrence of the same tag spun in an infinite loop.
            suffix += 1
        seen.add(tag)
        self.process_section(i, section_title, tag)
      elif in_blockquote:
        self.process_in_blockquote(i, line)
      else:
        self.process_line(i, line)

    ret = '\n'.join(self._lines)
    self._lines = None
    return ret

  def replace_line(self, line_number, line):
    """Replace the contents of line numbered `line_number` with `line`."""
    self._lines[line_number] = line

  def process_title(self, line_number, title):
    """Hook for `# ` titles; default is a no-op."""
    pass

  def process_section(self, line_number, section_title, tag):
    """Hook for `## ` section headings; default is a no-op."""
    pass

  def process_in_blockquote(self, line_number, line):
    """Hook for lines inside ``` fences; default is a no-op."""
    pass

  def process_line(self, line_number, line):
    """Hook for all other lines; default is a no-op."""
    pass
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/docs/py_guide_parser.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tools.docs.doc_generator_visitor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import types
from tensorflow.python.platform import googletest
from tensorflow.tools.docs import doc_generator_visitor
from tensorflow.tools.docs import generate_lib
class NoDunderVisitor(doc_generator_visitor.DocGeneratorVisitor):
  """Visitor that skips `_`-prefixed children, to keep tests small."""

  def __call__(self, parent_name, parent, children):
    """Drop all the dunder methods to make testing easier."""
    public_children = []
    for name, obj in children:
      if not name.startswith('_'):
        public_children.append((name, obj))
    super(NoDunderVisitor, self).__call__(parent_name, parent,
                                          public_children)
class DocGeneratorVisitorTest(googletest.TestCase):
  """Tests index/tree/duplicate bookkeeping of `DocGeneratorVisitor`."""

  def _extract(self, tf):
    """Runs `generate_lib.extract` over `tf` with this suite's settings."""
    return generate_lib.extract(
        [('tf', tf)],
        private_map={},
        do_not_descend_map={},
        visitor_cls=NoDunderVisitor)

  def test_call_module(self):
    v = doc_generator_visitor.DocGeneratorVisitor()
    v('doc_generator_visitor', doc_generator_visitor,
      [('DocGeneratorVisitor', doc_generator_visitor.DocGeneratorVisitor)])

    expected_tree = {'doc_generator_visitor': ['DocGeneratorVisitor']}
    expected_index = {
        'doc_generator_visitor': doc_generator_visitor,
        'doc_generator_visitor.DocGeneratorVisitor':
            doc_generator_visitor.DocGeneratorVisitor,
    }
    self.assertEqual(expected_tree, v.tree)
    self.assertEqual(expected_index, v.index)

  def test_call_class(self):
    v = doc_generator_visitor.DocGeneratorVisitor()
    v('DocGeneratorVisitor', doc_generator_visitor.DocGeneratorVisitor,
      [('index', doc_generator_visitor.DocGeneratorVisitor.index)])

    expected_tree = {'DocGeneratorVisitor': ['index']}
    expected_index = {
        'DocGeneratorVisitor': doc_generator_visitor.DocGeneratorVisitor,
        'DocGeneratorVisitor.index':
            doc_generator_visitor.DocGeneratorVisitor.index
    }
    self.assertEqual(expected_tree, v.tree)
    self.assertEqual(expected_index, v.index)

  def test_call_raises(self):
    # Visiting something that is neither a class nor a module is an error.
    v = doc_generator_visitor.DocGeneratorVisitor()
    with self.assertRaises(RuntimeError):
      v('non_class_or_module', 'non_class_or_module_object', [])

  def test_duplicates_module_class_depth(self):

    class Parent(object):

      class Nested(object):
        pass

    tf = types.ModuleType('tf')
    tf.Parent = Parent
    tf.submodule = types.ModuleType('submodule')
    tf.submodule.Parent = Parent

    v = self._extract(tf)

    # The deeper module path wins as the preferred name.
    self.assertEqual({
        'tf.submodule.Parent':
            sorted(['tf.Parent', 'tf.submodule.Parent']),
        'tf.submodule.Parent.Nested':
            sorted(['tf.Parent.Nested', 'tf.submodule.Parent.Nested']),
    }, v.duplicates)
    self.assertEqual({
        'tf.Parent.Nested': 'tf.submodule.Parent.Nested',
        'tf.Parent': 'tf.submodule.Parent',
    }, v.duplicate_of)
    self.assertEqual({
        id(Parent): 'tf.submodule.Parent',
        id(Parent.Nested): 'tf.submodule.Parent.Nested',
        id(tf): 'tf',
        id(tf.submodule): 'tf.submodule',
    }, v.reverse_index)

  def test_duplicates_contrib(self):

    class Parent(object):
      pass

    tf = types.ModuleType('tf')
    tf.contrib = types.ModuleType('contrib')
    tf.submodule = types.ModuleType('submodule')
    tf.contrib.Parent = Parent
    tf.submodule.Parent = Parent

    v = self._extract(tf)

    # Names under `contrib` lose to names outside of it.
    self.assertEqual({
        'tf.submodule.Parent':
            sorted(['tf.contrib.Parent', 'tf.submodule.Parent']),
    }, v.duplicates)
    self.assertEqual({
        'tf.contrib.Parent': 'tf.submodule.Parent',
    }, v.duplicate_of)
    self.assertEqual({
        id(tf): 'tf',
        id(tf.submodule): 'tf.submodule',
        id(Parent): 'tf.submodule.Parent',
        id(tf.contrib): 'tf.contrib',
    }, v.reverse_index)

  def test_duplicates_defining_class(self):

    class Parent(object):
      obj1 = object()

    class Child(Parent):
      pass

    tf = types.ModuleType('tf')
    tf.Parent = Parent
    tf.Child = Child

    v = self._extract(tf)

    # The attribute is attributed to the class that defines it.
    self.assertEqual({
        'tf.Parent.obj1': sorted(['tf.Parent.obj1', 'tf.Child.obj1']),
    }, v.duplicates)
    self.assertEqual({
        'tf.Child.obj1': 'tf.Parent.obj1',
    }, v.duplicate_of)
    self.assertEqual({
        id(tf): 'tf',
        id(Parent): 'tf.Parent',
        id(Child): 'tf.Child',
        id(Parent.obj1): 'tf.Parent.obj1',
    }, v.reverse_index)

  def test_duplicates_module_depth(self):

    class Parent(object):
      pass

    tf = types.ModuleType('tf')
    tf.submodule = types.ModuleType('submodule')
    tf.submodule.submodule2 = types.ModuleType('submodule2')
    tf.Parent = Parent
    tf.submodule.submodule2.Parent = Parent

    v = self._extract(tf)

    # The shallower path is preferred here.
    self.assertEqual({
        'tf.Parent': sorted(['tf.Parent', 'tf.submodule.submodule2.Parent']),
    }, v.duplicates)
    self.assertEqual({
        'tf.submodule.submodule2.Parent': 'tf.Parent'
    }, v.duplicate_of)
    self.assertEqual({
        id(tf): 'tf',
        id(tf.submodule): 'tf.submodule',
        id(tf.submodule.submodule2): 'tf.submodule.submodule2',
        id(Parent): 'tf.Parent',
    }, v.reverse_index)

  def test_duplicates_name(self):

    class Parent(object):
      obj1 = object()

    Parent.obj2 = Parent.obj1

    tf = types.ModuleType('tf')
    tf.submodule = types.ModuleType('submodule')
    tf.submodule.Parent = Parent

    v = self._extract(tf)

    # Two attribute names bound to one object: alphabetical order decides.
    self.assertEqual({
        'tf.submodule.Parent.obj1':
            sorted(['tf.submodule.Parent.obj1', 'tf.submodule.Parent.obj2']),
    }, v.duplicates)
    self.assertEqual({
        'tf.submodule.Parent.obj2': 'tf.submodule.Parent.obj1',
    }, v.duplicate_of)
    self.assertEqual({
        id(tf): 'tf',
        id(tf.submodule): 'tf.submodule',
        id(Parent): 'tf.submodule.Parent',
        id(Parent.obj1): 'tf.submodule.Parent.obj1',
    }, v.reverse_index)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/docs/doc_generator_visitor_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A tool to generate api_docs for TensorFlow2.
```
python generate2.py --output_dir=/tmp/out
```
Requires a local installation of:
https://github.com/tensorflow/docs/tree/master/tools
tf-nightly-2.0-preview
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
import textwrap
from absl import app
from absl import flags
from distutils.version import LooseVersion
import tensorflow as tf
from tensorflow_docs.api_generator import doc_controls
from tensorflow_docs.api_generator import doc_generator_visitor
from tensorflow_docs.api_generator import generate_lib
from tensorflow_docs.api_generator import parser
import tensorboard
import tensorflow_estimator
from tensorflow.python.util import tf_export
from tensorflow.python.util import tf_inspect
# Use tensorflow's `tf_inspect`, which is aware of `tf_decorator`.
parser.tf_inspect = tf_inspect

# `tf` has an `__all__` that doesn't list important things like `keras`.
# The doc generator recognizes `__all__` as the list of public symbols.
# So patch `tf.__all__` to list everything.
tf.__all__ = [item_name for item_name, value in tf_inspect.getmembers(tf)]

# Command-line flags controlling where the docs are written and how the
# generated links/search metadata are formed.
FLAGS = flags.FLAGS

flags.DEFINE_string(
    "code_url_prefix",
    "/code/stable/tensorflow",
    "A url to prepend to code paths when creating links to defining code")

flags.DEFINE_string(
    "output_dir", "/tmp/out",
    "A directory, where the docs will be output to.")

flags.DEFINE_bool("search_hints", True,
                  "Include meta-data search hints at the top of each file.")

flags.DEFINE_string("site_path", "",
                    "The prefix ({site-path}/api_docs/python/...) used in the "
                    "`_toc.yaml` and `_redirects.yaml` files")
# TF 1.x carries the large `tf.contrib` tree; hide the parts that are
# internal or too noisy to document. TF 2.x has no contrib, so both maps
# are empty there.
if tf.__version__.startswith('1'):
  PRIVATE_MAP = {
      'tf.contrib.autograph': ['utils', 'operators'],
      'tf.test': ['mock'],
      'tf.contrib.estimator': ['python'],
  }
  DO_NOT_DESCEND_MAP = {
      'tf': ['cli', 'lib', 'wrappers'],
      'tf.contrib': [
          'compiler',
          'grid_rnn',
          # Block contrib.keras to de-clutter the docs
          'keras',
          'labeled_tensor',
          'quantization',
          'session_bundle',
          'slim',
          'solvers',
          'specs',
          'tensor_forest',
          'tensorboard',
          'testing',
          'tfprof',
      ],
      'tf.contrib.bayesflow': [
          'special_math', 'stochastic_gradient_estimators',
          'stochastic_variables'
      ],
      'tf.contrib.ffmpeg': ['ffmpeg_ops'],
      'tf.contrib.graph_editor': [
          'edit', 'match', 'reroute', 'subgraph', 'transform', 'select', 'util'
      ],
      'tf.contrib.keras': ['api', 'python'],
      'tf.contrib.layers': ['feature_column', 'summaries'],
      'tf.contrib.learn': [
          'datasets',
          'head',
          'graph_actions',
          'io',
          'models',
          'monitors',
          'ops',
          'preprocessing',
          'utils',
      ],
      'tf.contrib.util': ['loader'],
  }
else:
  PRIVATE_MAP = {}
  DO_NOT_DESCEND_MAP = {}

# Replace the `tf` module docstring shown at the top of the generated docs.
tf.__doc__ = """
## TensorFlow 2.0 Beta
Caution: This is a developer preview.  You will likely find some bugs,
performance issues, and more, and we encourage you to tell us about them.
We value your feedback!
These docs were generated from the beta build of TensorFlow 2.0.
You can install the exact version that was used to generate these docs
with:
```
pip install tensorflow==2.0.0-beta1
```
"""

_raw_ops_doc = textwrap.dedent("""\n
Note: `tf.raw_ops` provides direct/low level access to all TensorFlow ops. See \
[the RFC](https://github.com/tensorflow/community/blob/master/rfcs/20181225-tf-raw-ops.md)
for details. Unless you are library writer, you likely do not need to use these
ops directly.""")

# `tf.raw_ops` (and, in 1.x, `tf.contrib`) are generated modules; patch in
# docstrings here so the generated pages are not empty.
if LooseVersion(tf.__version__) < LooseVersion('2'):
  tf.raw_ops.__doc__ = _raw_ops_doc
  tf.contrib.__doc__ = """
Contrib module containing volatile or experimental code.
Warning: The `tf.contrib` module will not be included in TensorFlow 2.0. Many
of its submodules have been integrated into TensorFlow core, or spun-off into
other projects like [`tensorflow_io`](https://github.com/tensorflow/io), or
[`tensorflow_addons`](https://github.com/tensorflow/addons). For instructions
on how to upgrade see the
[Migration guide](https://www.tensorflow.org/beta/guide/migration_guide).
"""
else:
  tf.raw_ops.__doc__ += _raw_ops_doc
class TfExportAwareDocGeneratorVisitor(
    doc_generator_visitor.DocGeneratorVisitor):
  """A `tf_export` aware doc_visitor."""

  def _score_name(self, name):
    """Prefixes the parent's score with -1 for canonical names, +1 otherwise."""
    canonical = tf_export.get_canonical_name_for_symbol(self._index[name])
    if canonical is not None and name == "tf." + canonical:
      canonical_score = -1
    else:
      canonical_score = 1
    base_scores = super(TfExportAwareDocGeneratorVisitor,
                        self)._score_name(name)
    return (canonical_score,) + base_scores
def _hide_layer_and_module_methods():
  """Hide methods and properties defined in the base classes of keras layers."""
  # `__dict__` only lists attributes defined on *this* class, not inherited
  # ones, so each base class is enumerated explicitly.
  base_members = list(tf.Module.__dict__.items())
  base_members += list(tf.keras.layers.Layer.__dict__.items())
  for name, member in base_members:
    if name == "__init__":
      continue
    # Unwrap properties and static/class methods to reach the raw function.
    if isinstance(member, property):
      member = member.fget
    if isinstance(member, (staticmethod, classmethod)):
      member = member.__func__
    try:
      doc_controls.do_not_doc_in_subclasses(member)
    except AttributeError:
      pass
def build_docs(output_dir, code_url_prefix, search_hints=True):
  """Build api docs for tensorflow v2.

  Args:
    output_dir: A string path, where to put the files.
    code_url_prefix: prefix for "Defined in" links.
    search_hints: Bool. Include meta-data search hints at the top of each file.
  """
  _hide_layer_and_module_methods()

  # These members only exist in some builds, so skip whichever are missing.
  for dotted_name in ("tools", "compat.v1.pywrap_tensorflow",
                      "pywrap_tensorflow", "flags"):
    try:
      target = tf
      for piece in dotted_name.split("."):
        target = getattr(target, piece)
      doc_controls.do_not_generate_docs(target)
    except AttributeError:
      pass

  base_dir = path.dirname(tf.__file__)
  base_dirs = (
      base_dir,
      # External packages base directories,
      path.dirname(tensorboard.__file__),
      path.dirname(tensorflow_estimator.__file__),
  )
  code_url_prefixes = (
      code_url_prefix,
      # External packages source repositories,
      "https://github.com/tensorflow/tensorboard/tree/master/tensorboard",
      "https://github.com/tensorflow/estimator/tree/master/tensorflow_estimator",
  )

  # The two version branches are complementary, so a plain else suffices.
  if LooseVersion(tf.__version__) < LooseVersion('2'):
    root_title = 'TensorFlow'
  else:
    root_title = 'TensorFlow 2.0'

  doc_generator = generate_lib.DocGenerator(
      root_title=root_title,
      py_modules=[("tf", tf)],
      base_dir=base_dirs,
      search_hints=search_hints,
      code_url_prefix=code_url_prefixes,
      site_path=FLAGS.site_path,
      visitor_cls=TfExportAwareDocGeneratorVisitor,
      private_map=PRIVATE_MAP,
      do_not_descend_map=DO_NOT_DESCEND_MAP)
  doc_generator.build(output_dir)
def main(argv):
  """Entry point for `absl.app`; command-line positionals are unused."""
  del argv  # Unused.
  build_docs(
      output_dir=FLAGS.output_dir,
      code_url_prefix=FLAGS.code_url_prefix,
      search_hints=FLAGS.search_hints)
if __name__ == "__main__":
app.run(main)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/docs/generate2.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A module for converting parsed doc content into markdown pages.
The adjacent `parser` module creates `PageInfo` objects, containing all data
necessary to document an element of the TensorFlow API.
This module contains one public function, which handles the conversion of these
`PageInfo` objects into a markdown string:
md_page = build_md_page(page_info)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import textwrap
def build_md_page(page_info):
  """Given a PageInfo object, return markdown for the page.

  Args:
    page_info: must be a `parser.FunctionPageInfo`, `parser.ClassPageInfo`, or
      `parser.ModulePageInfo`

  Returns:
    Markdown for the page

  Raises:
    ValueError: if `page_info` is an instance of an unrecognized class
  """
  # Dispatch on the page kind; each kind has a dedicated builder below.
  if page_info.for_function():
    return _build_function_page(page_info)
  elif page_info.for_class():
    return _build_class_page(page_info)
  elif page_info.for_module():
    return _build_module_page(page_info)
  else:
    raise ValueError('Unknown Page Info Type: %s' % type(page_info))
def _build_function_page(page_info):
  """Renders a `FunctionPageInfo` as a markdown page string."""
  parts = ['# %s\n\n' % page_info.full_name]

  # Only show the alias list when there is more than the primary name.
  if len(page_info.aliases) > 1:
    parts.append('### Aliases:\n\n')
    for alias in page_info.aliases:
      parts.append('* `%s`\n' % alias)
    parts.append('\n')

  if page_info.signature is not None:
    parts.append(_build_signature(page_info))

  if page_info.defined_in:
    parts.append('\n\n')
    parts.append(str(page_info.defined_in))

  parts.append(page_info.guides)
  parts.append(page_info.doc.docstring)
  parts.append(_build_function_details(page_info.doc.function_details))
  parts.append(_build_compatibility(page_info.doc.compatibility))
  return ''.join(parts)
def _build_class_page(page_info):
  """Given a ClassPageInfo object Return the page as an md string.

  Renders, in order: title, base-class links, aliases, "Defined in",
  docstring, constructors, child classes, properties, other methods, and
  remaining class members.
  """
  # Page title, then a "Class `Name`" subtitle built from the short name.
  parts = ['# {page_info.full_name}\n\n'.format(page_info=page_info)]
  parts.append('## Class `%s`\n\n' % page_info.full_name.split('.')[-1])
  if page_info.bases:
    # Comma-separated links to each documented base class.
    parts.append('Inherits From: ')
    link_template = '[`{short_name}`]({url})'
    parts.append(', '.join(
        link_template.format(**base._asdict()) for base in page_info.bases))
  parts.append('\n\n')
  # Sort the methods list, but make sure constructors come first.
  constructor_names = ['__init__', '__new__']
  constructors = sorted(
      method for method in page_info.methods
      if method.short_name in constructor_names)
  other_methods = sorted(
      method for method in page_info.methods
      if method.short_name not in constructor_names)
  # Only show the alias list when there is more than the primary name.
  if len(page_info.aliases) > 1:
    parts.append('### Aliases:\n\n')
    parts.extend('* Class `%s`\n' % name for name in page_info.aliases)
    parts.append('\n')
  if page_info.defined_in is not None:
    parts.append('\n\n')
    parts.append(str(page_info.defined_in))
  parts.append(page_info.guides)
  parts.append(page_info.doc.docstring)
  parts.append(_build_function_details(page_info.doc.function_details))
  parts.append(_build_compatibility(page_info.doc.compatibility))
  parts.append('\n\n')
  if constructors:
    for method_info in constructors:
      # Constructors are rendered at heading level 2, above other methods.
      parts.append(_build_method_section(method_info, heading_level=2))
    parts.append('\n\n')
  if page_info.classes:
    parts.append('## Child Classes\n')
    link_template = ('[`class {class_info.short_name}`]'
                     '({class_info.url})\n\n')
    class_links = sorted(
        link_template.format(class_info=class_info)
        for class_info in page_info.classes)
    parts.extend(class_links)
  if page_info.properties:
    parts.append('## Properties\n\n')
    for prop_info in page_info.properties:
      h3 = '<h3 id="{short_name}"><code>{short_name}</code></h3>\n\n'
      parts.append(h3.format(short_name=prop_info.short_name))
      parts.append(prop_info.doc.docstring)
      parts.append(_build_function_details(prop_info.doc.function_details))
      parts.append(_build_compatibility(prop_info.doc.compatibility))
      parts.append('\n\n')
    parts.append('\n\n')
  if other_methods:
    parts.append('## Methods\n\n')
    for method_info in other_methods:
      parts.append(_build_method_section(method_info))
    parts.append('\n\n')
  if page_info.other_members:
    parts.append('## Class Members\n\n')
    # TODO(markdaoust): Document the value of the members,
    # at least for basic types.
    h3 = '<h3 id="{short_name}"><code>{short_name}</code></h3>\n\n'
    others_member_headings = (h3.format(short_name=info.short_name)
                              for info in sorted(page_info.other_members))
    parts.extend(others_member_headings)
  return ''.join(parts)
def _build_method_section(method_info, heading_level=3):
  """Generates a markdown section for a method.

  Args:
    method_info: A `MethodInfo` object.
    heading_level: An Int, which HTML heading level to use.

  Returns:
    A markdown string.
  """
  heading = ('<h{heading_level} id="{short_name}">'
             '<code>{short_name}</code>'
             '</h{heading_level}>\n\n')
  pieces = [heading.format(heading_level=heading_level,
                           **method_info._asdict())]
  if method_info.signature is not None:
    # Methods show the short name in their signature blocks.
    pieces.append(_build_signature(method_info, use_full_name=False))
  pieces.append(method_info.doc.docstring)
  pieces.append(_build_function_details(method_info.doc.function_details))
  pieces.append(_build_compatibility(method_info.doc.compatibility))
  pieces.append('\n\n')
  return ''.join(pieces)
def _build_module_page(page_info):
  """Given a ModulePageInfo object Return the page as an md string.

  (The docstring previously said `ClassPageInfo` — a copy-paste error.)

  Args:
    page_info: a `parser.ModulePageInfo` describing the module to render.

  Returns:
    Markdown for the module page.
  """
  parts = ['# Module: {full_name}\n\n'.format(full_name=page_info.full_name)]

  # Only show the alias list when there is more than the primary name.
  if len(page_info.aliases) > 1:
    parts.append('### Aliases:\n\n')
    parts.extend('* Module `%s`\n' % name for name in page_info.aliases)
    parts.append('\n')

  if page_info.defined_in is not None:
    parts.append('\n\n')
    parts.append(str(page_info.defined_in))

  parts.append(page_info.doc.docstring)
  parts.append(_build_compatibility(page_info.doc.compatibility))
  parts.append('\n\n')

  # The "Modules", "Classes" and "Functions" sections differ only in their
  # heading and link template, so they share one helper.
  parts.extend(_build_module_member_section(
      page_info.modules, '## Modules\n\n', '[`{short_name}`]({url}) module'))
  parts.extend(_build_module_member_section(
      page_info.classes, '## Classes\n\n', '[`class {short_name}`]({url})'))
  parts.extend(_build_module_member_section(
      page_info.functions, '## Functions\n\n', '[`{short_name}(...)`]({url})'))

  if page_info.other_members:
    # TODO(markdaoust): Document the value of the members,
    # at least for basic types.
    parts.append('## Other Members\n\n')
    h3 = '<h3 id="{short_name}"><code>{short_name}</code></h3>\n\n'
    for item in page_info.other_members:
      parts.append(h3.format(**item._asdict()))

  return ''.join(parts)


def _build_module_member_section(items, heading, link_template):
  """Renders one "Modules"/"Classes"/"Functions" section of a module page.

  Args:
    items: iterable of member-info objects exposing `_asdict()` and
      `doc.brief`.
    heading: markdown heading that opens the section.
    link_template: `str.format` template for each member's link.

  Returns:
    A list of markdown fragments; empty when `items` is empty.
  """
  if not items:
    return []
  parts = [heading]
  for item in items:
    parts.append(link_template.format(**item._asdict()))
    if item.doc.brief:
      parts.append(': ' + item.doc.brief)
    parts.append('\n\n')
  return parts
def _build_signature(obj_info, use_full_name=True):
"""Returns a md code block showing the function signature."""
# Special case tf.range, since it has an optional first argument
if obj_info.full_name == 'tf.range':
return (
'``` python\n'
"tf.range(limit, delta=1, dtype=None, name='range')\n"
"tf.range(start, limit, delta=1, dtype=None, name='range')\n"
'```\n\n')
parts = ['``` python']
parts.extend(['@' + dec for dec in obj_info.decorators])
signature_template = '{name}({sig})'
if not obj_info.signature:
sig = ''
elif len(obj_info.signature) == 1:
sig = obj_info.signature[0]
else:
sig = ',\n'.join(' %s' % sig_item for sig_item in obj_info.signature)
sig = '\n'+sig+'\n'
if use_full_name:
obj_name = obj_info.full_name
else:
obj_name = obj_info.short_name
parts.append(signature_template.format(name=obj_name, sig=sig))
parts.append('```\n\n')
return '\n'.join(parts)
def _build_compatibility(compatibility):
"""Return the compatibility section as an md string."""
parts = []
sorted_keys = sorted(compatibility.keys())
for key in sorted_keys:
value = compatibility[key]
# Dedent so that it does not trigger markdown code formatting.
value = textwrap.dedent(value)
parts.append('\n\n#### %s Compatibility\n%s\n' % (key.title(), value))
return ''.join(parts)
def _build_function_details(function_details):
"""Return the function details section as an md string."""
parts = []
for detail in function_details:
sub = []
sub.append('#### ' + detail.keyword + ':\n\n')
sub.append(textwrap.dedent(detail.header))
for key, value in detail.items:
sub.append('* <b>`%s`</b>: %s' % (key, value))
parts.append(''.join(sub))
return '\n'.join(parts)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/docs/pretty_docs.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.tools.docs.generate2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tensorflow as tf
from tensorflow.python.platform import googletest
from tensorflow.tools.docs import generate2
# Including the compat modules just makes the test take a lot longer.
# Ignoring these is okay, as the main risks of failure are around `tf.contrib`
# and any other modules that are not generated by tf_export.
del tf.compat.v2  # Dropping the compat modules keeps this test fast.
del tf.compat.v1
class Generate2Test(googletest.TestCase):
  """Smoke test: the v2 doc generator runs end to end without crashing."""

  def test_end_to_end(self):
    out_dir = os.path.join(googletest.GetTempDir(), 'output')
    # Start from a clean directory so repeated runs do not interfere.
    if os.path.exists(out_dir):
      shutil.rmtree(out_dir)
    os.makedirs(out_dir)
    generate2.build_docs(output_dir=out_dir, code_url_prefix='')
if __name__ == '__main__':
googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/docs/generate2_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate docs for the TensorFlow Python API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import tensorflow as tf
from tensorflow.python import debug as tf_debug
from tensorflow.python.util import tf_inspect
from tensorflow.tools.docs import generate_lib
if __name__ == '__main__':
  # Build the CLI: output/src/base-dir arguments come from the generator.
  doc_generator = generate_lib.DocGenerator()
  doc_generator.add_output_dir_argument()
  doc_generator.add_src_dir_argument()

  # This doc generator works on the TensorFlow codebase. Since this script lives
  # at tensorflow/tools/docs, and all code is defined somewhere inside
  # tensorflow/, we can compute the base directory (two levels up), which is
  # valid unless we're trying to apply this to a different code base, or are
  # moving the script around.
  script_dir = os.path.dirname(tf_inspect.getfile(tf_inspect.currentframe()))
  default_base_dir = os.path.join(script_dir, '..', '..')
  doc_generator.add_base_dir_argument(default_base_dir)

  flags = doc_generator.parse_known_args()

  # tf_debug is not imported with tf, it's a separate module altogether
  doc_generator.set_py_modules([('tf', tf), ('tfdbg', tf_debug)])

  # Exit with the generator's status code so CI can detect failures.
  sys.exit(doc_generator.build(flags))
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/docs/generate.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A `traverse` visitor for processing documentation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.util import tf_export
from tensorflow.python.util import tf_inspect
class DocGeneratorVisitor(object):
  """A visitor that generates docs for a python object when __call__ed."""

  def __init__(self, root_name=''):
    """Make a visitor.

    As this visitor is starting its traversal at a module or class, it will not
    be told the name of that object during traversal. `root_name` is the name it
    should use for that object, effectively prefixing all names with
    "root_name.".

    Args:
      root_name: The name of the root module/class.
    """
    self.set_root_name(root_name)
    self._index = {}
    self._tree = {}
    # These three are computed lazily by `_maybe_find_duplicates` on first
    # access to `reverse_index`, `duplicate_of` or `duplicates`.
    self._reverse_index = None
    self._duplicates = None
    self._duplicate_of = None

  def set_root_name(self, root_name):
    """Sets the root name for subsequent __call__s."""
    self._root_name = root_name or ''
    self._prefix = (root_name + '.') if root_name else ''

  @property
  def index(self):
    """A map from fully qualified names to objects to be documented.

    The index is filled when the visitor is passed to `traverse`.

    Returns:
      The index filled by traversal.
    """
    return self._index

  @property
  def tree(self):
    """A map from fully qualified names to all its child names for traversal.

    The full name to member names map is filled when the visitor is passed to
    `traverse`.

    Returns:
      The full name to member name map filled by traversal.
    """
    return self._tree

  @property
  def reverse_index(self):
    """A map from `id(object)` to the preferred fully qualified name.

    This map only contains non-primitive objects (no numbers or strings) present
    in `index` (for primitive objects, `id()` doesn't quite do the right thing).

    It is computed when it, `duplicate_of`, or `duplicates` are first accessed.

    Returns:
      The `id(object)` to full name map.
    """
    self._maybe_find_duplicates()
    return self._reverse_index

  @property
  def duplicate_of(self):
    """A map from duplicate full names to a preferred fully qualified name.

    This map only contains names that are not themselves a preferred name.

    It is computed when it, `reverse_index`, or `duplicates` are first accessed.

    Returns:
      The map from duplicate name to preferred name.
    """
    self._maybe_find_duplicates()
    return self._duplicate_of

  @property
  def duplicates(self):
    """A map from preferred full names to a list of all names for this symbol.

    This function returns a map from preferred (master) name for a symbol to a
    lexicographically sorted list of all aliases for that name (incl. the master
    name). Symbols without duplicate names do not appear in this map.

    It is computed when it, `reverse_index`, or `duplicate_of` are first
    accessed.

    Returns:
      The map from master name to list of all duplicate names.
    """
    self._maybe_find_duplicates()
    return self._duplicates

  def _add_prefix(self, name):
    """Adds the root name to a name."""
    return self._prefix + name if name else self._root_name

  def __call__(self, parent_name, parent, children):
    """Visitor interface, see `tensorflow/tools/common:traverse` for details.

    This method is called for each symbol found in a traversal using
    `tensorflow/tools/common:traverse`. It should not be called directly in
    user code.

    Args:
      parent_name: The fully qualified name of a symbol found during traversal.
      parent: The Python object referenced by `parent_name`.
      children: A list of `(name, py_object)` pairs enumerating, in alphabetical
        order, the children (as determined by `tf_inspect.getmembers`) of
        `parent`. `name` is the local name of `py_object` in `parent`.

    Raises:
      RuntimeError: If this visitor is called with a `parent` that is not a
        class or module.
    """
    parent_name = self._add_prefix(parent_name)
    self._index[parent_name] = parent
    self._tree[parent_name] = []

    if not (tf_inspect.ismodule(parent) or tf_inspect.isclass(parent)):
      raise RuntimeError('Unexpected type in visitor -- %s: %r' % (parent_name,
                                                                   parent))

    # Filter hidden members (`__metaclass__`) in place: `traverse` continues
    # its descent using the mutated `children` list. Filtering first, instead
    # of `del children[i]` inside the loop, avoids the shifted-index hazard of
    # deleting from a list while enumerating a copy of it.
    children[:] = [(name, child) for (name, child) in children
                   if name != '__metaclass__']

    for name, child in children:
      full_name = '.'.join([parent_name, name]) if parent_name else name
      self._index[full_name] = child
      self._tree[parent_name].append(name)

  def _score_name(self, name):
    """Return a tuple of scores indicating how to sort for the best name.

    This function is meant to be used as the `key` to the `sorted` function.

    This sorting in order:
      Prefers names referring to the defining class, over a subclass.
      Prefers names that are not in "contrib".
      Prefers submodules to the root namespace.
      Prefers short names `tf.thing` over `tf.a.b.c.thing`.
      Sorts lexicographically on name parts.

    Args:
      name: the full name to score, for example `tf.estimator.Estimator`

    Returns:
      A tuple of scores. When sorted the preferred name will have the lowest
      value.
    """
    parts = name.split('.')
    short_name = parts[-1]

    container = self._index['.'.join(parts[:-1])]

    defining_class_score = 1
    if tf_inspect.isclass(container):
      if short_name in container.__dict__:
        # prefer the defining class
        defining_class_score = -1

    contrib_score = -1
    if 'contrib' in parts:
      contrib_score = 1

    # Strip trailing parts until the name refers to a module; the remaining
    # length is the symbol's module depth.
    while parts:
      container = self._index['.'.join(parts)]
      if tf_inspect.ismodule(container):
        break
      parts.pop()

    module_length = len(parts)
    if len(parts) == 2:
      # `tf.submodule.thing` is better than `tf.thing`
      module_length_score = -1
    else:
      # shorter is better
      module_length_score = module_length

    return (defining_class_score, contrib_score, module_length_score, name)

  def _maybe_find_duplicates(self):
    """Compute data structures containing information about duplicates.

    Find duplicates in `index` and decide on one to be the "master" name.

    Computes a reverse_index mapping each object id to its master name.

    Also computes a map `duplicate_of` from aliases to their master name (the
    master name itself has no entry in this map), and a map `duplicates` from
    master names to a lexicographically sorted list of all aliases for that name
    (incl. the master name).

    All these are computed and set as fields if they haven't already.
    """
    if self._reverse_index is not None:
      return

    # Maps the id of a symbol to its fully qualified name. For symbols that have
    # several aliases, this map contains the first one found.
    # We use id(py_object) to get a hashable value for py_object. Note all
    # objects in _index are in memory at the same time so this is safe.
    reverse_index = {}

    # Make a preliminary duplicates map. For all sets of duplicate names, it
    # maps the first name found to a list of all duplicate names.
    raw_duplicates = {}
    for full_name, py_object in six.iteritems(self._index):
      # We cannot use the duplicate mechanism for some constants, since e.g.,
      # id(c1) == id(c2) with c1=1, c2=1. This is unproblematic since constants
      # have no usable docstring and won't be documented automatically.
      # NOTE: this condition was missing its `and` (a syntax error) and has
      # been restored to identity comparisons, which is the intended check.
      if (py_object is not None and
          not isinstance(py_object, six.integer_types + six.string_types +
                         (six.binary_type, six.text_type, float, complex, bool))
          and py_object is not ()):  # pylint: disable=literal-comparison
        object_id = id(py_object)
        if object_id in reverse_index:
          master_name = reverse_index[object_id]
          if master_name in raw_duplicates:
            raw_duplicates[master_name].append(full_name)
          else:
            raw_duplicates[master_name] = [master_name, full_name]
        else:
          reverse_index[object_id] = full_name

    # Decide on master names, rewire duplicates and make a duplicate_of map
    # mapping all non-master duplicates to the master name. The master symbol
    # does not have an entry in this map.
    duplicate_of = {}

    # Duplicates maps the main symbols to the set of all duplicates of that
    # symbol (incl. itself).
    duplicates = {}

    for names in raw_duplicates.values():
      names = sorted(names)
      # Prefer the API-export canonical name when one is registered; fall back
      # to the heuristic ranking in `_score_name`.
      master_name = (
          tf_export.get_canonical_name_for_symbol(self._index[names[0]])
          if names else None)
      if master_name:
        master_name = 'tf.%s' % master_name
      else:
        # Choose the master name with a lexical sort on the tuples returned by
        # _score_name.
        master_name = min(names, key=self._score_name)

      duplicates[master_name] = names
      for name in names:
        if name != master_name:
          duplicate_of[name] = master_name

      # Set the reverse index to the canonical name.
      reverse_index[id(self._index[master_name])] = master_name

    self._duplicate_of = duplicate_of
    self._duplicates = duplicates
    self._reverse_index = reverse_index
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/docs/doc_generator_visitor.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Documentation control decorators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Marker attribute checked by `should_skip` / `should_skip_class_attr`.
_DO_NOT_DOC = "_tf_docs_do_not_document"


def do_not_generate_docs(obj):
  """A decorator: Do not generate docs for this object.

  Hides the decorated class, method, or other class-attribute from the
  generated API docs. Only the decorated object itself is hidden: if a
  subclass overrides a hidden method without re-applying this decorator, the
  override is still documented (contrast with `do_not_doc_inheritable`).

  For example:

  ```
  @do_not_generate_docs
  class Parent(object):

    def method1(self):
      pass

  class Child(Parent):

    @do_not_generate_docs
    def method1(self):
      pass

    def method2(self):
      pass
  ```

  Here no page is generated for `Parent`, and `Child`'s page only lists
  `method2`.

  Note: This is implemented by adding a hidden attribute on the object, so it
  cannot be used on objects which do not allow new attributes to be added. So
  this decorator must go *below* `@property`, `@classmethod`,
  or `@staticmethod`:

  ```
  class Example(object):

    @property
    @do_not_generate_docs
    def x(self):
      return self._x
  ```

  Args:
    obj: The object to hide from the generated docs.

  Returns:
    obj
  """
  # Tag the object with the sentinel attribute the doc generator checks for.
  setattr(obj, _DO_NOT_DOC, None)
  return obj
# Marker attribute checked (through the `__mro__`) by `should_skip` and
# `should_skip_class_attr`.
_DO_NOT_DOC_INHERITABLE = "_tf_docs_do_not_doc_inheritable"


def do_not_doc_inheritable(obj):
  """A decorator: Do not generate docs for this method.

  This version of the decorator is "inherited" by subclasses. No docs will be
  generated for the decorated method in any subclass, even if the subclass
  overrides the method without re-applying the decorator.

  For example, to ensure that `method1` is **never documented** decorate it on
  the base-class:

  ```
  class Parent(object):

    @do_not_doc_inheritable
    def method1(self):
      pass

    def method2(self):
      pass

  class Child(Parent):

    def method1(self):
      pass

    def method2(self):
      pass
  ```

  Both `Parent.md` and `Child.md` will then only list `method2`.

  When generating docs for a class's attributes, the `__mro__` is searched and
  the attribute will be skipped if this decorator is detected on the attribute
  on any class in the `__mro__`.

  Note: This is implemented by adding a hidden attribute on the object, so it
  cannot be used on objects which do not allow new attributes to be added. So
  this decorator must go *below* `@property`, `@classmethod`,
  or `@staticmethod`:

  ```
  class Example(object):

    @property
    @do_not_doc_inheritable
    def x(self):
      return self._x
  ```

  Args:
    obj: The class-attribute to hide from the generated docs.

  Returns:
    obj
  """
  # Tag the object; subclass handling happens at lookup time via the MRO.
  setattr(obj, _DO_NOT_DOC_INHERITABLE, None)
  return obj
# Marker attribute: document only in the defining class, never in subclasses.
_FOR_SUBCLASS_IMPLEMENTERS = "_tf_docs_tools_for_subclass_implementers"


def for_subclass_implementers(obj):
  """A decorator: Only generate docs for this method in the defining class.

  Also group this method's docs with an `@abstractmethod` in the class's docs.

  No docs will be generated for this class attribute in sub-classes.

  The canonical use case for this is `tf.keras.layers.Layer.call`: It's a
  public method, essential for anyone implementing a subclass, but it should
  never be called directly.

  Works on methods and other class-attributes.

  When generating docs for a class's attributes, the `__mro__` is searched and
  the attribute will be skipped if this decorator is detected on the attribute
  on any **parent** class in the `__mro__`.

  For example:

  ```
  class Parent(object):

    @for_subclass_implementers
    def method1(self):
      pass

    def method2(self):
      pass

  class Child1(Parent):

    def method1(self):
      pass

    def method2(self):
      pass
  ```

  Here `Parent.md` lists both methods, while `Child1.md` only lists `method2`.

  Note: This is implemented by adding a hidden attribute on the object, so it
  cannot be used on objects which do not allow new attributes to be added. So
  this decorator must go *below* `@property`, `@classmethod`,
  or `@staticmethod`:

  ```
  class Example(object):

    @property
    @for_subclass_implementers
    def x(self):
      return self._x
  ```

  Args:
    obj: The class-attribute to hide from the generated docs.

  Returns:
    obj
  """
  setattr(obj, _FOR_SUBCLASS_IMPLEMENTERS, None)
  return obj


# Alias that reads better at some call sites: the attribute is documented where
# defined but hidden in subclasses.
do_not_doc_in_subclasses = for_subclass_implementers
def should_skip(obj):
  """Returns true if docs generation should be skipped for this object.

  Checks for the `do_not_generate_docs` or `do_not_doc_inheritable`
  decorators.

  Args:
    obj: The object to document, or skip.

  Returns:
    True if the object should be skipped
  """
  # Properties carry their decorator markers on the getter function.
  target = obj.fget if isinstance(obj, property) else obj
  return (hasattr(target, _DO_NOT_DOC) or
          hasattr(target, _DO_NOT_DOC_INHERITABLE))
def should_skip_class_attr(cls, name):
  """Returns true if docs should be skipped for this class attribute.

  Combines the three decorator checks: `do_not_generate_docs` /
  `do_not_doc_inheritable` anywhere, plus `for_subclass_implementers` on a
  *parent* class (the defining class is still documented).

  Args:
    cls: The class the attribute belongs to.
    name: The name of the attribute.

  Returns:
    True if the attribute should be skipped.
  """
  # Get the object with standard lookup, from the nearest
  # defining parent.
  try:
    obj = getattr(cls, name)
  except AttributeError:
    # Avoid error caused by enum metaclasses in python3
    if name in ("name", "value"):
      return True
    raise
  # Unwrap fget if the object is a property
  if isinstance(obj, property):
    obj = obj.fget
  # Skip if the object is decorated with `do_not_generate_docs` or
  # `do_not_doc_inheritable`
  if should_skip(obj):
    return True
  # Use __dict__ lookup to get the version defined in *this* class.
  obj = cls.__dict__.get(name, None)
  if isinstance(obj, property):
    obj = obj.fget
  if obj is not None:
    # If not none, the object is defined in *this* class.
    # Do not skip if decorated with `for_subclass_implementers`.
    if hasattr(obj, _FOR_SUBCLASS_IMPLEMENTERS):
      return False
  # for each parent class
  for parent in cls.__mro__[1:]:
    obj = getattr(parent, name, None)
    if obj is None:
      continue
    if isinstance(obj, property):
      obj = obj.fget
    # Skip if the parent's definition is decorated with `do_not_doc_inheritable`
    # or `for_subclass_implementers`
    if hasattr(obj, _DO_NOT_DOC_INHERITABLE):
      return True
    if hasattr(obj, _FOR_SUBCLASS_IMPLEMENTERS):
      return True
  # No blocking decorators --> don't skip
  return False
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/docs/doc_controls.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for documentation parser."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import os
import sys
from tensorflow.python.platform import googletest
from tensorflow.python.util import tf_inspect
from tensorflow.tools.docs import doc_controls
from tensorflow.tools.docs import parser
# The test needs a real module. `types.ModuleType()` doesn't work, as the result
# is a `builtin` module. Using "parser" here is arbitrary. The tests don't
# depend on the module contents. At this point in the process the public api
# has already been extracted.
test_module = parser
# Fixture function. Its docstring and exact signature are asserted on by
# ParserTest (brief docstring, extracted signature) — keep both stable.
def test_function(unused_arg, unused_kwarg='default'):
  """Docstring for test function."""
  pass
# Fixture function with *args/**kwargs. ParserTest asserts on the docstring
# and the extracted signature — keep both stable.
def test_function_with_args_kwargs(unused_arg, *unused_args, **unused_kwargs):
  """Docstring for second test function."""
  pass
class ParentClass(object):
  # Base-class fixture: `hidden_method` carries `do_not_doc_inheritable`, so
  # neither it nor subclass overrides of it should be documented.
  @doc_controls.do_not_doc_inheritable
  def hidden_method(self):
    pass
# Fixture class. The docstrings below are asserted on by ParserTest (brief
# docstring checks) — keep them stable.
class TestClass(ParentClass):
  """Docstring for TestClass itself."""
  def a_method(self, arg='default'):
    """Docstring for a method."""
    pass
  # Overrides ParentClass.hidden_method; stays hidden because the parent's
  # definition carries `do_not_doc_inheritable`.
  def hidden_method(self):
    pass
  # Hidden directly via `do_not_generate_docs` on this definition.
  @doc_controls.do_not_generate_docs
  def hidden_method2(self):
    pass
  class ChildClass(object):
    """Docstring for a child class."""
    pass
  @property
  def a_property(self):
    """Docstring for a property."""
    pass
  CLASS_MEMBER = 'a class member'
class DummyVisitor(object):
  """Minimal stand-in for a traversal visitor: just holds the two maps."""

  def __init__(self, index, duplicate_of):
    # Expose exactly the attributes `ReferenceResolver.from_visitor` reads.
    self.duplicate_of = duplicate_of
    self.index = index
class ParserTest(googletest.TestCase):
def test_documentation_path(self):
self.assertEqual('test.md', parser.documentation_path('test'))
self.assertEqual('test/module.md', parser.documentation_path('test.module'))
def test_replace_references(self):
class HasOneMember(object):
def foo(self):
pass
string = (
'A @{tf.reference}, another @{tf.reference$with\nnewline}, a member '
'@{tf.reference.foo}, and a @{tf.third$link `text` with `code` in '
'it}.')
duplicate_of = {'tf.third': 'tf.fourth'}
index = {'tf.reference': HasOneMember,
'tf.reference.foo': HasOneMember.foo,
'tf.third': HasOneMember,
'tf.fourth': HasOneMember}
visitor = DummyVisitor(index, duplicate_of)
reference_resolver = parser.ReferenceResolver.from_visitor(
visitor=visitor, doc_index={}, py_module_names=['tf'])
result = reference_resolver.replace_references(string, '../..')
self.assertEqual('A <a href="../../tf/reference.md">'
'<code>tf.reference</code></a>, '
'another <a href="../../tf/reference.md">'
'with\nnewline</a>, '
'a member <a href="../../tf/reference.md#foo">'
'<code>tf.reference.foo</code></a>, '
'and a <a href="../../tf/fourth.md">link '
'<code>text</code> with '
'<code>code</code> in it</a>.', result)
def test_doc_replace_references(self):
string = '@{$doc1} @{$doc1#abc} @{$doc1$link} @{$doc1#def$zelda} @{$do/c2}'
class DocInfo(object):
pass
doc1 = DocInfo()
doc1.title = 'Title1'
doc1.url = 'URL1'
doc2 = DocInfo()
doc2.title = 'Two words'
doc2.url = 'somewhere/else'
doc_index = {'doc1': doc1, 'do/c2': doc2}
visitor = DummyVisitor(index={}, duplicate_of={})
reference_resolver = parser.ReferenceResolver.from_visitor(
visitor=visitor, doc_index=doc_index, py_module_names=['tf'])
result = reference_resolver.replace_references(string, 'python')
self.assertEqual('<a href="../URL1">Title1</a> '
'<a href="../URL1#abc">Title1</a> '
'<a href="../URL1">link</a> '
'<a href="../URL1#def">zelda</a> '
'<a href="../somewhere/else">Two words</a>', result)
def test_docs_for_class(self):
index = {
'TestClass': TestClass,
'TestClass.a_method': TestClass.a_method,
'TestClass.a_property': TestClass.a_property,
'TestClass.ChildClass': TestClass.ChildClass,
'TestClass.CLASS_MEMBER': TestClass.CLASS_MEMBER
}
visitor = DummyVisitor(index=index, duplicate_of={})
reference_resolver = parser.ReferenceResolver.from_visitor(
visitor=visitor, doc_index={}, py_module_names=['tf'])
tree = {
'TestClass': ['a_method', 'a_property', 'ChildClass', 'CLASS_MEMBER']
}
parser_config = parser.ParserConfig(
reference_resolver=reference_resolver,
duplicates={},
duplicate_of={},
tree=tree,
index=index,
reverse_index={},
guide_index={},
base_dir='/')
page_info = parser.docs_for_object(
full_name='TestClass', py_object=TestClass, parser_config=parser_config)
# Make sure the brief docstring is present
self.assertEqual(
tf_inspect.getdoc(TestClass).split('\n')[0], page_info.doc.brief)
# Make sure the method is present
self.assertEqual(TestClass.a_method, page_info.methods[0].obj)
# Make sure that the signature is extracted properly and omits self.
self.assertEqual(["arg='default'"], page_info.methods[0].signature)
# Make sure the property is present
self.assertIs(TestClass.a_property, page_info.properties[0].obj)
# Make sure there is a link to the child class and it points the right way.
self.assertIs(TestClass.ChildClass, page_info.classes[0].obj)
# Make sure this file is contained as the definition location.
self.assertEqual(os.path.relpath(__file__, '/'), page_info.defined_in.path)
def test_namedtuple_field_order(self):
namedtupleclass = collections.namedtuple('namedtupleclass',
{'z', 'y', 'x', 'w', 'v', 'u'})
index = {
'namedtupleclass': namedtupleclass,
'namedtupleclass.u': namedtupleclass.u,
'namedtupleclass.v': namedtupleclass.v,
'namedtupleclass.w': namedtupleclass.w,
'namedtupleclass.x': namedtupleclass.x,
'namedtupleclass.y': namedtupleclass.y,
'namedtupleclass.z': namedtupleclass.z,
}
visitor = DummyVisitor(index=index, duplicate_of={})
reference_resolver = parser.ReferenceResolver.from_visitor(
visitor=visitor, doc_index={}, py_module_names=['tf'])
tree = {'namedtupleclass': {'u', 'v', 'w', 'x', 'y', 'z'}}
parser_config = parser.ParserConfig(
reference_resolver=reference_resolver,
duplicates={},
duplicate_of={},
tree=tree,
index=index,
reverse_index={},
guide_index={},
base_dir='/')
page_info = parser.docs_for_object(
full_name='namedtupleclass',
py_object=namedtupleclass,
parser_config=parser_config)
# Each namedtiple field has a docstring of the form:
# 'Alias for field number ##'. These props are returned sorted.
def sort_key(prop_info):
return int(prop_info.obj.__doc__.split(' ')[-1])
self.assertSequenceEqual(page_info.properties,
sorted(page_info.properties, key=sort_key))
def test_docs_for_class_should_skip(self):
class Parent(object):
@doc_controls.do_not_doc_inheritable
def a_method(self, arg='default'):
pass
class Child(Parent):
def a_method(self, arg='default'):
pass
index = {
'Child': Child,
'Child.a_method': Child.a_method,
}
visitor = DummyVisitor(index=index, duplicate_of={})
reference_resolver = parser.ReferenceResolver.from_visitor(
visitor=visitor, doc_index={}, py_module_names=['tf'])
tree = {
'Child': ['a_method'],
}
parser_config = parser.ParserConfig(
reference_resolver=reference_resolver,
duplicates={},
duplicate_of={},
tree=tree,
index=index,
reverse_index={},
guide_index={},
base_dir='/')
page_info = parser.docs_for_object(
full_name='Child', py_object=Child, parser_config=parser_config)
# Make sure the `a_method` is not present
self.assertEqual(0, len(page_info.methods))
def test_docs_for_message_class(self):
class CMessage(object):
def hidden(self):
pass
class Message(object):
def hidden2(self):
pass
class MessageMeta(object):
def hidden3(self):
pass
class ChildMessage(CMessage, Message, MessageMeta):
def my_method(self):
pass
index = {
'ChildMessage': ChildMessage,
'ChildMessage.hidden': ChildMessage.hidden,
'ChildMessage.hidden2': ChildMessage.hidden2,
'ChildMessage.hidden3': ChildMessage.hidden3,
'ChildMessage.my_method': ChildMessage.my_method,
}
visitor = DummyVisitor(index=index, duplicate_of={})
reference_resolver = parser.ReferenceResolver.from_visitor(
visitor=visitor, doc_index={}, py_module_names=['tf'])
tree = {'ChildMessage': ['hidden', 'hidden2', 'hidden3', 'my_method']}
parser_config = parser.ParserConfig(
reference_resolver=reference_resolver,
duplicates={},
duplicate_of={},
tree=tree,
index=index,
reverse_index={},
guide_index={},
base_dir='/')
page_info = parser.docs_for_object(
full_name='ChildMessage',
py_object=ChildMessage,
parser_config=parser_config)
self.assertEqual(1, len(page_info.methods))
self.assertEqual('my_method', page_info.methods[0].short_name)
def test_docs_for_module(self):
index = {
'TestModule':
test_module,
'TestModule.test_function':
test_function,
'TestModule.test_function_with_args_kwargs':
test_function_with_args_kwargs,
'TestModule.TestClass':
TestClass,
}
visitor = DummyVisitor(index=index, duplicate_of={})
reference_resolver = parser.ReferenceResolver.from_visitor(
visitor=visitor, doc_index={}, py_module_names=['tf'])
tree = {
'TestModule': ['TestClass', 'test_function',
'test_function_with_args_kwargs']
}
parser_config = parser.ParserConfig(
reference_resolver=reference_resolver,
duplicates={},
duplicate_of={},
tree=tree,
index=index,
reverse_index={},
guide_index={},
base_dir='/')
page_info = parser.docs_for_object(
full_name='TestModule',
py_object=test_module,
parser_config=parser_config)
# Make sure the brief docstring is present
self.assertEqual(
tf_inspect.getdoc(test_module).split('\n')[0], page_info.doc.brief)
# Make sure that the members are there
funcs = {f_info.obj for f_info in page_info.functions}
self.assertEqual({test_function, test_function_with_args_kwargs}, funcs)
classes = {cls_info.obj for cls_info in page_info.classes}
self.assertEqual({TestClass}, classes)
# Make sure the module's file is contained as the definition location.
self.assertEqual(
os.path.relpath(test_module.__file__.rstrip('c'), '/'),
page_info.defined_in.path)
def test_docs_for_function(self):
index = {
'test_function': test_function
}
visitor = DummyVisitor(index=index, duplicate_of={})
reference_resolver = parser.ReferenceResolver.from_visitor(
visitor=visitor, doc_index={}, py_module_names=['tf'])
tree = {
'': ['test_function']
}
parser_config = parser.ParserConfig(
reference_resolver=reference_resolver,
duplicates={},
duplicate_of={},
tree=tree,
index=index,
reverse_index={},
guide_index={},
base_dir='/')
page_info = parser.docs_for_object(
full_name='test_function',
py_object=test_function,
parser_config=parser_config)
# Make sure the brief docstring is present
self.assertEqual(
tf_inspect.getdoc(test_function).split('\n')[0], page_info.doc.brief)
# Make sure the extracted signature is good.
self.assertEqual(['unused_arg', "unused_kwarg='default'"],
page_info.signature)
# Make sure this file is contained as the definition location.
self.assertEqual(os.path.relpath(__file__, '/'), page_info.defined_in.path)
def test_docs_for_function_with_kwargs(self):
index = {
'test_function_with_args_kwargs': test_function_with_args_kwargs
}
visitor = DummyVisitor(index=index, duplicate_of={})
reference_resolver = parser.ReferenceResolver.from_visitor(
visitor=visitor, doc_index={}, py_module_names=['tf'])
tree = {
'': ['test_function_with_args_kwargs']
}
parser_config = parser.ParserConfig(
reference_resolver=reference_resolver,
duplicates={},
duplicate_of={},
tree=tree,
index=index,
reverse_index={},
guide_index={},
base_dir='/')
page_info = parser.docs_for_object(
full_name='test_function_with_args_kwargs',
py_object=test_function_with_args_kwargs,
parser_config=parser_config)
# Make sure the brief docstring is present
self.assertEqual(
tf_inspect.getdoc(test_function_with_args_kwargs).split('\n')[0],
page_info.doc.brief)
# Make sure the extracted signature is good.
self.assertEqual(['unused_arg', '*unused_args', '**unused_kwargs'],
page_info.signature)
def test_parse_md_docstring(self):
def test_function_with_fancy_docstring(arg):
"""Function with a fancy docstring.
And a bunch of references: @{tf.reference}, another @{tf.reference},
a member @{tf.reference.foo}, and a @{tf.third}.
Args:
arg: An argument.
Raises:
an exception
Returns:
arg: the input, and
arg: the input, again.
@compatibility(numpy)
NumPy has nothing as awesome as this function.
@end_compatibility
@compatibility(theano)
Theano has nothing as awesome as this function.
Check it out.
@end_compatibility
"""
return arg, arg
class HasOneMember(object):
def foo(self):
pass
duplicate_of = {'tf.third': 'tf.fourth'}
index = {
'tf': test_module,
'tf.fancy': test_function_with_fancy_docstring,
'tf.reference': HasOneMember,
'tf.reference.foo': HasOneMember.foo,
'tf.third': HasOneMember,
'tf.fourth': HasOneMember
}
visitor = DummyVisitor(index=index, duplicate_of=duplicate_of)
reference_resolver = parser.ReferenceResolver.from_visitor(
visitor=visitor, doc_index={}, py_module_names=['tf'])
doc_info = parser._parse_md_docstring(test_function_with_fancy_docstring,
'../..', reference_resolver)
self.assertNotIn('@', doc_info.docstring)
self.assertNotIn('compatibility', doc_info.docstring)
self.assertNotIn('Raises:', doc_info.docstring)
self.assertEqual(len(doc_info.function_details), 3)
self.assertEqual(set(doc_info.compatibility.keys()), {'numpy', 'theano'})
self.assertEqual(doc_info.compatibility['numpy'],
'NumPy has nothing as awesome as this function.\n')
def test_generate_index(self):
index = {
'tf': test_module,
'tf.TestModule': test_module,
'tf.test_function': test_function,
'tf.TestModule.test_function': test_function,
'tf.TestModule.TestClass': TestClass,
'tf.TestModule.TestClass.a_method': TestClass.a_method,
'tf.TestModule.TestClass.a_property': TestClass.a_property,
'tf.TestModule.TestClass.ChildClass': TestClass.ChildClass,
}
duplicate_of = {'tf.TestModule.test_function': 'tf.test_function'}
visitor = DummyVisitor(index=index, duplicate_of=duplicate_of)
reference_resolver = parser.ReferenceResolver.from_visitor(
visitor=visitor, doc_index={}, py_module_names=['tf'])
docs = parser.generate_global_index('TestLibrary', index=index,
reference_resolver=reference_resolver)
# Make sure duplicates and non-top-level symbols are in the index, but
# methods and properties are not.
self.assertNotIn('a_method', docs)
self.assertNotIn('a_property', docs)
self.assertIn('TestModule.TestClass', docs)
self.assertIn('TestModule.TestClass.ChildClass', docs)
self.assertIn('TestModule.test_function', docs)
# Leading backtick to make sure it's included top-level.
# This depends on formatting, but should be stable.
self.assertIn('<code>tf.test_function', docs)
def test_argspec_for_functools_partial(self):
# pylint: disable=unused-argument
def test_function_for_partial1(arg1, arg2, kwarg1=1, kwarg2=2):
pass
def test_function_for_partial2(arg1, arg2, *my_args, **my_kwargs):
pass
# pylint: enable=unused-argument
# pylint: disable=protected-access
# Make sure everything works for regular functions.
expected = tf_inspect.FullArgSpec(
args=['arg1', 'arg2', 'kwarg1', 'kwarg2'],
varargs=None,
varkw=None,
defaults=(1, 2),
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
self.assertEqual(expected, parser._get_arg_spec(test_function_for_partial1))
# Make sure doing nothing works.
expected = tf_inspect.FullArgSpec(
args=['arg1', 'arg2', 'kwarg1', 'kwarg2'],
varargs=None,
varkw=None,
defaults=(1, 2),
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
partial = functools.partial(test_function_for_partial1)
self.assertEqual(expected, parser._get_arg_spec(partial))
# Make sure setting args from the front works.
expected = tf_inspect.FullArgSpec(
args=['arg2', 'kwarg1', 'kwarg2'],
varargs=None,
varkw=None,
defaults=(1, 2),
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
partial = functools.partial(test_function_for_partial1, 1)
self.assertEqual(expected, parser._get_arg_spec(partial))
expected = tf_inspect.FullArgSpec(
args=['kwarg2'],
varargs=None,
varkw=None,
defaults=(2,),
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
partial = functools.partial(test_function_for_partial1, 1, 2, 3)
self.assertEqual(expected, parser._get_arg_spec(partial))
# Make sure setting kwargs works.
expected = tf_inspect.FullArgSpec(
args=['arg1', 'arg2', 'kwarg2'],
varargs=None,
varkw=None,
defaults=(2,),
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
partial = functools.partial(test_function_for_partial1, kwarg1=0)
self.assertEqual(expected, parser._get_arg_spec(partial))
expected = tf_inspect.FullArgSpec(
args=['arg1', 'arg2', 'kwarg1'],
varargs=None,
varkw=None,
defaults=(1,),
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
partial = functools.partial(test_function_for_partial1, kwarg2=0)
self.assertEqual(expected, parser._get_arg_spec(partial))
expected = tf_inspect.FullArgSpec(
args=['arg1'],
varargs=None,
varkw=None,
defaults=(),
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
partial = functools.partial(test_function_for_partial1,
arg2=0, kwarg1=0, kwarg2=0)
self.assertEqual(expected, parser._get_arg_spec(partial))
# Make sure *args, *kwargs is accounted for.
expected = tf_inspect.FullArgSpec(
args=[],
varargs='my_args',
varkw='my_kwargs',
defaults=(),
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
partial = functools.partial(test_function_for_partial2, 0, 1)
self.assertEqual(expected, parser._get_arg_spec(partial))
# pylint: enable=protected-access
def testSaveReferenceResolver(self):
  """ReferenceResolver round-trips through to_json_file/from_json_file."""
  # The doc_index value is intentionally unserializable: `to_json_file` must
  # drop the doc index (it is re-supplied to `from_json_file`).
  you_cant_serialize_this = object()
  duplicate_of = {'AClass': ['AClass2']}
  doc_index = {'doc': you_cant_serialize_this}
  is_fragment = {
      'tf': False,
      'tf.VERSION': True,
      'tf.AClass': False,
      'tf.AClass.method': True,
      'tf.AClass2': False,
      'tf.function': False
  }
  py_module_names = ['tf', 'tfdbg']
  resolver = parser.ReferenceResolver(duplicate_of, doc_index, is_fragment,
                                      py_module_names)
  outdir = googletest.GetTempDir()
  filepath = os.path.join(outdir, 'resolver.json')
  resolver.to_json_file(filepath)
  resolver2 = parser.ReferenceResolver.from_json_file(filepath, doc_index)
  # There are no __slots__, so all fields are visible in __dict__.
  self.assertEqual(resolver.__dict__, resolver2.__dict__)
def testIsFreeFunction(self):
  """is_free_function is True only for functions not owned by a class."""
  # A function whose parent (looked up in the index) is a module is "free".
  result = parser.is_free_function(test_function, 'test_module.test_function',
                                   {'test_module': test_module})
  self.assertTrue(result)
  # The same function presented as a class member is not free.
  result = parser.is_free_function(test_function, 'TestClass.test_function',
                                   {'TestClass': TestClass})
  self.assertFalse(result)
  # Classes and modules are never free functions.
  result = parser.is_free_function(TestClass, 'TestClass', {})
  self.assertFalse(result)
  result = parser.is_free_function(test_module, 'test_module', {})
  self.assertFalse(result)
# Sample docstring with "Args:"/"Returns:" sections, used as the fixture for
# TestParseFunctionDetails below.  (Runtime value: do not edit the contents.)
RELU_DOC = """Computes rectified linear: `max(features, 0)`
Args:
features: A `Tensor`. Must be one of the following types: `float32`,
`float64`, `int32`, `int64`, `uint8`, `int16`, `int8`, `uint16`,
`half`.
name: A name for the operation (optional)
Returns:
A `Tensor`. Has the same type as `features`
"""
class TestParseFunctionDetails(googletest.TestCase):
  """Tests for parser._parse_function_details."""

  def test_parse_function_details(self):
    # RELU_DOC contains one "Args:" and one "Returns:" section.
    docstring, function_details = parser._parse_function_details(RELU_DOC)
    self.assertEqual(len(function_details), 2)
    args = function_details[0]
    self.assertEqual(args.keyword, 'Args')
    self.assertEqual(len(args.header), 0)
    self.assertEqual(len(args.items), 2)
    self.assertEqual(args.items[0][0], 'features')
    self.assertEqual(args.items[1][0], 'name')
    self.assertEqual(args.items[1][1],
                     'A name for the operation (optional)\n\n')
    returns = function_details[1]
    self.assertEqual(returns.keyword, 'Returns')
    relu_doc_lines = RELU_DOC.split('\n')
    # The returned docstring is everything before the first section.
    self.assertEqual(docstring, relu_doc_lines[0] + '\n\n')
    self.assertEqual(returns.header, relu_doc_lines[-2] + '\n')
    # Re-joining the parsed parts must reproduce the original text exactly.
    self.assertEqual(
        RELU_DOC,
        docstring + ''.join(str(detail) for detail in function_details))
class TestGenerateSignature(googletest.TestCase):
  """Tests for parser._generate_signature."""

  def test_known_object(self):
    # Default values found in the reverse index render as their API path.
    known_object = object()
    reverse_index = {id(known_object): 'location.of.object.in.api'}

    def example_fun(arg=known_object):  # pylint: disable=unused-argument
      pass

    sig = parser._generate_signature(example_fun, reverse_index)
    self.assertEqual(sig, ['arg=location.of.object.in.api'])

  def test_literals(self):
    # This code path relies on PY2-only inspection; skip under python3.
    if sys.version_info >= (3, 0):
      print('Warning: Doc generation is not supported from python3.')
      return

    def example_fun(a=5, b=5.0, c=None, d=True, e='hello', f=(1, (2, 3))):  # pylint: disable=g-bad-name, unused-argument
      pass

    sig = parser._generate_signature(example_fun, reverse_index={})
    self.assertEqual(
        sig, ['a=5', 'b=5.0', 'c=None', 'd=True', "e='hello'", 'f=(1, (2, 3))'])

  def test_dotted_name(self):
    # This code path relies on PY2-only inspection; skip under python3.
    if sys.version_info >= (3, 0):
      print('Warning: Doc generation is not supported from python3.')
      return

    # pylint: disable=g-bad-name
    class a(object):
      class b(object):
        class c(object):
          class d(object):
            def __init__(self, *args):
              pass
    # pylint: enable=g-bad-name

    e = {'f': 1}

    def example_fun(arg1=a.b.c.d, arg2=a.b.c.d(1, 2), arg3=e['f']):  # pylint: disable=unused-argument
      pass

    # Dotted defaults are rendered as the source expression, not the value.
    sig = parser._generate_signature(example_fun, reverse_index={})
    self.assertEqual(sig, ['arg1=a.b.c.d', 'arg2=a.b.c.d(1, 2)', "arg3=e['f']"])
if __name__ == '__main__':
  # Run all test cases defined in this module.
  googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/docs/parser_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate docs for the TensorFlow Python API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import fnmatch
import os
import shutil
import tempfile
import six
from tensorflow.python.util import tf_inspect
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
from tensorflow.tools.docs import doc_controls
from tensorflow.tools.docs import doc_generator_visitor
from tensorflow.tools.docs import parser
from tensorflow.tools.docs import pretty_docs
from tensorflow.tools.docs import py_guide_parser
def write_docs(output_dir,
               parser_config,
               yaml_toc,
               root_title='TensorFlow',
               search_hints=True,
               site_api_path=''):
  """Write previously extracted docs to disk.

  Write a docs page for each symbol included in the indices of parser_config to
  a tree of docs at `output_dir`.

  Symbols with multiple aliases will have only one page written about
  them, which is referenced for all aliases.

  Args:
    output_dir: Directory to write documentation markdown files to. Will be
      created if it doesn't exist.
    parser_config: A `parser.ParserConfig` object, containing all the necessary
      indices.
    yaml_toc: Set to `True` to generate a "_toc.yaml" file.
    root_title: The title name for the root level index.md.
    search_hints: (bool) include meta-data search hints at the top of each
      output file.
    site_api_path: The output path relative to the site root. Used in the
      `_toc.yaml` and `_redirects.yaml` files.

  Raises:
    ValueError: if `output_dir` is not an absolute path
  """
  # Make output_dir.
  if not os.path.isabs(output_dir):
    raise ValueError("'output_dir' must be an absolute path.\n"
                     " output_dir='%s'" % output_dir)
  if not os.path.exists(output_dir):
    os.makedirs(output_dir)

  # These dictionaries are used for table-of-contents generation below
  # They will contain, after the for-loop below::
  #  - module name(string): classes and functions the module contains(list)
  module_children = {}
  #  - symbol name(string): pathname (string)
  symbol_to_file = {}

  # Collect redirects for an api _redirects.yaml file.
  redirects = []

  # Parse and write Markdown pages, resolving cross-links (@{symbol}).
  for full_name, py_object in six.iteritems(parser_config.index):
    parser_config.reference_resolver.current_doc_full_name = full_name

    # Only the canonical name gets a page; aliases become redirects below.
    if full_name in parser_config.duplicate_of:
      continue

    # Methods and some routines are documented only as part of their class.
    if not (tf_inspect.ismodule(py_object) or tf_inspect.isclass(py_object) or
            parser.is_free_function(py_object, full_name, parser_config.index)):
      continue

    sitepath = os.path.join('api_docs/python',
                            parser.documentation_path(full_name)[:-3])

    # For TOC, we need to store a mapping from full_name to the file
    # we're generating
    symbol_to_file[full_name] = sitepath

    # For a module, remember the module for the table-of-contents
    if tf_inspect.ismodule(py_object):
      if full_name in parser_config.tree:
        module_children.setdefault(full_name, [])
    # For something else that's documented,
    # figure out what module it lives in
    else:
      subname = str(full_name)
      while True:
        # Walk up the dotted name until the nearest enclosing module is found.
        subname = subname[:subname.rindex('.')]
        if tf_inspect.ismodule(parser_config.index[subname]):
          module_children.setdefault(subname, []).append(full_name)
          break

    # Generate docs for `py_object`, resolving references.
    page_info = parser.docs_for_object(full_name, py_object, parser_config)

    path = os.path.join(output_dir, parser.documentation_path(full_name))
    directory = os.path.dirname(path)
    try:
      if not os.path.exists(directory):
        os.makedirs(directory)
      # This function returns raw bytes in PY2 or unicode in PY3.
      if search_hints:
        content = [page_info.get_metadata_html()]
      else:
        content = ['']
      content.append(pretty_docs.build_md_page(page_info))
      text = '\n'.join(content)
      if six.PY3:
        text = text.encode('utf-8')
      with open(path, 'wb') as f:
        f.write(text)
    except OSError:
      raise OSError(
          'Cannot write documentation for %s to %s' % (full_name, directory))

    duplicates = parser_config.duplicates.get(full_name, [])
    if not duplicates:
      continue

    duplicates = [item for item in duplicates if item != full_name]

    # Every alias redirects to the canonical page.
    for dup in duplicates:
      from_path = os.path.join(site_api_path, dup.replace('.', '/'))
      to_path = os.path.join(site_api_path, full_name.replace('.', '/'))
      redirects.append((
          os.path.join('/', from_path),
          os.path.join('/', to_path)))

  if redirects:
    redirects = sorted(redirects)
    template = ('- from: {}\n'
                ' to: {}\n')
    redirects = [template.format(f, t) for f, t in redirects]
    api_redirects_path = os.path.join(output_dir, '_redirects.yaml')
    with open(api_redirects_path, 'w') as redirect_file:
      redirect_file.write('redirects:\n')
      redirect_file.write(''.join(redirects))

  if yaml_toc:
    # Generate table of contents
    # Put modules in alphabetical order, case-insensitive
    modules = sorted(module_children.keys(), key=lambda a: a.upper())
    leftnav_path = os.path.join(output_dir, '_toc.yaml')
    with open(leftnav_path, 'w') as f:
      # Generate header
      f.write('# Automatically generated file; please do not edit\ntoc:\n')
      for module in modules:
        # Nest by the number of dots in the module's name.
        indent_num = module.count('.')
        # Don't list `tf.submodule` inside `tf`
        indent_num = max(indent_num, 1)
        indent = ' '*indent_num

        if indent_num > 1:
          # tf.contrib.bayesflow.entropy will be under
          # tf.contrib->bayesflow->entropy
          title = module.split('.')[-1]
        else:
          title = module

        header = [
            '- title: ' + title,
            ' section:',
            ' - title: Overview',
            ' path: ' + os.path.join('/', site_api_path,
                                     symbol_to_file[module])]
        header = ''.join([indent+line+'\n' for line in header])
        f.write(header)

        symbols_in_module = module_children.get(module, [])
        # Sort case-insensitive, if equal sort case sensitive (upper first)
        symbols_in_module.sort(key=lambda a: (a.upper(), a))

        for full_name in symbols_in_module:
          item = [
              ' - title: ' + full_name[len(module) + 1:],
              ' path: ' + os.path.join('/', site_api_path,
                                       symbol_to_file[full_name])]
          item = ''.join([indent+line+'\n' for line in item])
          f.write(item)

  # Write a global index containing all full names with links.
  with open(os.path.join(output_dir, 'index.md'), 'w') as f:
    f.write(
        parser.generate_global_index(root_title, parser_config.index,
                                     parser_config.reference_resolver))
def add_dict_to_dict(add_from, add_to):
  """Merge `add_from` into `add_to`, concatenating list values per key.

  Args:
    add_from: A dict mapping keys to lists; read-only here.
    add_to: A dict mapping keys to lists; updated in place.
  """
  for key in add_from:
    if key in add_to:
      add_to[key].extend(add_from[key])
    else:
      # Copy the list: storing `add_from[key]` directly would alias it, so a
      # later `extend` on `add_to[key]` would silently mutate the caller's
      # `add_from` value (e.g. the shared default private/do-not-descend maps).
      add_to[key] = list(add_from[key])
# Exclude some libraries in contrib from the documentation altogether.
def _get_default_private_map():
return {
'tf.contrib.autograph': ['utils', 'operators'],
'tf.test': ['mock'],
'tf.compat': ['v1', 'v2'],
'tf.contrib.estimator': ['python'],
}
# Exclude members of some libraries.
def _get_default_do_not_descend_map():
# TODO(markdaoust): Use docs_controls decorators, locally, instead.
return {
'tf': ['cli', 'lib', 'wrappers'],
'tf.contrib': [
'compiler',
'grid_rnn',
# Block contrib.keras to de-clutter the docs
'keras',
'labeled_tensor',
'quantization',
'session_bundle',
'slim',
'solvers',
'specs',
'tensor_forest',
'tensorboard',
'testing',
'tfprof',
],
'tf.contrib.bayesflow': [
'special_math', 'stochastic_gradient_estimators',
'stochastic_variables'
],
'tf.contrib.ffmpeg': ['ffmpeg_ops'],
'tf.contrib.graph_editor': [
'edit', 'match', 'reroute', 'subgraph', 'transform', 'select', 'util'
],
'tf.contrib.keras': ['api', 'python'],
'tf.contrib.layers': ['feature_column', 'summaries'],
'tf.contrib.learn': [
'datasets',
'head',
'graph_actions',
'io',
'models',
'monitors',
'ops',
'preprocessing',
'utils',
],
'tf.contrib.util': ['loader'],
}
class DocControlsAwareCrawler(public_api.PublicAPIVisitor):
  """A `doc_controls`-aware API crawler."""

  def _is_private(self, path, name, obj):
    # Anything explicitly skipped via `doc_controls` counts as private;
    # everything else defers to the base-class visibility rules.
    skipped = doc_controls.should_skip(obj)
    return skipped or super(DocControlsAwareCrawler,
                            self)._is_private(path, name, obj)
def extract(py_modules,
            private_map,
            do_not_descend_map,
            visitor_cls=doc_generator_visitor.DocGeneratorVisitor):
  """Traverse `py_modules` and return the populated extraction visitor.

  Args:
    py_modules: List of (name, module) pairs; the first pair seeds the visitor.
    private_map: Dict of extra private members, merged into the crawler's map.
    do_not_descend_map: Dict of members not to descend into, merged likewise.
    visitor_cls: Visitor class to instantiate for the extraction.

  Returns:
    The `visitor_cls` instance holding the extracted index.
  """
  root_name, root_module = py_modules[0]
  # Build the visitor pair once, rooted at the first module.
  visitor = visitor_cls(root_name)
  api_visitor = DocControlsAwareCrawler(visitor)
  api_visitor.set_root_name(root_name)
  add_dict_to_dict(private_map, api_visitor.private_map)
  add_dict_to_dict(do_not_descend_map, api_visitor.do_not_descend_map)
  traverse.traverse(root_module, api_visitor)
  # Re-use the same visitors for every remaining module, re-rooting them at
  # each module's name before traversing.
  for module_name, module in py_modules[1:]:
    visitor.set_root_name(module_name)
    api_visitor.set_root_name(module_name)
    traverse.traverse(module, api_visitor)
  return visitor
class _GetMarkdownTitle(py_guide_parser.PyGuideParser):
  """Extract the first title from a .md file."""

  def __init__(self):
    self.title = None
    super(_GetMarkdownTitle, self).__init__()

  def process_title(self, _, title):
    # Only the first title encountered in the file is kept.
    if self.title is None:
      self.title = title
class _DocInfo(object):
"""A simple struct for holding a doc's url and title."""
def __init__(self, url, title):
self.url = url
self.title = title
def build_doc_index(src_dir):
  """Build an index from a keyword designating a doc to _DocInfo objects.

  Walks `src_dir` for '.md' files, extracts each file's first markdown title,
  and keys the resulting _DocInfo by the file's base name (and, for nested
  files, by a 'parent/base' key as well).

  Args:
    src_dir: Absolute path to the directory of source docs.

  Returns:
    A dict mapping keyword string -> _DocInfo.

  Raises:
    ValueError: If `src_dir` is not absolute, does not exist, or contains a
      markdown file with no title.
  """
  doc_index = {}
  if not os.path.isabs(src_dir):
    raise ValueError("'src_dir' must be an absolute path.\n"
                     " src_dir='%s'" % src_dir)
  if not os.path.exists(src_dir):
    raise ValueError("'src_dir' path must exist.\n"
                     " src_dir='%s'" % src_dir)
  for dirpath, _, filenames in os.walk(src_dir):
    # `suffix` is the current directory relative to `src_dir`.
    suffix = os.path.relpath(path=dirpath, start=src_dir)
    for base_name in filenames:
      if not base_name.endswith('.md'):
        continue
      title_parser = _GetMarkdownTitle()
      title_parser.process(os.path.join(dirpath, base_name))
      if title_parser.title is None:
        msg = ('`{}` has no markdown title (# title)'.format(
            os.path.join(dirpath, base_name)))
        raise ValueError(msg)
      # Key by the relative path components minus the '.md' extension; an
      # 'index' file is keyed by its directory name instead.
      key_parts = os.path.join(suffix, base_name[:-3]).split('/')
      if key_parts[-1] == 'index':
        key_parts = key_parts[:-1]
      doc_info = _DocInfo(os.path.join(suffix, base_name), title_parser.title)
      doc_index[key_parts[-1]] = doc_info
      if len(key_parts) > 1:
        # Also register the more-specific 'parent/base' key.
        doc_index['/'.join(key_parts[-2:])] = doc_info
  return doc_index
class _GuideRef(object):
def __init__(self, base_name, title, section_title, section_tag):
self.url = 'api_guides/python/' + (('%s#%s' % (base_name, section_tag))
if section_tag else base_name)
self.link_text = (('%s > %s' % (title, section_title))
if section_title else title)
def make_md_link(self, url_prefix):
return '[%s](%s%s)' % (self.link_text, url_prefix, self.url)
class _GenerateGuideIndex(py_guide_parser.PyGuideParser):
  """Turn guide files into an index from symbol name to a list of _GuideRefs."""

  def __init__(self):
    # Maps symbol name -> list of _GuideRef.
    self.index = {}
    py_guide_parser.PyGuideParser.__init__(self)

  def process(self, full_path, base_name):
    """Index a file, reading from `full_path`, with `base_name` as the link."""
    # Reset per-file state, then delegate to the base-class parser, which
    # invokes the process_* hooks below line by line.
    self.full_path = full_path
    self.base_name = base_name
    self.title = None
    self.section_title = None
    self.section_tag = None
    py_guide_parser.PyGuideParser.process(self, full_path)

  def process_title(self, _, title):
    if self.title is None:  # only use the first title
      self.title = title

  def process_section(self, _, section_title, tag):
    # Remember the current section so references below it link into it.
    self.section_title = section_title
    self.section_tag = tag

  def process_line(self, _, line):
    """Index the file and section of each `symbol` reference."""
    for match in parser.AUTO_REFERENCE_RE.finditer(line):
      val = self.index.get(match.group(1), [])
      val.append(
          _GuideRef(self.base_name, self.title, self.section_title,
                    self.section_tag))
      self.index[match.group(1)] = val
def _build_guide_index(guide_src_dir):
  """Return dict: symbol name -> list of _GuideRef from `guide_src_dir` files."""
  generator = _GenerateGuideIndex()
  if not os.path.exists(guide_src_dir):
    # No guide directory: the index stays empty.
    return generator.index
  for md_path, md_name in py_guide_parser.md_files_in_dir(guide_src_dir):
    generator.process(md_path, md_name)
  return generator.index
class _UpdateTags(py_guide_parser.PyGuideParser):
  """Rewrites a Python guide so that each section has an explicit id tag.

  "section" here refers to blocks delimited by second level headings.
  """

  def process_section(self, line_number, section_title, tag):
    # Replace the markdown heading line with an anchored HTML heading so that
    # fragment links (#tag) to this section resolve.
    self.replace_line(line_number, '<h2 id="%s">%s</h2>' % (tag, section_title))
def update_id_tags_inplace(src_dir):
  """Set explicit ids on all second-level headings to ensure back-links work.

  Args:
    src_dir: The directory of md-files to convert (inplace).
  """
  tag_updater = _UpdateTags()

  for dirpath, _, filenames in os.walk(src_dir):
    for base_name in filenames:
      if not base_name.endswith('.md'):
        continue
      # `dirpath` from os.walk already starts with `src_dir`, so joining
      # `src_dir` in again doubled the prefix whenever `src_dir` was relative
      # (for absolute paths the absolute `dirpath` discarded it, masking the
      # bug). Join `dirpath` directly; this is correct in both cases.
      full_path = os.path.join(dirpath, base_name)

      # Tag updater loads the file, makes the replacements, and returns the
      # modified file contents
      content = tag_updater.process(full_path)
      with open(full_path, 'w') as f:
        f.write(content)
# File names that replace_refs() skips entirely (neither processed nor copied).
EXCLUDED = set(['__init__.py', 'OWNERS', 'README.txt'])
def replace_refs(src_dir,
                 output_dir,
                 reference_resolver,
                 file_pattern='*.md',
                 api_docs_relpath='api_docs'):
  """Fix @{} references in all files under `src_dir` matching `file_pattern`.

  A matching directory structure, with the modified files is
  written to `output_dir`.

  `{"__init__.py","OWNERS","README.txt"}` are skipped.

  Files not matching `file_pattern` (using `fnmatch`) are copied with no change.

  Also, files in the `api_guides/python` directory get explicit ids set on all
  heading-2s to ensure back-links work.

  Args:
    src_dir: The directory to convert files from.
    output_dir: The root directory to write the resulting files to.
    reference_resolver: A `parser.ReferenceResolver` to make the replacements.
    file_pattern: Only replace references in files matching `file_pattern`,
      using fnmatch. Non-matching files are copied unchanged.
    api_docs_relpath: Relative-path string to the api_docs, from the src_dir.
  """
  # Iterate through all the source files and process them.
  for dirpath, _, filenames in os.walk(src_dir):
    depth = os.path.relpath(src_dir, start=dirpath)
    # How to get from `dirpath` to api_docs/python/
    relative_path_to_root = os.path.join(depth, api_docs_relpath, 'python')

    # Make the directory under output_dir.
    new_dir = os.path.join(output_dir,
                           os.path.relpath(path=dirpath, start=src_dir))
    if not os.path.exists(new_dir):
      os.makedirs(new_dir)

    for base_name in filenames:
      if base_name in EXCLUDED:
        continue
      full_in_path = os.path.join(dirpath, base_name)

      # Set the `current_doc_full_name` so bad files can be reported on errors.
      reference_resolver.current_doc_full_name = full_in_path

      suffix = os.path.relpath(path=full_in_path, start=src_dir)
      full_out_path = os.path.join(output_dir, suffix)
      # Copy files that do not match the file_pattern, unmodified.
      if not fnmatch.fnmatch(base_name, file_pattern):
        if full_in_path != full_out_path:
          shutil.copyfile(full_in_path, full_out_path)
        continue

      # Read as UTF-8 bytes, rewrite @{} references, write back as UTF-8.
      with open(full_in_path, 'rb') as f:
        content = f.read().decode('utf-8')

      content = reference_resolver.replace_references(content,
                                                      relative_path_to_root)
      with open(full_out_path, 'wb') as f:
        f.write(content.encode('utf-8'))
class DocGenerator(object):
  """Main entry point for generating docs."""

  def __init__(self):
    self.argument_parser = argparse.ArgumentParser()
    self._py_modules = None
    # Start from library defaults; callers may add to or replace these maps.
    self._private_map = _get_default_private_map()
    self._do_not_descend_map = _get_default_do_not_descend_map()
    self.yaml_toc = True

    self.argument_parser.add_argument(
        '--no_search_hints',
        dest='search_hints',
        action='store_false',
        default=True)

    self.argument_parser.add_argument(
        '--site_api_path',
        type=str, default='',
        help='The path from the site-root to api_docs'
        'directory for this project')

    self.argument_parser.add_argument(
        '--api_cache_out_path',
        type=str,
        default=None,
        help='Path to store a json-serialized api-index, so links can be '
        'inserted into docs without rebuilding the api_docs')

  def add_output_dir_argument(self):
    """Register the required --output_dir flag."""
    self.argument_parser.add_argument(
        '--output_dir',
        type=str,
        default=None,
        required=True,
        help='Directory to write docs to.')

  def add_src_dir_argument(self):
    """Register the optional --src_dir flag (defaults to a fresh temp dir)."""
    self.argument_parser.add_argument(
        '--src_dir',
        type=str,
        default=tempfile.mkdtemp(),
        required=False,
        help='Optional directory of source docs to add api_docs links to')

  def add_base_dir_argument(self, default_base_dir):
    """Register the --base_dir flag with the given default."""
    self.argument_parser.add_argument(
        '--base_dir',
        type=str,
        default=default_base_dir,
        help='Base directory to strip from file names referenced in docs.')

  def parse_known_args(self):
    """Parse known command-line flags, ignoring unrecognized ones."""
    flags, _ = self.argument_parser.parse_known_args()
    return flags

  def add_to_private_map(self, d):
    """Merge `d` into the private-members map."""
    add_dict_to_dict(d, self._private_map)

  def add_to_do_not_descend_map(self, d):
    """Merge `d` into the do-not-descend map."""
    add_dict_to_dict(d, self._do_not_descend_map)

  def set_private_map(self, d):
    """Replace the private-members map entirely."""
    self._private_map = d

  def set_do_not_descend_map(self, d):
    """Replace the do-not-descend map entirely."""
    self._do_not_descend_map = d

  def set_py_modules(self, py_modules):
    """Set the list of (name, module) pairs to document."""
    self._py_modules = py_modules

  def py_module_names(self):
    """Return the names from `set_py_modules`; raises if not yet set."""
    if self._py_modules is None:
      raise RuntimeError(
          'Must call set_py_modules() before running py_module_names().')
    return [name for (name, _) in self._py_modules]

  def make_reference_resolver(self, visitor, doc_index):
    """Build a `parser.ReferenceResolver` from the visitor and doc index."""
    return parser.ReferenceResolver.from_visitor(
        visitor, doc_index, py_module_names=self.py_module_names())

  def make_parser_config(self, visitor, reference_resolver, guide_index,
                         base_dir):
    """Bundle extraction results into a `parser.ParserConfig`."""
    return parser.ParserConfig(
        reference_resolver=reference_resolver,
        duplicates=visitor.duplicates,
        duplicate_of=visitor.duplicate_of,
        tree=visitor.tree,
        index=visitor.index,
        reverse_index=visitor.reverse_index,
        guide_index=guide_index,
        base_dir=base_dir)

  def run_extraction(self):
    """Run `extract` over the configured modules and member maps."""
    return extract(self._py_modules, self._private_map,
                   self._do_not_descend_map)

  def build(self, flags):
    """Build all the docs.

    This produces two outputs

    python api docs:

      * generated from modules set with `set_py_modules`.
      * written to '{FLAGS.output_dir}/api_docs/python/'

    non-api docs:

      * Everything in '{FLAGS.src_dir}' is copied to '{FLAGS.output_dir}'.
      * '@{}' references in '.md' files are replaced with links.
      * '.md' files under 'api_guides/python' have explicit ids set for their
        second level headings.

    Args:
      flags:
        * src_dir: Where to fetch the non-api-docs.
        * base_dir: Base of the docs directory (Used to build correct
          relative links).
        * output_dir: Where to write the resulting docs.

    Returns:
      The number of errors encountered while processing.
    """
    # Extract the python api from the _py_modules
    doc_index = build_doc_index(flags.src_dir)
    visitor = self.run_extraction()
    reference_resolver = self.make_reference_resolver(visitor, doc_index)

    # Optionally persist the api index for out-of-band link insertion.
    if getattr(flags, 'api_cache_out_path', None):
      reference_resolver.to_json_file(flags.api_cache_out_path)

    # Build the guide_index for the api_docs back links.
    root_title = getattr(flags, 'root_title', 'TensorFlow')
    guide_index = _build_guide_index(
        os.path.join(flags.src_dir, 'api_guides/python'))

    # Write the api docs.
    parser_config = self.make_parser_config(visitor, reference_resolver,
                                            guide_index, flags.base_dir)
    output_dir = os.path.join(flags.output_dir, 'api_docs/python')

    write_docs(
        output_dir,
        parser_config,
        yaml_toc=self.yaml_toc,
        root_title=root_title,
        search_hints=getattr(flags, 'search_hints', True),
        site_api_path=getattr(flags, 'site_api_path', ''))

    # Replace all the @{} references in files under `FLAGS.src_dir`
    replace_refs(flags.src_dir, flags.output_dir, reference_resolver, '*.md')

    # Fix the tags in the guide dir.
    guide_dir = os.path.join(flags.output_dir, 'api_guides/python')
    if os.path.exists(guide_dir):
      update_id_tags_inplace(guide_dir)

    # Report all errors found by the reference resolver, and return the error
    # code.
    parser_config.reference_resolver.log_errors()

    return parser_config.reference_resolver.num_errors()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/docs/generate_lib.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Turn Python docstrings into Markdown for TensorFlow documentation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import collections
import functools
import itertools
import json
import os
import re
import astor
import six
from google.protobuf.message import Message as ProtoMessage
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_inspect
from tensorflow.tools.docs import doc_controls
def is_free_function(py_object, full_name, index):
  """Check if input is a free function (and not a class- or static method).

  Args:
    py_object: The object in question.
    full_name: The full name of the object, like `tf.module.symbol`.
    index: The {full_name: py_object} dictionary for the public API.

  Returns:
    True if the object is a stand-alone function, and not part of a class
    definition.
  """
  if not tf_inspect.isfunction(py_object):
    return False
  # A function whose parent (per the index) is a class is a method, not free.
  parent_name = full_name.rsplit('.', 1)[0]
  return not tf_inspect.isclass(index[parent_name])
# A regular expression capturing a python identifier: a letter or underscore
# followed by any number of word characters.
IDENTIFIER_RE = r'[a-zA-Z_]\w*'
class TFDocsError(Exception):
  """Error raised (and caught by `replace_references`) for bad doc links."""
  pass
class _Errors(object):
"""A collection of errors."""
def __init__(self):
self._errors = []
def log_all(self):
"""Log all the collected errors to the standard error."""
template = 'ERROR:\n output file name: %s\n %s\n\n'
for full_name, message in self._errors:
logging.warn(template, full_name, message)
def append(self, full_name, message):
"""Add an error to the collection.
Args:
full_name: The path to the file in which the error occurred.
message: The message to display with the error.
"""
self._errors.append((full_name, message))
def __len__(self):
return len(self._errors)
def __eq__(self, other):
if not isinstance(other, _Errors):
return False
return self._errors == other._errors # pylint: disable=protected-access
def documentation_path(full_name, is_fragment=False):
  """Returns the file path for the documentation for the given API symbol.

  Given the fully qualified name of a library symbol, compute the path to which
  to write the documentation for that symbol (relative to a base directory).
  Documentation files are organized into directories that mirror the python
  module/class structure.

  Args:
    full_name: Fully qualified name of a library symbol.
    is_fragment: If `False` produce a direct markdown link (`tf.a.b.c` -->
      `tf/a/b/c.md`). If `True` produce fragment link, `tf.a.b.c` -->
      `tf/a/b.md#c`

  Returns:
    The file path to which to write the documentation for `full_name`.
  """
  parts = full_name.split('.')
  if not is_fragment:
    return os.path.join(*parts) + '.md'
  # Fragment links live on the parent symbol's page: tf.a.b.c -> tf/a/b.md#c
  page = os.path.join(*parts[:-1]) + '.md'
  return page + '#' + parts[-1]
def _get_raw_docstring(py_object):
  """Get the docs for a given python object.

  Args:
    py_object: A python object to retrieve the docs for (class, function/method,
      or module).

  Returns:
    The docstring, or the empty string if no docstring was found.
  """
  # For object instances, tf_inspect.getdoc gives the docstring of their
  # *type*, which is not what we want; restrict to documentable kinds.
  documentable = (
      tf_inspect.isclass(py_object) or tf_inspect.ismethod(py_object) or
      tf_inspect.isfunction(py_object) or tf_inspect.ismodule(py_object) or
      isinstance(py_object, property))
  if not documentable:
    return ''
  return tf_inspect.getdoc(py_object) or ''
# A regular expression for capturing a @{symbol} reference.
SYMBOL_REFERENCE_RE = re.compile(
r"""
# Start with a literal "@{".
@\{
# Group at least 1 symbol, not "}".
([^}]+)
# Followed by a closing "}"
\}
""",
flags=re.VERBOSE)
AUTO_REFERENCE_RE = re.compile(r'`([a-zA-Z0-9_.]+?)`')
class ReferenceResolver(object):
"""Class for replacing @{...} references with Markdown links.
Attributes:
current_doc_full_name: A string (or None) indicating the name of the
document currently being processed, so errors can reference the broken
doc.
"""
def __init__(self, duplicate_of, doc_index, is_fragment, py_module_names):
"""Initializes a Reference Resolver.
Args:
duplicate_of: A map from duplicate names to preferred names of API
symbols.
doc_index: A `dict` mapping symbol name strings to objects with `url`
and `title` fields. Used to resolve @{$doc} references in docstrings.
is_fragment: A map from full names to bool for each symbol. If True the
object lives at a page fragment `tf.a.b.c` --> `tf/a/b#c`. If False
object has a page to itself: `tf.a.b.c` --> `tf/a/b/c`.
py_module_names: A list of string names of Python modules.
"""
self._duplicate_of = duplicate_of
self._doc_index = doc_index
self._is_fragment = is_fragment
self._all_names = set(is_fragment.keys())
self._py_module_names = py_module_names
self.current_doc_full_name = None
self._errors = _Errors()
def add_error(self, message):
self._errors.append(self.current_doc_full_name, message)
def log_errors(self):
self._errors.log_all()
def num_errors(self):
return len(self._errors)
@classmethod
def from_visitor(cls, visitor, doc_index, **kwargs):
"""A factory function for building a ReferenceResolver from a visitor.
Args:
visitor: an instance of `DocGeneratorVisitor`
doc_index: a dictionary mapping document names to references objects with
"title" and "url" fields
**kwargs: all remaining args are passed to the constructor
Returns:
an instance of `ReferenceResolver` ()
"""
is_fragment = {}
for name, obj in visitor.index.items():
has_page = (
tf_inspect.isclass(obj) or tf_inspect.ismodule(obj) or
is_free_function(obj, name, visitor.index))
is_fragment[name] = not has_page
return cls(
duplicate_of=visitor.duplicate_of,
doc_index=doc_index,
is_fragment=is_fragment,
**kwargs)
@classmethod
def from_json_file(cls, filepath, doc_index):
with open(filepath) as f:
json_dict = json.load(f)
return cls(doc_index=doc_index, **json_dict)
def to_json_file(self, filepath):
"""Converts the RefenceResolver to json and writes it to the specified file.
Args:
filepath: The file path to write the json to.
"""
try:
os.makedirs(os.path.dirname(filepath))
except OSError:
pass
json_dict = {}
for key, value in self.__dict__.items():
# Drop these two fields. `_doc_index` is not serializable. `_all_names` is
# generated by the constructor.
if key in ('_doc_index', '_all_names',
'_errors', 'current_doc_full_name'):
continue
# Strip off any leading underscores on field names as these are not
# recognized by the constructor.
json_dict[key.lstrip('_')] = value
with open(filepath, 'w') as f:
json.dump(json_dict, f, indent=2, sort_keys=True)
def replace_references(self, string, relative_path_to_root):
"""Replace "@{symbol}" references with links to symbol's documentation page.
This functions finds all occurrences of "@{symbol}" in `string`
and replaces them with markdown links to the documentation page
for "symbol".
`relative_path_to_root` is the relative path from the document
that contains the "@{symbol}" reference to the root of the API
documentation that is linked to. If the containing page is part of
the same API docset, `relative_path_to_root` can be set to
`os.path.dirname(documentation_path(name))`, where `name` is the
python name of the object whose documentation page the reference
lives on.
Args:
string: A string in which "@{symbol}" references should be replaced.
relative_path_to_root: The relative path from the containing document to
the root of the API documentation that is being linked to.
Returns:
`string`, with "@{symbol}" references replaced by Markdown links.
"""
def strict_one_ref(match):
try:
return self._one_ref(match, relative_path_to_root)
except TFDocsError as e:
self.add_error(e.message)
return 'BAD_LINK'
string = re.sub(SYMBOL_REFERENCE_RE, strict_one_ref, string)
def sloppy_one_ref(match):
try:
return self._one_ref(match, relative_path_to_root)
except TFDocsError:
return match.group(0)
string = re.sub(AUTO_REFERENCE_RE, sloppy_one_ref, string)
return string
def python_link(self, link_text, ref_full_name, relative_path_to_root,
code_ref=True):
"""Resolve a "@{python symbol}" reference to a Markdown link.
This will pick the canonical location for duplicate symbols. The
input to this function should already be stripped of the '@' and
'{}'. This function returns a Markdown link. If `code_ref` is
true, it is assumed that this is a code reference, so the link
text will be rendered as code (using backticks).
`link_text` should refer to a library symbol, starting with 'tf.'.
Args:
link_text: The text of the Markdown link.
ref_full_name: The fully qualified name of the symbol to link to.
relative_path_to_root: The relative path from the location of the current
document to the root of the API documentation.
code_ref: If true (the default), put `link_text` in `...`.
Returns:
A markdown link to the documentation page of `ref_full_name`.
"""
url = self.reference_to_url(ref_full_name, relative_path_to_root)
if code_ref:
link_text = link_text.join(['<code>', '</code>'])
else:
link_text = self._link_text_to_html(link_text)
return '<a href="{}">{}</a>'.format(url, link_text)
@staticmethod
def _link_text_to_html(link_text):
code_re = '`(.*?)`'
return re.sub(code_re, r'<code>\1</code>', link_text)
def py_master_name(self, full_name):
"""Return the master name for a Python symbol name."""
return self._duplicate_of.get(full_name, full_name)
  def reference_to_url(self, ref_full_name, relative_path_to_root):
    """Resolve a "@{python symbol}" reference to a relative path.

    The input to this function should already be stripped of the '@'
    and '{}', and its output is only the link, not the full Markdown.

    If `ref_full_name` is the name of a class member, method, or property, the
    link will point to the page of the containing class, and it will include the
    method name as an anchor. For example, `tf.module.MyClass.my_method` will be
    translated into a link to
    `os.path.join(relative_path_to_root, 'tf/module/MyClass.md#my_method')`.

    Args:
      ref_full_name: The fully qualified name of the symbol to link to.
      relative_path_to_root: The relative path from the location of the current
        document to the root of the API documentation.

    Returns:
      A relative path that links from the documentation page of `from_full_name`
      to the documentation page of `ref_full_name`.

    Raises:
      RuntimeError: If `ref_full_name` is not documented.
      TFDocsError: If the @{} syntax cannot be decoded.
    """
    # Resolve aliases to the canonical ("master") name before the lookup.
    master_name = self._duplicate_of.get(ref_full_name, ref_full_name)
    # Check whether this link exists
    if master_name not in self._all_names:
      raise TFDocsError(
          'Cannot make link to "%s": Not in index.' % master_name)
    ref_path = documentation_path(master_name, self._is_fragment[master_name])
    return os.path.join(relative_path_to_root, ref_path)
def _one_ref(self, match, relative_path_to_root):
"""Return a link for a single "@{symbol}" reference."""
string = match.group(1)
# Look for link text after $.
dollar = string.rfind('$')
if dollar > 0: # Ignore $ in first character
link_text = string[dollar + 1:]
string = string[:dollar]
manual_link_text = True
else:
link_text = string
manual_link_text = False
# Handle different types of references.
if string.startswith('$'): # Doc reference
return self._doc_link(string, link_text, manual_link_text,
relative_path_to_root)
elif string.startswith('tensorflow::'):
# C++ symbol
return self._cc_link(string, link_text, manual_link_text,
relative_path_to_root)
else:
is_python = False
for py_module_name in self._py_module_names:
if string == py_module_name or string.startswith(py_module_name + '.'):
is_python = True
break
if is_python: # Python symbol
return self.python_link(
link_text,
string,
relative_path_to_root,
code_ref=not manual_link_text)
# Error!
raise TFDocsError('Did not understand "%s"' % match.group(0),
'BROKEN_LINK')
def _doc_link(self, string, link_text, manual_link_text,
relative_path_to_root):
"""Generate a link for a @{$...} reference."""
string = string[1:] # remove leading $
# If string has a #, split that part into `hash_tag`
hash_pos = string.find('#')
if hash_pos > -1:
hash_tag = string[hash_pos:]
string = string[:hash_pos]
else:
hash_tag = ''
if string in self._doc_index:
if not manual_link_text: link_text = self._doc_index[string].title
url = os.path.normpath(os.path.join(
relative_path_to_root, '../..', self._doc_index[string].url))
link_text = self._link_text_to_html(link_text)
return '<a href="{}{}">{}</a>'.format(url, hash_tag, link_text)
return self._doc_missing(string, hash_tag, link_text, manual_link_text,
relative_path_to_root)
  def _doc_missing(self, string, unused_hash_tag, unused_link_text,
                   unused_manual_link_text, unused_relative_path_to_root):
    """Generate an error for unrecognized @{$...} references.

    NOTE(review): kept as a separate hook carrying the full argument list,
    presumably so a subclass can substitute fallback handling -- confirm.
    """
    raise TFDocsError('Unknown Document "%s"' % string)
def _cc_link(self, string, link_text, unused_manual_link_text,
relative_path_to_root):
"""Generate a link for a @{tensorflow::...} reference."""
# TODO(josh11b): Fix this hard-coding of paths.
if string == 'tensorflow::ClientSession':
ret = 'class/tensorflow/client-session.md'
elif string == 'tensorflow::Scope':
ret = 'class/tensorflow/scope.md'
elif string == 'tensorflow::Status':
ret = 'class/tensorflow/status.md'
elif string == 'tensorflow::Tensor':
ret = 'class/tensorflow/tensor.md'
elif string == 'tensorflow::ops::Const':
ret = 'namespace/tensorflow/ops.md#const'
else:
raise TFDocsError('C++ reference not understood: "%s"' % string)
# relative_path_to_root gets you to api_docs/python, we go from there
# to api_docs/cc, and then add ret.
cc_relative_path = os.path.normpath(os.path.join(
relative_path_to_root, '../cc', ret))
return '<a href="{}"><code>{}</code></a>'.format(cc_relative_path,
link_text)
# TODO(aselle): Collect these into a big list for all modules and functions
# and make a rosetta stone page.
def _handle_compatibility(doc):
"""Parse and remove compatibility blocks from the main docstring.
Args:
doc: The docstring that contains compatibility notes"
Returns:
a tuple of the modified doc string and a hash that maps from compatibility
note type to the text of the note.
"""
compatibility_notes = {}
match_compatibility = re.compile(r'[ \t]*@compatibility\((\w+)\)\s*\n'
r'((?:[^@\n]*\n)+)'
r'\s*@end_compatibility')
for f in match_compatibility.finditer(doc):
compatibility_notes[f.group(1)] = f.group(2)
return match_compatibility.subn(r'', doc)[0], compatibility_notes
def _gen_pairs(items):
"""Given an list of items [a,b,a,b...], generate pairs [(a,b),(a,b)...].
Args:
items: A list of items (length must be even)
Yields:
The original items, in pairs
"""
assert len(items) % 2 == 0
items = iter(items)
while True:
try:
yield next(items), next(items)
except StopIteration:
return
class _FunctionDetail(
collections.namedtuple('_FunctionDetail', ['keyword', 'header', 'items'])):
"""A simple class to contain function details.
Composed of a "keyword", a possibly empty "header" string, and a possibly
empty
list of key-value pair "items".
"""
__slots__ = []
def __str__(self):
"""Return the original string that represents the function detail."""
parts = [self.keyword + ':\n']
parts.append(self.header)
for key, value in self.items:
parts.append(' ' + key + ': ')
parts.append(value)
return ''.join(parts)
def _parse_function_details(docstring):
  r"""Given a docstring, split off the header and parse the function details.

  For example the docstring of tf.nn.relu:

  '''Computes rectified linear: `max(features, 0)`.

  Args:
    features: A `Tensor`. Must be one of the following types: `float32`,
      `float64`, `int32`, `int64`, `uint8`, `int16`, `int8`, `uint16`,
      `half`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `features`.
  '''

  is parsed and returned as the free-text header
  'Computes rectified linear: `max(features, 0)`.\n\n' plus one
  `_FunctionDetail` per "Args:"/"Returns:"-style section, each holding the
  section's keyword, its free-text header, and its (name, description) items.

  Args:
    docstring: The docstring to parse

  Returns:
    A (header, function_details) pair, where header is a string and
    function_details is a (possibly empty) list of `_FunctionDetail` objects.
  """
  detail_keywords = '|'.join([
      'Args', 'Arguments', 'Fields', 'Returns', 'Yields', 'Raises', 'Attributes'
  ])
  tag_re = re.compile('(?<=\n)(' + detail_keywords + '):\n', re.MULTILINE)

  # Splitting on the tags leaves the main docstring first, then alternating
  # keyword / section-content strings.
  parts = tag_re.split(docstring)
  header, tagged_sections = parts[0], parts[1:]

  item_re = re.compile(r'^ ? ?(\*?\*?\w[\w.]*?\s*):\s', re.MULTILINE)
  function_details = []
  for keyword, content in _gen_pairs(tagged_sections):
    # The first chunk is the section header; the rest alternate name/text.
    chunks = item_re.split(content)
    items = list(_gen_pairs(chunks[1:]))
    function_details.append(_FunctionDetail(keyword, chunks[0], items))

  return header, function_details
# Parsed form of a docstring: the first line (`brief`), the full cleaned
# `docstring` text, the structured `function_details` sections, and the dict
# of `compatibility` notes keyed by note type.
_DocstringInfo = collections.namedtuple('_DocstringInfo', [
    'brief', 'docstring', 'function_details', 'compatibility'
])
def _parse_md_docstring(py_object, relative_path_to_root, reference_resolver):
  """Parse the object's docstring and return a `_DocstringInfo`.

  This function clears @@'s from the docstring, and replaces @{} references
  with markdown links.

  For links within the same set of docs, the `relative_path_to_root`
  for a docstring on the page for `full_name` can be set to:

  ```python
  relative_path_to_root = os.path.relpath(
    path='.', start=os.path.dirname(documentation_path(full_name)) or '.')
  ```

  Args:
    py_object: A python object to retrieve the docs for (class, function/method,
      or module).
    relative_path_to_root: The relative path from the location of the current
      document to the root of the Python API documentation. This is used to
      compute links for "@{symbol}" references.
    reference_resolver: An instance of ReferenceResolver.

  Returns:
    A _DocstringInfo object, all fields will be empty if no docstring was found.
  """
  # TODO(wicke): If this is a partial, use the .func docstring and add a note.
  raw = _get_raw_docstring(py_object)
  raw = reference_resolver.replace_references(raw, relative_path_to_root)

  # Drop lines that consist solely of an "@@symbol" export marker.
  atat_re = re.compile(r' *@@[a-zA-Z_.0-9]+ *$')
  raw = '\n'.join(
      line for line in raw.split('\n') if not atat_re.match(line))

  docstring, compatibility = _handle_compatibility(raw)
  docstring, function_details = _parse_function_details(docstring)

  # Auto-generated wrapper modules get no docstring of their own.
  if 'Generated by: tensorflow/tools/api/generator' in docstring:
    docstring = ''

  brief = docstring.split('\n')[0]
  return _DocstringInfo(brief, docstring, function_details, compatibility)
def _get_arg_spec(func):
  """Extracts signature information from a function or functools.partial object.

  For functions, uses `tf_inspect.getfullargspec`. For `functools.partial`
  objects, corrects the signature of the underlying function to take into
  account the removed arguments.

  Args:
    func: A function whose signature to extract.

  Returns:
    An `FullArgSpec` namedtuple `(args, varargs, varkw, defaults, etc.)`,
    as returned by `tf_inspect.getfullargspec`.
  """
  # getfullargspec does not work for functools.partial objects directly.
  if isinstance(func, functools.partial):
    argspec = tf_inspect.getfullargspec(func.func)
    # Remove the args from the original function that have been used up.
    # `first_default_arg` is the index of the first parameter with a default.
    first_default_arg = (
        len(argspec.args or []) - len(argspec.defaults or []))
    # Number of positional arguments already bound by the partial.
    partial_args = len(func.args)
    argspec_args = []

    if argspec.args:
      argspec_args = list(argspec.args[partial_args:])

    argspec_defaults = list(argspec.defaults or ())
    # Positional bindings that reach into the defaulted parameters consume
    # the corresponding defaults too.
    if argspec.defaults and partial_args > first_default_arg:
      argspec_defaults = list(argspec.defaults[partial_args-first_default_arg:])

    first_default_arg = max(0, first_default_arg - partial_args)
    # A keyword bound by the partial removes the matching parameter and, if
    # it had one, its default; binding a non-defaulted parameter shifts the
    # first-default index left instead.
    for kwarg in (func.keywords or []):
      if kwarg in (argspec.args or []):
        i = argspec_args.index(kwarg)
        argspec_args.pop(i)
        if i >= first_default_arg:
          argspec_defaults.pop(i-first_default_arg)
        else:
          first_default_arg -= 1

    return tf_inspect.FullArgSpec(
        args=argspec_args,
        varargs=argspec.varargs,
        varkw=argspec.varkw,
        defaults=tuple(argspec_defaults),
        kwonlyargs=[],
        kwonlydefaults=None,
        annotations={})
  else:  # Regular function or method, getargspec will work fine.
    return tf_inspect.getfullargspec(func)
def _remove_first_line_indent(string):
indent = len(re.match(r'^\s*', string).group(0))
return '\n'.join([line[indent:] for line in string.split('\n')])
# Matches a fully parenthesized numeric literal such as "(1e-4)", so the
# redundant parentheses can be stripped from rendered default values.
PAREN_NUMBER_RE = re.compile(r'^\(([0-9.e-]+)\)')
def _generate_signature(func, reverse_index):
  """Given a function, returns a list of strings representing its args.

  This function produces a list of strings representing the arguments to a
  python function. It uses tf_inspect.getfullargspec, which
  does not generalize well to Python 3.x, which is more flexible in how *args
  and **kwargs are handled. This is not a problem in TF, since we have to remain
  compatible to Python 2.7 anyway.

  This function uses `__name__` for callables if it is available. This can lead
  to poor results for functools.partial and other callable objects.

  The returned string is Python code, so if it is included in a Markdown
  document, it should be typeset as code (using backticks), or escaped.

  Args:
    func: A function, method, or functools.partial to extract the signature for.
    reverse_index: A map from object ids to canonical full names to use.

  Returns:
    A list of strings representing the argument signature of `func` as python
    code.
  """

  args_list = []

  argspec = _get_arg_spec(func)
  first_arg_with_default = (
      len(argspec.args or []) - len(argspec.defaults or []))

  # Python documentation skips `self` when printing method signatures.
  # Note we cannot test for ismethod here since unbound methods do not register
  # as methods (in Python 3).
  first_arg = 1 if 'self' in argspec.args[:1] else 0

  # Add all args without defaults.
  for arg in argspec.args[first_arg:first_arg_with_default]:
    args_list.append(arg)

  # Add all args with defaults.
  if argspec.defaults:
    try:
      source = _remove_first_line_indent(tf_inspect.getsource(func))
      func_ast = ast.parse(source)
      ast_defaults = func_ast.body[0].args.defaults
    except IOError:  # If this is a builtin, getsource fails with IOError
      # If we cannot get the source, assume the AST would be equal to the repr
      # of the defaults.
      ast_defaults = [None] * len(argspec.defaults)

    for arg, default, ast_default in zip(
        argspec.args[first_arg_with_default:], argspec.defaults, ast_defaults):
      # Prefer the canonical full name when the default object is indexed.
      if id(default) in reverse_index:
        default_text = reverse_index[id(default)]
      elif ast_default is not None:
        # Render the default from its AST so that names (not just values)
        # survive; escape whitespace and quotes for one-line display.
        default_text = (
            astor.to_source(ast_default).rstrip('\n').replace('\t', '\\t')
            .replace('\n', '\\n').replace('"""', "'"))
        default_text = PAREN_NUMBER_RE.sub('\\1', default_text)

        if default_text != repr(default):
          # This may be an internal name. If so, handle the ones we know about.
          # TODO(wicke): This should be replaced with a lookup in the index.
          # TODO(wicke): (replace first ident with tf., check if in index)
          internal_names = {
              'ops.GraphKeys': 'tf.GraphKeys',
              '_ops.GraphKeys': 'tf.GraphKeys',
              'init_ops.zeros_initializer': 'tf.zeros_initializer',
              'init_ops.ones_initializer': 'tf.ones_initializer',
              'saver_pb2.SaverDef': 'tf.train.SaverDef',
          }
          full_name_re = '^%s(.%s)+' % (IDENTIFIER_RE, IDENTIFIER_RE)
          match = re.match(full_name_re, default_text)
          if match:
            lookup_text = default_text
            for internal_name, public_name in six.iteritems(internal_names):
              if match.group(0).startswith(internal_name):
                lookup_text = public_name + default_text[len(internal_name):]
                break
            # Identity check is deliberate: `lookup_text` is only rebound to
            # a new object when a mapping matched, so `is` detects "no
            # substitution happened".
            if default_text is lookup_text:
              logging.warn(
                  'WARNING: Using default arg, failed lookup: %s, repr: %r',
                  default_text, default)
            else:
              default_text = lookup_text
      else:
        default_text = repr(default)

      args_list.append('%s=%s' % (arg, default_text))

  # Add *args and *kwargs.
  if argspec.varargs:
    args_list.append('*' + argspec.varargs)
  if argspec.varkw:
    args_list.append('**' + argspec.varkw)

  return args_list
def _get_guides_markdown(duplicate_names, guide_index, relative_path):
all_guides = []
for name in duplicate_names:
all_guides.extend(guide_index.get(name, []))
if not all_guides: return ''
prefix = '../' * (relative_path.count('/') + 3)
links = sorted(set([guide_ref.make_md_link(prefix)
for guide_ref in all_guides]))
return 'See the guide%s: %s\n\n' % (
's' if len(links) > 1 else '', ', '.join(links))
def _get_defining_class(py_class, name):
  """Return the class in `py_class`'s MRO whose own __dict__ defines `name`."""
  matches = (cls for cls in tf_inspect.getmro(py_class)
             if name in cls.__dict__)
  return next(matches, None)
class _LinkInfo(
collections.namedtuple(
'_LinkInfo', ['short_name', 'full_name', 'obj', 'doc', 'url'])):
__slots__ = []
def is_link(self):
return True
class _OtherMemberInfo(
collections.namedtuple('_OtherMemberInfo',
['short_name', 'full_name', 'obj', 'doc'])):
__slots__ = []
def is_link(self):
return False
# Document data for a class property; rendered inline on the class page.
_PropertyInfo = collections.namedtuple(
    '_PropertyInfo', ['short_name', 'full_name', 'obj', 'doc'])
# Document data for a method, including its parsed `signature` and the
# `decorators` (e.g. 'classmethod', 'staticmethod') to display with it.
_MethodInfo = collections.namedtuple('_MethodInfo', [
    'short_name', 'full_name', 'obj', 'doc', 'signature', 'decorators'
])
class _FunctionPageInfo(object):
  """Collects docs for a function page."""

  def __init__(self, full_name):
    self._full_name = full_name
    self._defined_in = None
    self._aliases = None
    self._doc = None
    self._guides = None

    self._signature = None
    self._decorators = []

  def for_function(self):
    """Returns True: this object documents a function."""
    return True

  def for_class(self):
    """Returns False: this object does not document a class."""
    return False

  def for_module(self):
    """Returns False: this object does not document a module."""
    return False

  @property
  def full_name(self):
    """The documented function's fully qualified name."""
    return self._full_name

  @property
  def short_name(self):
    """The last component of `full_name`."""
    return self._full_name.split('.')[-1]

  @property
  def defined_in(self):
    """The path to the file where the documented function is defined."""
    return self._defined_in

  def set_defined_in(self, defined_in):
    # Each `set_*` field is write-once; the assert guards double assignment.
    assert self.defined_in is None
    self._defined_in = defined_in

  @property
  def aliases(self):
    """All full names under which the function is exported."""
    return self._aliases

  def set_aliases(self, aliases):
    assert self.aliases is None
    self._aliases = aliases

  @property
  def doc(self):
    """The function's parsed docstring, a `_DocstringInfo`."""
    return self._doc

  def set_doc(self, doc):
    assert self.doc is None
    self._doc = doc

  @property
  def guides(self):
    """Markdown string of backlinks to relevant api_guides."""
    return self._guides

  def set_guides(self, guides):
    assert self.guides is None
    self._guides = guides

  @property
  def signature(self):
    """The parsed argument list (see `_generate_signature`), or None."""
    return self._signature

  def set_signature(self, function, reverse_index):
    """Attach the function's signature.

    Args:
      function: The python function being documented.
      reverse_index: A map from object ids in the index to full names.
    """

    assert self.signature is None
    self._signature = _generate_signature(function, reverse_index)

  @property
  def decorators(self):
    # Returned as a copy so callers cannot mutate the internal list.
    return list(self._decorators)

  def add_decorator(self, dec):
    self._decorators.append(dec)

  def get_metadata_html(self):
    """Returns the page-metadata HTML block for this function."""
    return _Metadata(self.full_name).build_html()
class _ClassPageInfo(object):
  """Collects docs for a class page.

  Attributes:
    full_name: The fully qualified name of the object at the master
      location. Aka `master_name`. For example: `tf.nn.sigmoid`.
    short_name: The last component of the `full_name`. For example: `sigmoid`.
    defined_in: The path to the file where this object is defined.
    aliases: The list of all fully qualified names for the locations where the
      object is visible in the public api. This includes the master location.
    doc: A `_DocstringInfo` object representing the object's docstring (can be
      created with `_parse_md_docstring`).
    guides: A markdown string, of back links pointing to the api_guides that
      reference this object.
    bases: A list of `_LinkInfo` objects pointing to the docs for the parent
      classes.
    properties: A list of `_PropertyInfo` objects documenting the class'
      properties (attributes that use `@property`).
    methods: A list of `_MethodInfo` objects documenting the class' methods.
    classes: A list of `_LinkInfo` objects pointing to docs for any nested
      classes.
    other_members: A list of `_OtherMemberInfo` objects documenting any other
      object's defined inside the class object (mostly enum style fields).
  """

  def __init__(self, full_name):
    self._full_name = full_name
    self._defined_in = None
    self._aliases = None
    self._doc = None
    self._guides = None
    self._namedtuplefields = None

    self._bases = None
    self._properties = []
    self._methods = []
    self._classes = []
    self._other_members = []

  def for_function(self):
    """Returns true if this object documents a function."""
    return False

  def for_class(self):
    """Returns true if this object documents a class."""
    return True

  def for_module(self):
    """Returns true if this object documents a module."""
    return False

  @property
  def full_name(self):
    """Returns the documented object's fully qualified name."""
    return self._full_name

  @property
  def short_name(self):
    """Returns the documented object's short name."""
    return self._full_name.split('.')[-1]

  @property
  def defined_in(self):
    """Returns the path to the file where the documented object is defined."""
    return self._defined_in

  def set_defined_in(self, defined_in):
    """Sets the `defined_in` path."""
    # Write-once field; the assert guards double assignment.
    assert self.defined_in is None
    self._defined_in = defined_in

  @property
  def aliases(self):
    """Returns a list of all full names for the documented object."""
    return self._aliases

  def set_aliases(self, aliases):
    """Sets the `aliases` list.

    Args:
      aliases: A list of strings. Containing all the object's full names.
    """
    assert self.aliases is None
    self._aliases = aliases

  @property
  def doc(self):
    """Returns a `_DocstringInfo` created from the object's docstring."""
    return self._doc

  def set_doc(self, doc):
    """Sets the `doc` field.

    Args:
      doc: An instance of `_DocstringInfo`.
    """
    assert self.doc is None
    self._doc = doc

  @property
  def guides(self):
    """Returns a markdown string containing backlinks to relevant api_guides."""
    return self._guides

  def set_guides(self, guides):
    """Sets the `guides` field.

    Args:
      guides: A markdown string containing backlinks to all the api_guides that
        link to the documented object.
    """
    assert self.guides is None
    self._guides = guides

  @property
  def namedtuplefields(self):
    # Tuple of field names if the documented class is a namedtuple, else None.
    return self._namedtuplefields

  def set_namedtuplefields(self, py_class):
    # Heuristic namedtuple detection: a tuple subclass exposing the full
    # namedtuple API is treated as one, and its `_fields` are recorded so
    # properties can be listed in declaration order.
    if issubclass(py_class, tuple):
      if all(
          hasattr(py_class, attr)
          for attr in ('_asdict', '_fields', '_make', '_replace')):
        self._namedtuplefields = py_class._fields

  @property
  def bases(self):
    """Returns a list of `_LinkInfo` objects pointing to the class' parents."""
    return self._bases

  def _set_bases(self, relative_path, parser_config):
    """Builds the `bases` attribute, to document this class' parent-classes.

    This method sets the `bases` to a list of `_LinkInfo` objects point to the
    doc pages for the class' parents.

    Args:
      relative_path: The relative path from the doc this object describes to
        the documentation root.
      parser_config: An instance of `ParserConfig`.
    """
    bases = []
    obj = parser_config.py_name_to_object(self.full_name)
    for base in obj.__bases__:
      # Bases that are not in the reverse index are not documented; skip them.
      base_full_name = parser_config.reverse_index.get(id(base), None)
      if base_full_name is None:
        continue
      base_doc = _parse_md_docstring(base, relative_path,
                                     parser_config.reference_resolver)
      base_url = parser_config.reference_resolver.reference_to_url(
          base_full_name, relative_path)

      link_info = _LinkInfo(short_name=base_full_name.split('.')[-1],
                            full_name=base_full_name, obj=base,
                            doc=base_doc, url=base_url)
      bases.append(link_info)

    self._bases = bases

  @property
  def properties(self):
    """Returns a list of `_PropertyInfo` describing the class' properties."""
    props_dict = {prop.short_name: prop for prop in self._properties}
    props = []
    if self.namedtuplefields:
      # Namedtuple fields come first, in declaration order.
      # NOTE(review): `pop(field)` raises KeyError if a field was never added
      # as a property -- presumably every field always is; confirm.
      for field in self.namedtuplefields:
        props.append(props_dict.pop(field))

    props.extend(sorted(props_dict.values()))

    return props

  def _add_property(self, short_name, full_name, obj, doc):
    """Adds a `_PropertyInfo` entry to the `properties` list.

    Args:
      short_name: The property's short name.
      full_name: The property's fully qualified name.
      obj: The property object itself
      doc: The property's parsed docstring, a `_DocstringInfo`.
    """
    # Hide useless namedtuple docs-trings
    if re.match('Alias for field number [0-9]+', doc.docstring):
      doc = doc._replace(docstring='', brief='')
    property_info = _PropertyInfo(short_name, full_name, obj, doc)
    self._properties.append(property_info)

  @property
  def methods(self):
    """Returns a list of `_MethodInfo` describing the class' methods."""
    return self._methods

  def _add_method(self, short_name, full_name, obj, doc, signature, decorators):
    """Adds a `_MethodInfo` entry to the `methods` list.

    Args:
      short_name: The method's short name.
      full_name: The method's fully qualified name.
      obj: The method object itself
      doc: The method's parsed docstring, a `_DocstringInfo`
      signature: The method's parsed signature (see: `_generate_signature`)
      decorators: A list of strings describing the decorators that should be
        mentioned on the object's docs page.
    """
    method_info = _MethodInfo(short_name, full_name, obj, doc, signature,
                              decorators)

    self._methods.append(method_info)

  @property
  def classes(self):
    """Returns a list of `_LinkInfo` pointing to any nested classes."""
    return self._classes

  def get_metadata_html(self):
    """Returns the page-metadata HTML block listing all documented members."""
    meta_data = _Metadata(self.full_name)
    for item in itertools.chain(self.classes, self.properties, self.methods,
                                self.other_members):
      meta_data.append(item)

    return meta_data.build_html()

  def _add_class(self, short_name, full_name, obj, doc, url):
    """Adds a `_LinkInfo` for a nested class to `classes` list.

    Args:
      short_name: The class' short name.
      full_name: The class' fully qualified name.
      obj: The class object itself
      doc: The class' parsed docstring, a `_DocstringInfo`
      url: A url pointing to where the nested class is documented.
    """
    page_info = _LinkInfo(short_name, full_name, obj, doc, url)

    self._classes.append(page_info)

  @property
  def other_members(self):
    """Returns a list of `_OtherMemberInfo` describing any other contents."""
    return self._other_members

  def _add_other_member(self, short_name, full_name, obj, doc):
    """Adds an `_OtherMemberInfo` entry to the `other_members` list.

    Args:
      short_name: The class' short name.
      full_name: The class' fully qualified name.
      obj: The class object itself
      doc: The class' parsed docstring, a `_DocstringInfo`
    """
    other_member_info = _OtherMemberInfo(short_name, full_name, obj, doc)
    self._other_members.append(other_member_info)

  def collect_docs_for_class(self, py_class, parser_config):
    """Collects information necessary specifically for a class's doc page.

    Mainly, this is details about the class's members.

    Args:
      py_class: The class object being documented
      parser_config: An instance of ParserConfig.
    """
    self.set_namedtuplefields(py_class)
    doc_path = documentation_path(self.full_name)
    relative_path = os.path.relpath(
        path='.', start=os.path.dirname(doc_path) or '.')

    self._set_bases(relative_path, parser_config)

    for short_name in parser_config.tree[self.full_name]:
      # Remove builtin members that we never want to document.
      if short_name in [
          '__class__', '__base__', '__weakref__', '__doc__', '__module__',
          '__dict__', '__abstractmethods__', '__slots__', '__getnewargs__',
          '__str__', '__repr__', '__hash__', '__reduce__'
      ]:
        continue

      child_name = '.'.join([self.full_name, short_name])
      child = parser_config.py_name_to_object(child_name)

      # Don't document anything that is defined in object or by protobuf.
      defining_class = _get_defining_class(py_class, short_name)
      if defining_class in [object, type, tuple, BaseException, Exception]:
        continue

      # The following condition excludes most protobuf-defined symbols.
      if (defining_class and
          defining_class.__name__ in ['CMessage', 'Message', 'MessageMeta']):
        continue
      # TODO(markdaoust): Add a note in child docs showing the defining class.

      if doc_controls.should_skip_class_attr(py_class, short_name):
        continue

      child_doc = _parse_md_docstring(child, relative_path,
                                      parser_config.reference_resolver)

      if isinstance(child, property):
        self._add_property(short_name, child_name, child, child_doc)

      elif tf_inspect.isclass(child):
        if defining_class is None:
          continue
        url = parser_config.reference_resolver.reference_to_url(
            child_name, relative_path)
        self._add_class(short_name, child_name, child, child_doc, url)

      elif (tf_inspect.ismethod(child) or tf_inspect.isfunction(child) or
            tf_inspect.isroutine(child)):
        if defining_class is None:
          continue

        # Omit methods defined by namedtuple.
        original_method = defining_class.__dict__[short_name]
        if (hasattr(original_method, '__module__') and
            (original_method.__module__ or '').startswith('namedtuple')):
          continue

        # Some methods are often overridden without documentation. Because it's
        # obvious what they do, don't include them in the docs if there's no
        # docstring.
        if not child_doc.brief.strip() and short_name in [
            '__del__', '__copy__'
        ]:
          continue

        try:
          child_signature = _generate_signature(child,
                                                parser_config.reverse_index)
        except TypeError:
          # If this is a (dynamically created) slot wrapper, tf_inspect will
          # raise typeerror when trying to get to the code. Ignore such
          # functions.
          continue

        child_decorators = []
        try:
          if isinstance(py_class.__dict__[short_name], classmethod):
            child_decorators.append('classmethod')
        except KeyError:
          pass

        try:
          if isinstance(py_class.__dict__[short_name], staticmethod):
            child_decorators.append('staticmethod')
        except KeyError:
          pass

        self._add_method(short_name, child_name, child, child_doc,
                         child_signature, child_decorators)
      else:
        # Exclude members defined by protobuf that are useless
        if issubclass(py_class, ProtoMessage):
          if (short_name.endswith('_FIELD_NUMBER') or
              short_name in ['__slots__', 'DESCRIPTOR']):
            continue

        # TODO(wicke): We may want to also remember the object itself.
        self._add_other_member(short_name, child_name, child, child_doc)
class _ModulePageInfo(object):
"""Collects docs for a module page."""
def __init__(self, full_name):
self._full_name = full_name
self._defined_in = None
self._aliases = None
self._doc = None
self._guides = None
self._modules = []
self._classes = []
self._functions = []
self._other_members = []
def for_function(self):
return False
def for_class(self):
return False
def for_module(self):
return True
@property
def full_name(self):
return self._full_name
@property
def short_name(self):
return self._full_name.split('.')[-1]
@property
def defined_in(self):
return self._defined_in
def set_defined_in(self, defined_in):
assert self.defined_in is None
self._defined_in = defined_in
@property
def aliases(self):
return self._aliases
def set_aliases(self, aliases):
assert self.aliases is None
self._aliases = aliases
@property
def doc(self):
return self._doc
def set_doc(self, doc):
assert self.doc is None
self._doc = doc
@property
def guides(self):
return self._guides
def set_guides(self, guides):
assert self.guides is None
self._guides = guides
@property
def modules(self):
return self._modules
def _add_module(self, short_name, full_name, obj, doc, url):
self._modules.append(_LinkInfo(short_name, full_name, obj, doc, url))
@property
def classes(self):
return self._classes
def _add_class(self, short_name, full_name, obj, doc, url):
self._classes.append(_LinkInfo(short_name, full_name, obj, doc, url))
@property
def functions(self):
return self._functions
def _add_function(self, short_name, full_name, obj, doc, url):
self._functions.append(_LinkInfo(short_name, full_name, obj, doc, url))
@property
def other_members(self):
return self._other_members
def _add_other_member(self, short_name, full_name, obj, doc):
self._other_members.append(
_OtherMemberInfo(short_name, full_name, obj, doc))
def get_metadata_html(self):
meta_data = _Metadata(self.full_name)
# Objects with their own pages are not added to the matadata list for the
# module, the module only has a link to the object page. No docs.
for item in self.other_members:
meta_data.append(item)
return meta_data.build_html()
def collect_docs_for_module(self, parser_config):
  """Collect information necessary specifically for a module's doc page.

  Mainly this is information about the members of the module.

  Args:
    parser_config: An instance of ParserConfig.
  """
  # Path from this page's directory back to the documentation root, used to
  # build relative member links.
  relative_path = os.path.relpath(
      path='.',
      start=os.path.dirname(documentation_path(self.full_name)) or '.')

  member_names = parser_config.tree.get(self.full_name, [])
  for name in member_names:
    # Skip Python-internal module attributes; they are not API members.
    if name in ['__builtins__', '__doc__', '__file__',
                '__name__', '__path__', '__package__',
                '__cached__', '__loader__', '__spec__']:
      continue

    member_full_name = self.full_name + '.' + name if self.full_name else name
    member = parser_config.py_name_to_object(member_full_name)

    member_doc = _parse_md_docstring(member, relative_path,
                                     parser_config.reference_resolver)

    url = parser_config.reference_resolver.reference_to_url(
        member_full_name, relative_path)

    # Dispatch each member into the typed listing used by the page template.
    if tf_inspect.ismodule(member):
      self._add_module(name, member_full_name, member, member_doc, url)
    elif tf_inspect.isclass(member):
      self._add_class(name, member_full_name, member, member_doc, url)
    elif tf_inspect.isfunction(member):
      self._add_function(name, member_full_name, member, member_doc, url)
    else:
      self._add_other_member(name, member_full_name, member, member_doc)
class ParserConfig(object):
  """Bundle of the lookup tables needed while parsing the docs."""

  def __init__(self, reference_resolver, duplicates, duplicate_of, tree, index,
               reverse_index, guide_index, base_dir):
    """Creates the shared configuration used by docs_for_object() calls.

    Args:
      reference_resolver: A ReferenceResolver instance used to turn symbol
        references into links.
      duplicates: Dict mapping a fully qualified name to every alias of that
        name; used to emit the "aliases" list on each page.
      duplicate_of: Dict mapping each duplicate name to its preferred name.
      tree: Dict mapping a fully qualified name to the names of its members;
        drives the members section of class and module pages.
      index: Dict mapping full names to the Python objects they refer to.
      reverse_index: Dict mapping object ids back to full names.
      guide_index: Dict mapping symbol-name strings to objects exposing a
        `make_md_link()` method.
      base_dir: Path prefix stripped from file locations written to the docs.
    """
    # Symbol tables.
    self.index = index
    self.reverse_index = reverse_index
    self.tree = tree
    # Alias bookkeeping.
    self.duplicates = duplicates
    self.duplicate_of = duplicate_of
    # Cross-reference helpers.
    self.reference_resolver = reference_resolver
    self.guide_index = guide_index
    # Source-location settings.
    self.base_dir = base_dir
    self.defined_in_prefix = 'tensorflow/'
    self.code_url_prefix = (
        '/code/stable/tensorflow/')  # pylint: disable=line-too-long

  def py_name_to_object(self, full_name):
    """Looks up the Python object registered under `full_name`."""
    return self.index[full_name]
def docs_for_object(full_name, py_object, parser_config):
  """Builds the PageInfo object describing one symbol of the TF API.

  Parses the symbol's docstring with _parse_md_docstring, resolving
  '@{symbol}' references into links, and attaches the symbol's alias list.
  Assumes every object's docs live at the path given by `documentation_path`
  and that relative links within the documentation resolve.

  Args:
    full_name: Fully qualified name of the symbol to document.
    py_object: The Python object whose docstring is the doc source.
    parser_config: A ParserConfig object.

  Returns:
    A `_FunctionPageInfo`, `_ClassPageInfo`, or `_ModulePageInfo`, matching
    the kind of `py_object`.

  Raises:
    RuntimeError: If `py_object` is of a kind we cannot document.
  """
  # Canonical name plus every alias registered for it.
  master_name = parser_config.reference_resolver.py_master_name(full_name)
  duplicate_names = parser_config.duplicates.get(master_name, [full_name])

  # TODO(wicke): Once other pieces are ready, enable this also for partials.
  function_like = (
      tf_inspect.ismethod(py_object) or tf_inspect.isfunction(py_object) or
      # Some methods in classes from extensions come in as routines.
      tf_inspect.isroutine(py_object))

  if function_like:
    page_info = _FunctionPageInfo(master_name)
    page_info.set_signature(py_object, parser_config.reverse_index)
  elif tf_inspect.isclass(py_object):
    page_info = _ClassPageInfo(master_name)
    page_info.collect_docs_for_class(py_object, parser_config)
  elif tf_inspect.ismodule(py_object):
    page_info = _ModulePageInfo(master_name)
    page_info.collect_docs_for_module(parser_config)
  else:
    raise RuntimeError('Cannot make docs for object %s: %r' % (full_name,
                                                               py_object))

  doc_dir = os.path.dirname(documentation_path(full_name)) or '.'
  relative_path = os.path.relpath(path='.', start=doc_dir)

  page_info.set_doc(_parse_md_docstring(
      py_object, relative_path, parser_config.reference_resolver))
  page_info.set_aliases(duplicate_names)
  page_info.set_guides(_get_guides_markdown(
      duplicate_names, parser_config.guide_index, relative_path))
  page_info.set_defined_in(_get_defined_in(py_object, parser_config))

  return page_info
class _PythonBuiltin(object):
"""This class indicated that the object in question is a python builtin.
This can be used for the `defined_in` slot of the `PageInfo` objects.
"""
def is_builtin(self):
return True
def is_python_file(self):
return False
def is_generated_file(self):
return False
def __str__(self):
return 'This is an alias for a Python built-in.\n\n'
class _PythonFile(object):
"""This class indicates that the object is defined in a regular python file.
This can be used for the `defined_in` slot of the `PageInfo` objects.
"""
def __init__(self, path, parser_config):
self.path = path
self.path_prefix = parser_config.defined_in_prefix
self.code_url_prefix = parser_config.code_url_prefix
def is_builtin(self):
return False
def is_python_file(self):
return True
def is_generated_file(self):
return False
def __str__(self):
return 'Defined in [`{prefix}{path}`]({code_prefix}{path}).\n\n'.format(
path=self.path, prefix=self.path_prefix,
code_prefix=self.code_url_prefix)
class _ProtoFile(object):
"""This class indicates that the object is defined in a .proto file.
This can be used for the `defined_in` slot of the `PageInfo` objects.
"""
def __init__(self, path, parser_config):
self.path = path
self.path_prefix = parser_config.defined_in_prefix
self.code_url_prefix = parser_config.code_url_prefix
def is_builtin(self):
return False
def is_python_file(self):
return False
def is_generated_file(self):
return False
def __str__(self):
return 'Defined in [`{prefix}{path}`]({code_prefix}{path}).\n\n'.format(
path=self.path, prefix=self.path_prefix,
code_prefix=self.code_url_prefix)
class _GeneratedFile(object):
"""This class indicates that the object is defined in a generated python file.
Generated files should not be linked to directly.
This can be used for the `defined_in` slot of the `PageInfo` objects.
"""
def __init__(self, path, parser_config):
self.path = path
self.path_prefix = parser_config.defined_in_prefix
def is_builtin(self):
return False
def is_python_file(self):
return False
def is_generated_file(self):
return True
def __str__(self):
return 'Defined in generated file: `%s%s`.\n\n' % (self.path_prefix,
self.path)
def _get_defined_in(py_object, parser_config):
  """Describes where `py_object` was defined.

  Args:
    py_object: The Python object.
    parser_config: A ParserConfig object.

  Returns:
    A `_PythonBuiltin`, `_PythonFile`, `_ProtoFile`, or `_GeneratedFile`
    describing the definition site, or None for files outside the code base.
  """
  # Every page gets a note about where this object is defined
  # TODO(wicke): If py_object is decorated, get the decorated object instead.
  # TODO(wicke): Only use decorators that support this in TF.
  try:
    path = os.path.relpath(path=tf_inspect.getfile(py_object),
                           start=parser_config.base_dir)
  except TypeError:
    # getfile raises TypeError for builtins, which have no source file.
    return _PythonBuiltin()

  # TODO(wicke): If this is a generated file, link to the source instead.
  # TODO(wicke): Move all generated files to a generated/ directory.
  # TODO(wicke): And make their source file predictable from the file name.

  # Point at the source for compiled files.
  if path.endswith('.pyc'):
    path = path[:-1]

  # Never include links outside this code base.
  if path.startswith('..') or re.search(r'\b_api\b', path):
    return None

  # Generated sources: op wrappers, genfiles output, and API-generator output.
  is_generated = (re.match(r'.*/gen_[^/]*\.py$', path) or
                  'genfiles' in path or
                  'tools/api/generator' in path)
  if is_generated:
    return _GeneratedFile(path, parser_config)
  if re.match(r'.*_pb2\.py$', path):
    # The _pb2.py files all appear right next to their defining .proto file.
    return _ProtoFile(path[:-7] + '.proto', parser_config)
  return _PythonFile(path, parser_config)
# TODO(markdaoust): This should just parse, pretty_docs should generate the md.
def generate_global_index(library_name, index, reference_resolver):
  """Renders an index page listing every documented symbol in `index`.

  Only symbols with their own documentation page (modules, classes and free
  functions) are listed; class methods are filtered out.

  Args:
    library_name: Library name to use in the page title.
    index: Dict mapping full names to Python objects.
    reference_resolver: An instance of ReferenceResolver.

  Returns:
    The index page as a Markdown string.
  """
  symbol_links = []
  for full_name, py_object in six.iteritems(index):
    documentable = (tf_inspect.ismodule(py_object) or
                    tf_inspect.isfunction(py_object) or
                    tf_inspect.isclass(py_object))
    if not documentable:
      continue
    if tf_inspect.isfunction(py_object):
      # In Python 3, unbound methods are functions, so eliminate those.
      parent_name = full_name[:full_name.rfind('.')] if '.' in full_name else ''
      if parent_name in index and tf_inspect.isclass(index[parent_name]):
        # Skip methods (=functions with class parents).
        continue
    symbol_links.append(
        (full_name, reference_resolver.python_link(full_name, full_name, '.')))

  lines = ['# All symbols in %s' % library_name, '']
  lines.extend('* %s' % link
               for _, link in sorted(symbol_links, key=lambda entry: entry[0]))

  # TODO(markdaoust): use a _ModulePageInfo -> prety_docs.build_md_page()
  return '\n'.join(lines)
class _Metadata(object):
"""A class for building a page's Metadata block.
Attributes:
name: The name of the page being described by the Metadata block.
version: The source version.
"""
def __init__(self, name, version='Stable'):
"""Creates a Metadata builder.
Args:
name: The name of the page being described by the Metadata block.
version: The source version.
"""
self.name = name
self.version = version
self._content = []
def append(self, item):
"""Adds an item from the page to the Metadata block.
Args:
item: The parsed page section to add.
"""
self._content.append(item.short_name)
def build_html(self):
"""Returns the Metadata block as an Html string."""
schema = 'http://developers.google.com/ReferenceObject'
parts = ['<div itemscope itemtype="%s">' % schema]
parts.append('<meta itemprop="name" content="%s" />' % self.name)
parts.append('<meta itemprop="path" content="%s" />' % self.version)
for item in self._content:
parts.append('<meta itemprop="property" content="%s"/>' % item)
parts.extend(['</div>', ''])
return '\n'.join(parts)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/docs/parser.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for documentation control decorators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import googletest
from tensorflow.tools.docs import doc_controls
class DocControlsTest(googletest.TestCase):
  """Tests for the documentation-control decorators in `doc_controls`."""

  def test_do_not_generate_docs(self):
    # A decorated free function is skipped outright.
    @doc_controls.do_not_generate_docs
    def dummy_function():
      pass

    self.assertTrue(doc_controls.should_skip(dummy_function))

  def test_do_not_doc_on_method(self):
    """The simple decorator is not aware of inheritance."""

    class Parent(object):

      @doc_controls.do_not_generate_docs
      def my_method(self):
        pass

    class Child(Parent):

      def my_method(self):
        pass

    class GrandChild(Child):
      pass

    # Only the decorated definition itself is skipped; overrides are not.
    self.assertTrue(doc_controls.should_skip(Parent.my_method))
    self.assertFalse(doc_controls.should_skip(Child.my_method))
    self.assertFalse(doc_controls.should_skip(GrandChild.my_method))

    self.assertTrue(doc_controls.should_skip_class_attr(Parent, 'my_method'))
    self.assertFalse(doc_controls.should_skip_class_attr(Child, 'my_method'))
    self.assertFalse(
        doc_controls.should_skip_class_attr(GrandChild, 'my_method'))

  def test_do_not_doc_inheritable(self):
    # The inheritable decorator also hides overrides in subclasses.

    class Parent(object):

      @doc_controls.do_not_doc_inheritable
      def my_method(self):
        pass

    class Child(Parent):

      def my_method(self):
        pass

    class GrandChild(Child):
      pass

    # `should_skip` only sees the decorated definition...
    self.assertTrue(doc_controls.should_skip(Parent.my_method))
    self.assertFalse(doc_controls.should_skip(Child.my_method))
    self.assertFalse(doc_controls.should_skip(GrandChild.my_method))

    # ...but the class-attr check walks the MRO and hides all of them.
    self.assertTrue(doc_controls.should_skip_class_attr(Parent, 'my_method'))
    self.assertTrue(doc_controls.should_skip_class_attr(Child, 'my_method'))
    self.assertTrue(
        doc_controls.should_skip_class_attr(GrandChild, 'my_method'))

  def test_do_not_doc_inheritable_property(self):
    # Same inheritable behavior when the decorated member is a property.

    class Parent(object):

      @property
      @doc_controls.do_not_doc_inheritable
      def my_method(self):
        pass

    class Child(Parent):

      @property
      def my_method(self):
        pass

    class GrandChild(Child):
      pass

    self.assertTrue(doc_controls.should_skip(Parent.my_method))
    self.assertFalse(doc_controls.should_skip(Child.my_method))
    self.assertFalse(doc_controls.should_skip(GrandChild.my_method))

    self.assertTrue(doc_controls.should_skip_class_attr(Parent, 'my_method'))
    self.assertTrue(doc_controls.should_skip_class_attr(Child, 'my_method'))
    self.assertTrue(
        doc_controls.should_skip_class_attr(GrandChild, 'my_method'))

  def test_do_not_doc_inheritable_staticmethod(self):
    # Same inheritable behavior for staticmethods; an undecorated method
    # higher in the hierarchy (GrandParent) stays documented.

    class GrandParent(object):

      def my_method(self):
        pass

    class Parent(GrandParent):

      @staticmethod
      @doc_controls.do_not_doc_inheritable
      def my_method():
        pass

    class Child(Parent):

      @staticmethod
      def my_method():
        pass

    class GrandChild(Child):
      pass

    self.assertFalse(doc_controls.should_skip(GrandParent.my_method))
    self.assertTrue(doc_controls.should_skip(Parent.my_method))
    self.assertFalse(doc_controls.should_skip(Child.my_method))
    self.assertFalse(doc_controls.should_skip(GrandChild.my_method))

    self.assertFalse(
        doc_controls.should_skip_class_attr(GrandParent, 'my_method'))
    self.assertTrue(doc_controls.should_skip_class_attr(Parent, 'my_method'))
    self.assertTrue(doc_controls.should_skip_class_attr(Child, 'my_method'))
    self.assertTrue(
        doc_controls.should_skip_class_attr(GrandChild, 'my_method'))

  def test_for_subclass_implementers(self):
    # `for_subclass_implementers` documents the decorated definition but
    # hides inherited copies and overrides further down.

    class GrandParent(object):

      def my_method(self):
        pass

    class Parent(GrandParent):

      @doc_controls.for_subclass_implementers
      def my_method(self):
        pass

    class Child(Parent):
      pass

    class GrandChild(Child):

      def my_method(self):
        pass

    class Grand2Child(Child):
      pass

    self.assertFalse(
        doc_controls.should_skip_class_attr(GrandParent, 'my_method'))
    self.assertFalse(doc_controls.should_skip_class_attr(Parent, 'my_method'))
    self.assertTrue(doc_controls.should_skip_class_attr(Child, 'my_method'))
    self.assertTrue(
        doc_controls.should_skip_class_attr(GrandChild, 'my_method'))
    self.assertTrue(
        doc_controls.should_skip_class_attr(Grand2Child, 'my_method'))

  def test_for_subclass_implementers_short_circuit(self):
    # Redecorating at a lower level re-enables docs for that level only.

    class GrandParent(object):

      @doc_controls.for_subclass_implementers
      def my_method(self):
        pass

    class Parent(GrandParent):

      def my_method(self):
        pass

    class Child(Parent):

      @doc_controls.do_not_doc_inheritable
      def my_method(self):
        pass

    class GrandChild(Child):

      @doc_controls.for_subclass_implementers
      def my_method(self):
        pass

    class Grand2Child(Child):
      pass

    self.assertFalse(
        doc_controls.should_skip_class_attr(GrandParent, 'my_method'))
    self.assertTrue(doc_controls.should_skip_class_attr(Parent, 'my_method'))
    self.assertTrue(doc_controls.should_skip_class_attr(Child, 'my_method'))
    self.assertFalse(
        doc_controls.should_skip_class_attr(GrandChild, 'my_method'))
    self.assertTrue(
        doc_controls.should_skip_class_attr(Grand2Child, 'my_method'))
if __name__ == '__main__':
  # Run the test suite when executed directly.
  googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/docs/doc_controls_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for py_guide_parser."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.platform import test
from tensorflow.tools.docs import py_guide_parser
class TestPyGuideParser(py_guide_parser.PyGuideParser):
  """PyGuideParser subclass that records every callback it receives."""

  def __init__(self):
    # Each entry is a (line_number, kind_char, payload) tuple.
    self.calls = []
    py_guide_parser.PyGuideParser.__init__(self)

  def process_title(self, line_number, title):
    self.calls.append((line_number, 't', title))

  def process_section(self, line_number, section_title, tag):
    self.calls.append((line_number, 's', '%s : %s' % (section_title, tag)))

  def process_in_blockquote(self, line_number, line):
    self.calls.append((line_number, 'b', line))
    # Append ' BQ' so tests can verify in-place line replacement works.
    self.replace_line(line_number, line + ' BQ')

  def process_line(self, line_number, line):
    self.calls.append((line_number, 'l', line))
class PyGuideParserTest(test.TestCase):
  """End-to-end check of PyGuideParser callbacks and line replacement."""

  def testBasics(self):
    # Write a small guide exercising a title, a section, a blockquote and
    # plain lines.
    tmp = os.path.join(test.get_temp_dir(), 'py_guide_parser_test.md')
    f = open(tmp, 'w')
    f.write("""# a title
a line
## a section
```shell
in a blockquote
```
out of blockquote
""")
    f.close()
    parser = TestPyGuideParser()
    result = parser.process(tmp)
    # Blockquote lines (including the opening fence) get ' BQ' appended by
    # the test parser's process_in_blockquote.
    expected = """# a title
a line
## a section
```shell BQ
in a blockquote BQ
```
out of blockquote
"""
    self.assertEqual(expected, result)
    # Each (line_number, kind, payload) callback recorded by the parser.
    expected = [(0, 't', 'a title'),
                (1, 'l', 'a line'),
                (2, 's', 'a section : a_section'),
                (3, 'b', '```shell'),
                (4, 'b', 'in a blockquote'),
                (5, 'l', '```'),
                (6, 'l', 'out of blockquote'),
                (7, 'l', '')]
    self.assertEqual(expected, parser.calls)
if __name__ == '__main__':
  # Run the test suite when executed directly.
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/docs/py_guide_parser_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for doc generator traversal."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from tensorflow.python.platform import googletest
from tensorflow.tools.docs import generate_lib
from tensorflow.tools.docs import parser
# Module-level function used as a documentable symbol in the tests below.
def test_function():
  """Docstring for test_function."""
  pass
# Nested classes give the doc generator a multi-level hierarchy to walk.
class TestClass(object):
  """Docstring for TestClass itself."""

  class ChildClass(object):
    """Docstring for a child class."""

    class GrandChildClass(object):
      """Docstring for a child of a child class."""
      pass
class DummyVisitor(object):
  """Minimal visitor stub exposing only `index` and `duplicate_of`."""

  def __init__(self, index, duplicate_of):
    # Stored verbatim; consumers read these attributes directly.
    self.duplicate_of = duplicate_of
    self.index = index
class GenerateTest(googletest.TestCase):
  """Tests doc writing, heading-id tagging, and reference replacement."""

  def get_test_objects(self):
    """Builds a fresh (reference_resolver, parser_config) pair."""
    # These are all mutable objects, so rebuild them for each test.
    # Don't cache the objects.
    module = sys.modules[__name__]

    index = {
        'tf': sys,  # Can be any module, this test doesn't care about content.
        'tf.TestModule': module,
        'tf.test_function': test_function,
        'tf.TestModule.test_function': test_function,
        'tf.TestModule.TestClass': TestClass,
        'tf.TestModule.TestClass.ChildClass': TestClass.ChildClass,
        'tf.TestModule.TestClass.ChildClass.GrandChildClass':
            TestClass.ChildClass.GrandChildClass,
    }

    tree = {
        'tf': ['TestModule', 'test_function'],
        'tf.TestModule': ['test_function', 'TestClass'],
        'tf.TestModule.TestClass': ['ChildClass'],
        'tf.TestModule.TestClass.ChildClass': ['GrandChildClass'],
        'tf.TestModule.TestClass.ChildClass.GrandChildClass': []
    }

    # 'tf.test_function' is an alias of 'tf.TestModule.test_function'.
    duplicate_of = {'tf.test_function': 'tf.TestModule.test_function'}

    duplicates = {
        'tf.TestModule.test_function': [
            'tf.test_function', 'tf.TestModule.test_function'
        ]
    }

    base_dir = os.path.dirname(__file__)

    visitor = DummyVisitor(index, duplicate_of)

    reference_resolver = parser.ReferenceResolver.from_visitor(
        visitor=visitor, doc_index={}, py_module_names=['tf'])

    parser_config = parser.ParserConfig(
        reference_resolver=reference_resolver,
        duplicates=duplicates,
        duplicate_of=duplicate_of,
        tree=tree,
        index=index,
        reverse_index={},
        guide_index={},
        base_dir=base_dir)

    return reference_resolver, parser_config

  def test_write(self):
    """write_docs emits pages, a TOC, and alias redirects."""
    _, parser_config = self.get_test_objects()

    output_dir = googletest.GetTempDir()

    generate_lib.write_docs(output_dir, parser_config, yaml_toc=True,
                            site_api_path='api_docs/python')

    # Check redirects
    redirects_file = os.path.join(output_dir, '_redirects.yaml')
    self.assertTrue(os.path.exists(redirects_file))
    with open(redirects_file) as f:
      redirects = f.read()
    # The alias page should redirect to the master name's page.
    self.assertEqual(redirects.split(), [
        'redirects:', '-', 'from:', '/api_docs/python/tf/test_function', 'to:',
        '/api_docs/python/tf/TestModule/test_function'
    ])

    # Make sure that the right files are written to disk.
    self.assertTrue(os.path.exists(os.path.join(output_dir, 'index.md')))
    self.assertTrue(os.path.exists(os.path.join(output_dir, 'tf.md')))
    self.assertTrue(os.path.exists(os.path.join(output_dir, '_toc.yaml')))
    self.assertTrue(
        os.path.exists(os.path.join(output_dir, 'tf/TestModule.md')))
    # The alias name itself gets no page of its own.
    self.assertFalse(
        os.path.exists(os.path.join(output_dir, 'tf/test_function.md')))
    self.assertTrue(
        os.path.exists(
            os.path.join(output_dir, 'tf/TestModule/TestClass.md')))
    self.assertTrue(
        os.path.exists(
            os.path.join(output_dir,
                         'tf/TestModule/TestClass/ChildClass.md')))
    self.assertTrue(
        os.path.exists(
            os.path.join(
                output_dir,
                'tf/TestModule/TestClass/ChildClass/GrandChildClass.md')))
    # Make sure that duplicates are not written
    self.assertTrue(
        os.path.exists(
            os.path.join(output_dir, 'tf/TestModule/test_function.md')))

  def test_update_id_tags_inplace(self):
    """Level-2 markdown headings in .md files become anchored <h2> tags."""
    test_dir = googletest.GetTempDir()
    test_sub_dir = os.path.join(test_dir, 'a/b')
    os.makedirs(test_sub_dir)

    test_path1 = os.path.join(test_dir, 'file1.md')
    test_path2 = os.path.join(test_sub_dir, 'file2.md')
    # Files without a .md extension must be left untouched.
    test_path3 = os.path.join(test_sub_dir, 'file3.notmd')

    with open(test_path1, 'w') as f:
      f.write('## abc&123')

    with open(test_path2, 'w') as f:
      f.write('# A Level 1 Heading\n')
      f.write('## A Level 2 Heading')

    with open(test_path3, 'w') as f:
      f.write("## don\'t change this")

    generate_lib.update_id_tags_inplace(test_dir)

    with open(test_path1) as f:
      content = f.read()
    # Non-alphanumeric characters in the heading map to '_' in the id.
    self.assertEqual(content, '<h2 id="abc_123">abc&123</h2>')

    with open(test_path2) as f:
      content = f.read()
    # Only level-2 headings are rewritten; level-1 is preserved.
    self.assertEqual(
        content, '# A Level 1 Heading\n'
        '<h2 id="A_Level_2_Heading">A Level 2 Heading</h2>')

    with open(test_path3) as f:
      content = f.read()
    self.assertEqual(content, "## don\'t change this")

  def test_replace_refes(self):
    """replace_refs rewrites @{...} and `tf.*` refs in matching files only."""
    test_dir = googletest.GetTempDir()
    test_in_dir = os.path.join(test_dir, 'in')
    test_in_dir_a = os.path.join(test_dir, 'in/a')
    test_in_dir_b = os.path.join(test_dir, 'in/b')
    os.makedirs(test_in_dir)
    os.makedirs(test_in_dir_a)
    os.makedirs(test_in_dir_b)

    test_out_dir = os.path.join(test_dir, 'out')
    os.makedirs(test_out_dir)

    test_path1 = os.path.join(test_in_dir_a, 'file1.md')
    test_path2 = os.path.join(test_in_dir_b, 'file2.md')
    # Non-.md files are copied unchanged; OWNERS files are not copied at all.
    test_path3 = os.path.join(test_in_dir_b, 'file3.notmd')
    test_path4 = os.path.join(test_in_dir_b, 'OWNERS')

    with open(test_path1, 'w') as f:
      f.write('Use `tf.test_function` to test things.')

    with open(test_path2, 'w') as f:
      f.write('Use @{tf.TestModule.TestClass.ChildClass} to test things.\n'
              "`tf.whatever` doesn't exist")

    with open(test_path3, 'w') as f:
      file3_content = (
          'Not a .md file. Should be copied unchanged:'
          '@{tf.TestModule.TestClass.ChildClass}, `tf.test_function`')
      f.write(file3_content)

    with open(test_path4, 'w') as f:
      f.write('')

    reference_resolver, _ = self.get_test_objects()
    generate_lib.replace_refs(test_in_dir, test_out_dir, reference_resolver,
                              '*.md')

    with open(os.path.join(test_out_dir, 'a/file1.md')) as f:
      content = f.read()
      # Alias references resolve to the master name's page.
      self.assertEqual(
          content,
          'Use <a href="../api_docs/python/tf/TestModule/test_function.md">'
          '<code>tf.test_function</code></a> to test things.')

    with open(os.path.join(test_out_dir, 'b/file2.md')) as f:
      content = f.read()
      # Unknown symbols (`tf.whatever`) are left as-is.
      self.assertEqual(
          content,
          'Use '
          '<a href="../api_docs/python/tf/TestModule/TestClass/ChildClass.md">'
          '<code>tf.TestModule.TestClass.ChildClass</code></a> '
          'to test things.\n'
          '`tf.whatever` doesn\'t exist')

    with open(os.path.join(test_out_dir, 'b/file3.notmd')) as f:
      content = f.read()
      self.assertEqual(content, file3_content)

    with self.assertRaises(IOError):
      # This should fail. The OWNERS file should not be copied
      with open(os.path.join(test_out_dir, 'b/OWNERS')) as f:
        content = f.read()
if __name__ == '__main__':
  # Run the test suite when executed directly.
  googletest.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/docs/generate_lib_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Smoke test for reading records from GCS to TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import sys
import time
import numpy as np
import tensorflow as tf
from tensorflow.core.example import example_pb2
from tensorflow.python.lib.io import file_io
flags = tf.app.flags
flags.DEFINE_string("gcs_bucket_url", "",
"The URL to the GCS bucket in which the temporary "
"tfrecord file is to be written and read, e.g., "
"gs://my-gcs-bucket/test-directory")
flags.DEFINE_integer("num_examples", 10, "Number of examples to generate")
FLAGS = flags.FLAGS
def create_examples(num_examples, input_mean):
  """Create ExampleProto's containing data.

  Args:
    num_examples: Number of Example protos to generate.
    input_mean: Scalar added to the random inputs; the target feature is the
      input with this mean subtracted again.

  Returns:
    A list of `num_examples` populated `example_pb2.Example` protos with
    "id", "target" and "inputs" features.
  """
  ids = np.arange(num_examples).reshape([num_examples, 1])
  inputs = np.random.randn(num_examples, 1) + input_mean
  target = inputs - input_mean
  examples = []
  for row in range(num_examples):
    ex = example_pb2.Example()
    # bytes_list requires `bytes`: encode explicitly so this works under
    # Python 3 (where str(...) alone would raise a TypeError); on Python 2
    # encode('utf-8') on an ASCII str is a no-op.
    ex.features.feature["id"].bytes_list.value.append(
        str(ids[row, 0]).encode("utf-8"))
    ex.features.feature["target"].float_list.value.append(target[row, 0])
    ex.features.feature["inputs"].float_list.value.append(inputs[row, 0])
    examples.append(ex)
  return examples
def create_dir_test():
  """Verifies file_io directory handling methods.

  Exercises create_dir, recursive_create_dir, is_directory, list_directory,
  rename and delete_recursively against the GCS bucket in
  FLAGS.gcs_bucket_url, printing the elapsed time of each operation.
  """
  # Test directory creation.
  starttime_ms = int(round(time.time() * 1000))
  # Unique directory name per run, keyed by the start timestamp.
  dir_name = "%s/tf_gcs_test_%s" % (FLAGS.gcs_bucket_url, starttime_ms)
  print("Creating dir %s" % dir_name)
  file_io.create_dir(dir_name)
  elapsed_ms = int(round(time.time() * 1000)) - starttime_ms
  print("Created directory in: %d milliseconds" % elapsed_ms)

  # Check that the directory exists.
  dir_exists = file_io.is_directory(dir_name)
  assert dir_exists
  print("%s directory exists: %s" % (dir_name, dir_exists))

  # Test recursive directory creation.
  starttime_ms = int(round(time.time() * 1000))
  recursive_dir_name = "%s/%s/%s" % (dir_name,
                                     "nested_dir1",
                                     "nested_dir2")
  print("Creating recursive dir %s" % recursive_dir_name)
  file_io.recursive_create_dir(recursive_dir_name)
  elapsed_ms = int(round(time.time() * 1000)) - starttime_ms
  print("Created directory recursively in: %d milliseconds" % elapsed_ms)

  # Check that the directory exists.
  recursive_dir_exists = file_io.is_directory(recursive_dir_name)
  assert recursive_dir_exists
  print("%s directory exists: %s" % (recursive_dir_name, recursive_dir_exists))

  # Create some contents in the just created directory and list the contents.
  num_files = 10
  files_to_create = ["file_%d.txt" % n for n in range(num_files)]
  for file_num in files_to_create:
    file_name = "%s/%s" % (dir_name, file_num)
    print("Creating file %s." % file_name)
    file_io.write_string_to_file(file_name, "test file.")

  print("Listing directory %s." % dir_name)
  starttime_ms = int(round(time.time() * 1000))
  directory_contents = file_io.list_directory(dir_name)
  print(directory_contents)
  elapsed_ms = int(round(time.time() * 1000)) - starttime_ms
  print("Listed directory %s in %s milliseconds" % (dir_name, elapsed_ms))
  # Listing should show the created files plus the nested directory entry.
  assert set(directory_contents) == set(files_to_create + ["nested_dir1/"])

  # Test directory renaming.
  dir_to_rename = "%s/old_dir" % dir_name
  new_dir_name = "%s/new_dir" % dir_name
  file_io.create_dir(dir_to_rename)
  assert file_io.is_directory(dir_to_rename)
  assert not file_io.is_directory(new_dir_name)

  starttime_ms = int(round(time.time() * 1000))
  print("Will try renaming directory %s to %s" % (dir_to_rename, new_dir_name))
  file_io.rename(dir_to_rename, new_dir_name)
  elapsed_ms = int(round(time.time() * 1000)) - starttime_ms
  print("Renamed directory %s to %s in %s milliseconds" % (
      dir_to_rename, new_dir_name, elapsed_ms))
  # Rename must move, not copy: the old name must be gone.
  assert not file_io.is_directory(dir_to_rename)
  assert file_io.is_directory(new_dir_name)

  # Test Delete directory recursively.
  print("Deleting directory recursively %s." % dir_name)
  starttime_ms = int(round(time.time() * 1000))
  file_io.delete_recursively(dir_name)
  elapsed_ms = int(round(time.time() * 1000)) - starttime_ms
  dir_exists = file_io.is_directory(dir_name)
  assert not dir_exists
  print("Deleted directory recursively %s in %s milliseconds" % (
      dir_name, elapsed_ms))
def create_object_test():
  """Exercises file_io object (file) create, glob-list, rename and delete."""

  def _now_ms():
    # Millisecond wall-clock timestamp, used both for unique names and timing.
    return int(round(time.time() * 1000))

  t0_ms = _now_ms()
  dir_name = "%s/tf_gcs_test_%s" % (FLAGS.gcs_bucket_url, t0_ms)
  print("Creating dir %s." % dir_name)
  file_io.create_dir(dir_name)

  num_files = 5
  # Two distinct filename patterns so glob matching can be checked per pattern.
  files_pattern_1 = ["%s/test_file_%d.txt" % (dir_name, n)
                     for n in range(num_files)]
  files_pattern_2 = ["%s/testfile%d.txt" % (dir_name, n)
                     for n in range(num_files)]

  t0_ms = _now_ms()
  files_to_create = files_pattern_1 + files_pattern_2
  for file_name in files_to_create:
    print("Creating file %s." % file_name)
    file_io.write_string_to_file(file_name, "test file creation.")
  elapsed_ms = _now_ms() - t0_ms
  print("Created %d files in %s milliseconds" % (
      len(files_to_create), elapsed_ms))

  # Each glob pattern must return exactly its own set of files.
  for pattern_suffix, expected_files in (("test_file*.txt", files_pattern_1),
                                         ("testfile*.txt", files_pattern_2)):
    list_files_pattern = "%s/%s" % (dir_name, pattern_suffix)
    print("Getting files matching pattern %s." % list_files_pattern)
    t0_ms = _now_ms()
    files_list = file_io.get_matching_files(list_files_pattern)
    elapsed_ms = _now_ms() - t0_ms
    print("Listed files in %s milliseconds" % elapsed_ms)
    print(files_list)
    assert set(files_list) == set(expected_files)

  # Rename a freshly written file; the old name must disappear and the new
  # one must exist afterwards.
  file_to_rename = "%s/oldname.txt" % dir_name
  file_new_name = "%s/newname.txt" % dir_name
  file_io.write_string_to_file(file_to_rename, "test file.")
  assert file_io.file_exists(file_to_rename)
  assert not file_io.file_exists(file_new_name)
  print("Will try renaming file %s to %s" % (file_to_rename, file_new_name))
  t0_ms = _now_ms()
  file_io.rename(file_to_rename, file_new_name)
  elapsed_ms = _now_ms() - t0_ms
  print("File %s renamed to %s in %s milliseconds" % (
      file_to_rename, file_new_name, elapsed_ms))
  assert not file_io.file_exists(file_to_rename)
  assert file_io.file_exists(file_new_name)

  # Clean up everything this test created.
  print("Deleting directory %s." % dir_name)
  file_io.delete_recursively(dir_name)
def main(argv):
  """End-to-end GCS smoke test: TFRecord round-trip, then file_io checks.

  Exits the process with status 1 on any failed check.
  """
  del argv  # Unused.

  # Sanity check on the GCS bucket URL.
  if not FLAGS.gcs_bucket_url or not FLAGS.gcs_bucket_url.startswith("gs://"):
    print("ERROR: Invalid GCS bucket URL: \"%s\"" % FLAGS.gcs_bucket_url)
    sys.exit(1)

  # Generate random tfrecord path name so concurrent runs don't collide.
  input_path = FLAGS.gcs_bucket_url + "/"
  input_path += "".join(random.choice("0123456789ABCDEF") for i in range(8))
  input_path += ".tfrecord"
  print("Using input path: %s" % input_path)

  # Verify that writing to the records file in GCS works.
  print("\n=== Testing writing and reading of GCS record file... ===")
  example_data = create_examples(FLAGS.num_examples, 5)
  with tf.python_io.TFRecordWriter(input_path) as hf:
    for e in example_data:
      hf.write(e.SerializeToString())
  print("Data written to: %s" % input_path)

  # Verify that reading from the tfrecord file works and that
  # tf_record_iterator works.
  record_iter = tf.python_io.tf_record_iterator(input_path)
  read_count = 0
  for _ in record_iter:
    read_count += 1
  print("Read %d records using tf_record_iterator" % read_count)

  if read_count != FLAGS.num_examples:
    print("FAIL: The number of records read from tf_record_iterator (%d) "
          "differs from the expected number (%d)" % (read_count,
                                                     FLAGS.num_examples))
    sys.exit(1)

  # Verify that running the read op in a session works.
  print("\n=== Testing TFRecordReader.read op in a session... ===")
  with tf.Graph().as_default():
    filename_queue = tf.train.string_input_producer([input_path], num_epochs=1)
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)

    with tf.Session() as sess:
      sess.run(tf.global_variables_initializer())
      # NOTE(review): local variables presumably back the num_epochs counter
      # of string_input_producer — confirm against TF 1.x docs.
      sess.run(tf.local_variables_initializer())
      tf.train.start_queue_runners()
      index = 0
      for _ in range(FLAGS.num_examples):
        print("Read record: %d" % index)
        sess.run(serialized_example)
        index += 1

      # Reading one more record should trigger an exception.
      try:
        sess.run(serialized_example)
        print("FAIL: Failed to catch the expected OutOfRangeError while "
              "reading one more record than is available")
        sys.exit(1)
      except tf.errors.OutOfRangeError:
        print("Successfully caught the expected OutOfRangeError while "
              "reading one more record than is available")

  # Directory- and object-level file_io checks against the same bucket.
  create_dir_test()
  create_object_test()
if __name__ == "__main__":
  # tf.app.run parses the command-line flags (FLAGS) and then calls main(argv).
  tf.app.run(main)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/gcs_test/python/gcs_smoke.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Visitor restricting traversal to only the public tensorflow API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.python.util import tf_inspect
class PublicAPIVisitor(object):
  """Visitor to use with `traverse` to visit exactly the public TF API."""

  def __init__(self, visitor):
    """Constructor.

    Args:
      visitor: A callable usable as a `traverse` visitor; it is invoked only
        for members that belong to the public TensorFlow API.
    """
    self._visitor = visitor
    self._root_name = 'tf'

    # Symbols to suppress entirely, keyed by the parent's full path.
    self._private_map = {
        # Some implementations have this internal module that we shouldn't
        # expose.
        'tf.flags': ['cpp_flags'],
    }

    # Symbols that stay visible but are never descended into. Usually system
    # modules exposed through platforms for compatibility reasons.
    self._do_not_descend_map = {
        'tf': [
            'compiler',
            'core',
            'examples',
            'flags',  # Don't add flags
            # TODO(drpng): This can be removed once sealed off.
            'platform',
            # TODO(drpng): This can be removed once sealed.
            'pywrap_tensorflow',
            # TODO(drpng): This can be removed once sealed.
            'user_ops',
            'python',
            'tools',
            'tensorboard',
        ],

        ## Everything below here is legitimate.
        # It'll stay, but it's not officially part of the API.
        'tf.app': ['flags'],
        # Imported for compatibility between py2/3.
        'tf.test': ['mock'],
        # Externalized modules of the Keras API.
        'tf.keras': ['applications', 'preprocessing']
    }

  @property
  def private_map(self):
    """Map from parent paths to child names excluded from the API entirely.

    May be edited, but not once traversal has begun.

    Returns:
      The map marking symbols to not include.
    """
    return self._private_map

  @property
  def do_not_descend_map(self):
    """Map from parent paths to child names visited but never entered.

    May be edited, but not once traversal has begun.

    Returns:
      The map marking symbols to not explore.
    """
    return self._do_not_descend_map

  def set_root_name(self, root_name):
    """Override the default root name of 'tf'."""
    self._root_name = root_name

  def _is_private(self, path, name, obj=None):
    """Return whether `name` under `path` is hidden from the public API."""
    del obj  # Unused.
    # TODO(wicke): Find out what names to exclude.
    if path in self._private_map and name in self._private_map[path]:
      return True
    if name in ('__base__', '__class__'):
      return True
    # Single-underscore names are private; full dunder names are not.
    return name.startswith('_') and not re.match('__.*__$', name)

  def _do_not_descend(self, path, name):
    """Safely queries if a specific fully qualified name should be excluded."""
    return (path in self._do_not_descend_map and
            name in self._do_not_descend_map[path])

  def __call__(self, path, parent, children):
    """Visitor interface, see `traverse` for details."""
    # Avoid long waits in cases of pretty unambiguous failure.
    if tf_inspect.ismodule(parent) and len(path.split('.')) > 10:
      raise RuntimeError('Modules nested too deep:\n%s.%s\n\nThis is likely a '
                         'problem with an accidental public import.' %
                         (self._root_name, path))

    # Includes self._root_name
    full_path = '.'.join([self._root_name, path]) if path else self._root_name

    # Hide private members before the wrapped visitor ever sees them. The
    # filter mutates `children` in place so `traverse` honors the pruning.
    children[:] = [(name, child) for name, child in children
                   if not self._is_private(full_path, name, child)]

    self._visitor(path, parent, children)

    # Prune, in place, members that must not be descended into; the wrapped
    # visitor above still saw them.
    children[:] = [(name, child) for name, child in children
                   if not self._do_not_descend(full_path, name)]
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/common/public_api.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Python module traversal."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import googletest
from tensorflow.tools.common import test_module1
from tensorflow.tools.common import test_module2
from tensorflow.tools.common import traverse
class TestVisitor(object):
  """Records every `(path, parent, children)` triple it is called with."""

  def __init__(self):
    self.call_log = []

  def __call__(self, path, parent, children):
    self.call_log.append((path, parent, children))
class TraverseTest(googletest.TestCase):
  """Tests for `traverse.traverse` over modules, classes and non-classes."""

  def test_cycle(self):
    # A class that contains itself as an attribute; traversal must terminate.

    class Cyclist(object):
      pass
    Cyclist.cycle = Cyclist

    visitor = TestVisitor()
    traverse.traverse(Cyclist, visitor)
    # We simply want to make sure we terminate.

  def test_module(self):
    visitor = TestVisitor()
    traverse.traverse(test_module1, visitor)
    called = [parent for _, parent, _ in visitor.call_log]
    # Traversal must reach classes both in the module itself and in the
    # module it imports (test_module2).
    self.assertIn(test_module1.ModuleClass1, called)
    self.assertIn(test_module2.ModuleClass2, called)

  def test_class(self):
    visitor = TestVisitor()
    traverse.traverse(TestVisitor, visitor)
    # The first visit is the root class itself.
    self.assertEqual(TestVisitor,
                     visitor.call_log[0][1])
    # There are a bunch of other members, but make sure that the ones we know
    # about are there.
    self.assertIn('__init__', [name for name, _ in visitor.call_log[0][2]])
    self.assertIn('__call__', [name for name, _ in visitor.call_log[0][2]])
    # There are more classes descended into, at least __class__ and
    # __class__.__base__, neither of which are interesting to us, and which may
    # change as part of Python version etc., so we don't test for them.

  def test_non_class(self):
    integer = 5
    visitor = TestVisitor()
    traverse.traverse(integer, visitor)
    # Roots that are neither a module nor a class are ignored entirely.
    self.assertEqual([], visitor.call_log)
if __name__ == '__main__':
  googletest.main()  # Run all TraverseTest cases.
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/common/traverse_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A module target for TraverseTest.test_module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.tools.common import test_module2
class ModuleClass1(object):
  """Traversal fixture; owns a ModuleClass2 so test_module2 is reachable."""

  def __init__(self):
    # Instantiating ModuleClass2 links this module's object graph to
    # test_module2, which TraverseTest.test_module checks for.
    self._m2 = test_module2.ModuleClass2()

  def __model_class1_method__(self):
    # Dunder-style member; presumably kept visible by the public-API
    # visitor's `__.*__$` rule — confirm against public_api.py.
    pass
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/common/test_module1.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Traversing Python modules and classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
import sys
from tensorflow.python.util import tf_inspect
__all__ = ['traverse']
def _traverse_internal(root, visit, stack, path):
  """Internal helper for `traverse`.

  Args:
    root: Object whose members are enumerated and visited.
    visit: Callable invoked as `visit(path, root, children)`; it may mutate
      `children` in place to prune the traversal.
    stack: List of ancestor objects, used for `is`-based cycle detection.
    path: Dotted path of `root` relative to the traversal origin ('' at top).
  """
  # Only traverse modules and classes
  if not tf_inspect.isclass(root) and not tf_inspect.ismodule(root):
    return

  try:
    children = tf_inspect.getmembers(root)

    # Add labels for duplicate values in Enum.
    if tf_inspect.isclass(root) and issubclass(root, enum.Enum):
      for enum_member in root.__members__.items():
        if enum_member not in children:
          children.append(enum_member)
      children = sorted(children)
  except ImportError:
    # On some Python installations, some modules do not support enumerating
    # members (six in particular), leading to import errors.
    children = []

  new_stack = stack + [root]
  # `visit` runs before recursion, so any in-place edits to `children`
  # control which members the loop below descends into.
  visit(path, root, children)
  for name, child in children:
    # Do not descend into built-in modules
    if tf_inspect.ismodule(
        child) and child.__name__ in sys.builtin_module_names:
      continue

    # Break cycles
    if any(child is item for item in new_stack):  # `in`, but using `is`
      continue

    child_path = path + '.' + name if path else name
    _traverse_internal(child, visit, new_stack, child_path)
def traverse(root, visit):
  """Recursively enumerate all members of `root`.

  Similar to the Python library function `os.path.walk`.

  Traverses the tree of Python objects starting with `root`, depth first.
  Parent-child relationships in the tree are defined by membership in modules or
  classes. The function `visit` is called with arguments
  `(path, parent, children)` for each module or class `parent` found in the tree
  of python objects starting with `root`. `path` is a string containing the name
  with which `parent` is reachable from the current context. For example, if
  `root` is a local class called `X` which contains a class `Y`, `visit` will be
  called with `('Y', X.Y, children)`).

  If `root` is not a module or class, `visit` is never called. `traverse`
  never descends into built-in modules.

  `children`, a list of `(name, object)` pairs are determined by
  `tf_inspect.getmembers`. To avoid visiting parts of the tree, `children` can
  be modified in place, using `del` or slice assignment.

  Cycles (determined by reference equality, `is`) stop the traversal. A stack of
  objects is kept to find cycles. Objects forming cycles may appear in
  `children`, but `visit` will not be called with any object as `parent` which
  is already in the stack.

  Traversing system modules can take a long time, it is advisable to pass a
  `visit` callable which blacklists such modules.

  Args:
    root: A python object with which to start the traversal.
    visit: A function taking arguments `(path, parent, children)`. Will be
      called for each object found in the traversal.
  """
  # Start with an empty ancestor stack and an empty relative path.
  _traverse_internal(root, visit, [], '')
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/common/traverse.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A module target for TraverseTest.test_module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class ModuleClass2(object):
  """Traversal fixture reached from test_module1 via ModuleClass1."""

  def __init__(self):
    pass

  def __model_class1_method__(self):
    # NOTE(review): the dunder name mirrors ModuleClass1's method; looks
    # copy-pasted — confirm whether a distinct name was intended.
    pass
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/common/test_module2.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.tools.common.public_api."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import googletest
from tensorflow.tools.common import public_api
class PublicApiTest(googletest.TestCase):
  """Checks that PublicAPIVisitor filters `children` as documented."""

  class TestVisitor(object):
    """Captures the arguments of the most recent visit."""

    def __init__(self):
      self.symbols = set()
      self.last_parent = None
      self.last_children = None

    def __call__(self, path, parent, children):
      self.symbols.add(path)
      self.last_parent = parent
      self.last_children = list(children)  # Make a copy to preserve state.

  def test_call_forward(self):
    recorder = self.TestVisitor()
    kids = [('name1', 'thing1'), ('name2', 'thing2')]
    public_api.PublicAPIVisitor(recorder)('test', 'dummy', kids)
    # All arguments must be forwarded to the wrapped visitor untouched.
    self.assertEqual(set(['test']), recorder.symbols)
    self.assertEqual('dummy', recorder.last_parent)
    self.assertEqual([('name1', 'thing1'), ('name2', 'thing2')],
                     recorder.last_children)

  def test_private_child_removal(self):
    recorder = self.TestVisitor()
    kids = [('name1', 'thing1'), ('_name2', 'thing2')]
    public_api.PublicAPIVisitor(recorder)('test', 'dummy', kids)
    # Make sure the private symbols are removed before the visitor is called.
    self.assertEqual([('name1', 'thing1')], recorder.last_children)
    self.assertEqual([('name1', 'thing1')], kids)

  def test_no_descent_child_removal(self):
    recorder = self.TestVisitor()
    kids = [('name1', 'thing1'), ('mock', 'thing2')]
    public_api.PublicAPIVisitor(recorder)('test', 'dummy', kids)
    # Make sure not-to-be-descended-into symbols are removed after the visitor
    # is called.
    self.assertEqual([('name1', 'thing1'), ('mock', 'thing2')],
                     recorder.last_children)
    self.assertEqual([('name1', 'thing1')], kids)
if __name__ == '__main__':
  googletest.main()  # Run all PublicApiTest cases.
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/common/public_api_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generates a Python module containing information about the build."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
def write_build_info(filename, is_config_cuda, is_config_rocm, key_value_list):
  """Writes a Python module that describes the build.

  Args:
    filename: filename to write to.
    is_config_cuda: Whether this build is using CUDA ("True"/"False" string).
    is_config_rocm: Whether this build is using ROCm ("True"/"False" string).
    key_value_list: A list of "key=value" strings that will be added to the
      module as additional fields.

  Raises:
    ValueError: If `key_value_list` includes the key "is_cuda_build" or
      "is_rocm_build", which would clash with one of the default fields.
  """
  module_docstring = "\"\"\"Generates a Python module containing information "
  module_docstring += "about the build.\"\"\""

  # Exactly one (or neither) of the two config flags becomes True; ROCm takes
  # precedence over CUDA, matching the original elif ordering.
  build_config_rocm_bool = "False"
  build_config_cuda_bool = "False"
  if is_config_rocm == "True":
    build_config_rocm_bool = "True"
  elif is_config_cuda == "True":
    build_config_cuda_bool = "True"

  key_value_pair_stmts = []
  if key_value_list:
    for arg in key_value_list:
      # Split only on the first '=' so values may themselves contain '='.
      key, value = arg.split("=", 1)
      if key == "is_cuda_build":
        raise ValueError("The key \"is_cuda_build\" cannot be passed as one of "
                         "the --key_value arguments.")
      if key == "is_rocm_build":
        raise ValueError("The key \"is_rocm_build\" cannot be passed as one of "
                         "the --key_value arguments.")
      key_value_pair_stmts.append("%s = %r" % (key, value))
  key_value_pair_content = "\n".join(key_value_pair_stmts)

  contents = """
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
%s
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

is_rocm_build = %s
is_cuda_build = %s

%s
""" % (module_docstring, build_config_rocm_bool, build_config_cuda_bool,
       key_value_pair_content)

  # Context manager guarantees the handle is flushed and closed even if the
  # write raises (the original `open(...).write(...)` leaked the handle).
  with open(filename, "w") as f:
    f.write(contents)
# Command-line interface. This runs at import time: the module is executed as
# a script by the build to generate build_info.py.
parser = argparse.ArgumentParser(
    description="""Build info injection into the PIP package.""")

parser.add_argument(
    "--is_config_cuda",
    type=str,
    help="'True' for CUDA GPU builds, 'False' otherwise.")

parser.add_argument(
    "--is_config_rocm",
    type=str,
    help="'True' for ROCm GPU builds, 'False' otherwise.")

parser.add_argument("--raw_generate", type=str, help="Generate build_info.py")

parser.add_argument(
    "--key_value", type=str, nargs="*", help="List of key=value pairs.")

args = parser.parse_args()

# All three flags are required together; anything else is a usage error.
if (args.raw_generate is not None) and (args.is_config_cuda is not None) and (
    args.is_config_rocm is not None):
  write_build_info(args.raw_generate, args.is_config_cuda, args.is_config_rocm,
                   args.key_value)
else:
  raise RuntimeError(
      "--raw_generate, --is_config_cuda and --is_config_rocm must be used")
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/build_info/gen_build_info.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests deprecation warnings in a few special cases."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import module_wrapper
# Raise the per-module deprecation-warning cap so each test case below can
# observe its own first-call warning. NOTE(review): presumably the default
# limit is lower — confirm in module_wrapper.
module_wrapper._PER_MODULE_WARNING_LIMIT = 5
class DeprecationTest(test.TestCase):
  """Checks that deprecated TF 1.x endpoints warn exactly once.

  Each test patches `logging.warning`, triggers a deprecated endpoint twice,
  and asserts the warning fires only on the first call (call_count stays 1),
  while the compat.v1 equivalent never warns.
  """

  @test.mock.patch.object(logging, "warning", autospec=True)
  def testDeprecatedFunction(self, mock_warning):
    # Deprecated function endpoint: tf.tables_initializer.
    self.assertEqual(0, mock_warning.call_count)
    tf.compat.v1.initializers.tables_initializer()
    self.assertEqual(0, mock_warning.call_count)

    tf.tables_initializer()
    self.assertEqual(1, mock_warning.call_count)
    # Warning args: [1] call site, [2] deprecated name, [3] replacement.
    self.assertRegexpMatches(mock_warning.call_args[0][1],
                             "deprecation_test.py:")
    self.assertRegexpMatches(
        mock_warning.call_args[0][2], r"tables_initializer")
    self.assertRegexpMatches(
        mock_warning.call_args[0][3],
        r"compat.v1.tables_initializer")
    tf.tables_initializer()
    self.assertEqual(1, mock_warning.call_count)

  @test.mock.patch.object(logging, "warning", autospec=True)
  def testDeprecatedClass(self, mock_warning):
    # Deprecated class endpoint: tf.ragged.RaggedTensorValue.
    value = np.array([1, 2, 3])
    row_splits = np.array([1])
    self.assertEqual(0, mock_warning.call_count)
    tf.compat.v1.ragged.RaggedTensorValue(value, row_splits)
    self.assertEqual(0, mock_warning.call_count)

    tf.ragged.RaggedTensorValue(value, row_splits)
    self.assertEqual(1, mock_warning.call_count)
    self.assertRegexpMatches(mock_warning.call_args[0][1],
                             "deprecation_test.py:")
    self.assertRegexpMatches(
        mock_warning.call_args[0][2], r"ragged.RaggedTensorValue")
    self.assertRegexpMatches(
        mock_warning.call_args[0][3],
        r"compat.v1.ragged.RaggedTensorValue")
    tf.ragged.RaggedTensorValue(value, row_splits)
    self.assertEqual(1, mock_warning.call_count)

  @test.mock.patch.object(logging, "warning", autospec=True)
  def testDeprecatedFunctionEndpoint(self, mock_warning):
    # Deprecated endpoint of a non-deprecated function: tf.sparse_mask.
    array = tf.IndexedSlices(
        tf.compat.v1.convert_to_tensor(np.array([1, 2])),
        tf.compat.v1.convert_to_tensor(np.array([0, 2])))
    mask_indices = tf.compat.v1.convert_to_tensor(np.array([2]))

    self.assertEqual(0, mock_warning.call_count)
    tf.sparse.mask(array, mask_indices)
    self.assertEqual(0, mock_warning.call_count)

    tf.sparse_mask(array, mask_indices)
    self.assertEqual(1, mock_warning.call_count)
    self.assertRegexpMatches(mock_warning.call_args[0][1],
                             "deprecation_test.py:")
    self.assertRegexpMatches(
        mock_warning.call_args[0][2], r"sparse_mask")
    self.assertRegexpMatches(
        mock_warning.call_args[0][3],
        "sparse.mask")
    tf.sparse_mask(array, mask_indices)
    self.assertEqual(1, mock_warning.call_count)

  @test.mock.patch.object(logging, "warning", autospec=True)
  def testDeprecatedClassEndpoint(self, mock_warning):
    # Deprecated endpoint of a non-deprecated class: tf.VarLenFeature.
    self.assertEqual(0, mock_warning.call_count)
    tf.io.VarLenFeature(tf.dtypes.int32)
    self.assertEqual(0, mock_warning.call_count)

    tf.VarLenFeature(tf.dtypes.int32)
    self.assertEqual(1, mock_warning.call_count)
    self.assertRegexpMatches(mock_warning.call_args[0][1],
                             "deprecation_test.py:")
    self.assertRegexpMatches(
        mock_warning.call_args[0][2], r"VarLenFeature")
    self.assertRegexpMatches(
        mock_warning.call_args[0][3],
        r"io.VarLenFeature")
    tf.VarLenFeature(tf.dtypes.int32)
    self.assertEqual(1, mock_warning.call_count)

  @test.mock.patch.object(logging, "warning", autospec=True)
  def testDeprecatedConstantEndpoint(self, mock_warning):
    # Deprecated endpoint of a module-level constant.
    self.assertEqual(0, mock_warning.call_count)
    tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY  # pylint: disable=pointless-statement
    self.assertEqual(0, mock_warning.call_count)

    tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY  # pylint: disable=pointless-statement
    self.assertEqual(1, mock_warning.call_count)
    self.assertRegexpMatches(mock_warning.call_args[0][1],
                             "deprecation_test.py:")
    self.assertRegexpMatches(
        mock_warning.call_args[0][2],
        r"saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY")
    self.assertRegexpMatches(
        mock_warning.call_args[0][3],
        r"saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY")
    tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY  # pylint: disable=pointless-statement
    self.assertEqual(1, mock_warning.call_count)

  @test.mock.patch.object(logging, "warning", autospec=True)
  def testKerasDeprecationNoWarning(self, mock_warning):
    # A non-deprecated Keras API must emit at most the initializer notice.
    self.assertEqual(0, mock_warning.call_count)
    tf.keras.layers.GRUCell(20)
    self.assertLessEqual(mock_warning.call_count, 1)
    if mock_warning.call_count == 1:
      # The only message printed should be due to referencing init op.
      self.assertRegexpMatches(
          mock_warning.call_args[0][-1],
          "Call initializer instance with the dtype argument instead of "
          "passing it to the constructor")

  @test.mock.patch.object(logging, "warning", autospec=True)
  def testKerasDeprecation(self, mock_warning):
    # Deprecated Keras backend function warns once and names the replacement.
    self.assertEqual(0, mock_warning.call_count)

    tf.keras.backend.get_session()
    self.assertEqual(1, mock_warning.call_count)
    self.assertRegexpMatches(
        mock_warning.call_args[0][-1],
        "tf.compat.v1.keras.backend.get_session")
    tf.keras.backend.get_session()
    self.assertEqual(1, mock_warning.call_count)

  @test.mock.patch.object(logging, "warning", autospec=True)
  def testKerasEndpointDeprecation(self, mock_warning):
    # Deprecated Keras metric endpoint warns once with the new losses name.
    self.assertEqual(0, mock_warning.call_count)

    tf.keras.metrics.cosine_proximity([0.5], [0.5])
    self.assertEqual(1, mock_warning.call_count)
    self.assertRegexpMatches(
        mock_warning.call_args[0][-1],
        "tf.keras.losses.cosine_similarity")
    tf.keras.metrics.cosine_proximity([0.5], [0.5])
    self.assertEqual(1, mock_warning.call_count)

  @test.mock.patch.object(logging, "warning", autospec=True)
  def testEstimatorDeprecation(self, mock_warning):
    # Guarded: KMeans is only present in some estimator builds.
    if "KMeans" in tf.estimator.experimental.__dict__:
      self.assertEqual(0, mock_warning.call_count)

      tf.estimator.experimental.KMeans(2)
      self.assertEqual(2, mock_warning.call_count)

      # First message is not a deprecation warning.
      self.assertRegexpMatches(
          mock_warning.call_args_list[1][0][0],
          "Using temporary folder as model directory:")
      # Second message is a deprecation warning.
      self.assertRegexpMatches(
          mock_warning.call_args_list[0][0][-1],
          "tf.compat.v1.estimator.experimental.KMeans")
if __name__ == "__main__":
  test.main()  # Run all DeprecationTest cases.
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/api/tests/deprecation_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
"""Smoke tests for tensorflow module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pkgutil
import tensorflow as tf
from tensorflow.python.platform import test
class ModuleTest(test.TestCase):
  """Smoke checks on the top-level `tensorflow` module."""

  def testCanLoadWithPkgutil(self):
    loader = pkgutil.find_loader('tensorflow')
    self.assertIsNotNone(loader)

  def testDocString(self):
    # The public docstring must be TensorFlow's own, not a wrapper's.
    self.assertIn('TensorFlow', tf.__doc__)
    self.assertNotIn('Wrapper', tf.__doc__)

  def testDict(self):
    # Reference a few submodules, then confirm they appear in __dict__.
    # pylint: disable=pointless-statement
    tf.nn
    tf.keras
    tf.image
    # pylint: enable=pointless-statement
    for submodule_name in ('nn', 'keras', 'image'):
      self.assertIn(submodule_name, tf.__dict__)

  def testName(self):
    self.assertEqual('tensorflow', tf.__name__)
if __name__ == '__main__':
  test.main()  # Run the module smoke tests when executed as a script.
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/api/tests/module_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
"""TensorFlow API compatibility tests.
This test ensures all changes to the public API of TensorFlow are intended.
If this test fails, it means a change has been made to the public API. Backwards
incompatible changes are not allowed. You can run the test with
"--update_goldens" flag set to "True" to update goldens when making changes to
the public TF python API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import re
import sys
import six
import tensorflow as tf
from google.protobuf import message
from google.protobuf import text_format
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.tools.api.lib import api_objects_pb2
from tensorflow.tools.api.lib import python_object_to_proto_visitor
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
# FLAGS defined at the bottom (populated by argparse in the __main__ block):
FLAGS = None
# DEFINE_boolean, update_goldens, default False:
_UPDATE_GOLDENS_HELP = """
Update stored golden files if API is updated. WARNING: All API changes
have to be authorized by TensorFlow leads.
"""
# DEFINE_boolean, only_test_core_api, default False:
_ONLY_TEST_CORE_API_HELP = """
Some TF APIs are being moved outside of the tensorflow/ directory. There is
no guarantee which versions of these APIs will be present when running this
test. Therefore, do not error out on API changes in non-core TF code
if this flag is set.
"""
# DEFINE_boolean, verbose_diffs, default True:
_VERBOSE_DIFFS_HELP = """
If set to true, print line by line diffs on all libraries. If set to
false, only print which libraries have differences.
"""
# Locations of the checked-in golden API pbtxt files, one folder per major
# API version.
_API_GOLDEN_FOLDER_V1 = 'tensorflow/tools/api/golden/v1'
_API_GOLDEN_FOLDER_V2 = 'tensorflow/tools/api/golden/v2'
_TEST_README_FILE = 'tensorflow/tools/api/tests/README.txt'
_UPDATE_WARNING_FILE = 'tensorflow/tools/api/tests/API_UPDATE_WARNING.txt'
# Packages whose goldens are skipped when --only_test_core_api is set.
_NON_CORE_PACKAGES = ['estimator']
# TODO(annarev): remove this once we test with newer version of
# estimator that actually has compat v1 version.
if not hasattr(tf.compat.v1, 'estimator'):
  tf.compat.v1.estimator = tf.estimator
  tf.compat.v2.estimator = tf.estimator
def _KeyToFilePath(key, api_version):
  """Construct the golden-file path for a given API object key.

  The file lives inside the golden folder matching api_version, and every
  capital letter in the key is rewritten as '-' plus its lowercase form.

  Args:
    key: a string used to determine the file path
    api_version: a number indicating the tensorflow API version, e.g. 1 or 2.

  Returns:
    A string of file path to the pbtxt file which describes the public API
  """

  def _DashForCap(matchobj):
    # 'A' -> '-a', so e.g. 'FooBar' becomes '-foo-bar'.
    return '-%s' % (matchobj.group(0).lower())

  dashed_key = re.sub('([A-Z]{1})', _DashForCap, key)
  if api_version == 2:
    api_folder = _API_GOLDEN_FOLDER_V2
  else:
    api_folder = _API_GOLDEN_FOLDER_V1
  return os.path.join(api_folder, '%s.pbtxt' % dashed_key)
def _FileNameToKey(filename):
"""From a given filename, construct a key we use for api objects."""
def _ReplaceDashWithCaps(matchobj):
match = matchobj.group(0)
return match[1].upper()
base_filename = os.path.basename(filename)
base_filename_without_ext = os.path.splitext(base_filename)[0]
api_object_key = re.sub('((-[a-z]){1})', _ReplaceDashWithCaps,
base_filename_without_ext)
return api_object_key
def _VerifyNoSubclassOfMessageVisitor(path, parent, unused_children):
  """A Visitor that crashes on subclasses of generated proto classes."""
  is_message_class = (
      isinstance(parent, type) and issubclass(parent, message.Message))
  if not is_message_class or parent is message.Message:
    return
  # Only direct subclasses of Message (i.e. generated proto classes) are
  # tolerated; anything deriving from a generated class is rejected.
  if message.Message not in parent.__bases__:
    raise NotImplementedError(
        'Object tf.%s is a subclass of a generated proto Message. '
        'They are not yet supported by the API tools.' % path)
def _FilterNonCoreGoldenFiles(golden_file_list):
  """Filter out non-core API pbtxt files."""
  skip_prefixes = tuple(
      'tensorflow.%s.' % p for p in _NON_CORE_PACKAGES)
  kept_files = []
  for filename in golden_file_list:
    # Only the base file name (after the last '/') is compared against the
    # non-core package prefixes.
    basename = filename.rsplit('/')[-1]
    if not basename.startswith(skip_prefixes):
      kept_files.append(filename)
  return kept_files
def _FilterGoldenProtoDict(golden_proto_dict, omit_golden_symbols_map):
  """Filter out golden proto dict symbols that should be omitted."""
  if not omit_golden_symbols_map:
    return golden_proto_dict
  filtered = dict(golden_proto_dict)
  for key, omitted_names in six.iteritems(omit_golden_symbols_map):
    # Operate on a copy so the caller's protos are never mutated.
    proto_copy = api_objects_pb2.TFAPIObject()
    proto_copy.CopyFrom(filtered[key])
    filtered[key] = proto_copy
    if proto_copy.HasField('tf_module'):
      container = proto_copy.tf_module
    elif proto_copy.HasField('tf_class'):
      container = proto_copy.tf_class
    else:
      container = None
    if container is None:
      continue
    for member_field in (container.member, container.member_method):
      surviving = [m for m in member_field if m.name not in omitted_names]
      # Protobuf repeated fields disallow slice assignment, so clear and
      # re-extend instead.
      del member_field[:]
      member_field.extend(surviving)
  return filtered
class ApiCompatibilityTest(test.TestCase):
  """Diffs the live TF public API against the checked-in golden pbtxt files."""

  def __init__(self, *args, **kwargs):
    super(ApiCompatibilityTest, self).__init__(*args, **kwargs)
    # Preload the texts shown to the user when diffs are found / goldens
    # are rewritten.
    golden_update_warning_filename = os.path.join(
        resource_loader.get_root_dir_with_all_resources(), _UPDATE_WARNING_FILE)
    self._update_golden_warning = file_io.read_file_to_string(
        golden_update_warning_filename)
    test_readme_filename = os.path.join(
        resource_loader.get_root_dir_with_all_resources(), _TEST_README_FILE)
    self._test_readme_message = file_io.read_file_to_string(
        test_readme_filename)

  def _AssertProtoDictEquals(self,
                             expected_dict,
                             actual_dict,
                             verbose=False,
                             update_goldens=False,
                             additional_missing_object_message='',
                             api_version=2):
    """Diff given dicts of protobufs and report differences a readable way.

    Args:
      expected_dict: a dict of TFAPIObject protos constructed from golden files.
      actual_dict: a dict of TFAPIObject protos constructed by reading from the
        TF package linked to the test.
      verbose: Whether to log the full diffs, or simply report which files were
        different.
      update_goldens: Whether to update goldens when there are diffs found.
      additional_missing_object_message: Message to print when a symbol is
        missing.
      api_version: TensorFlow API version to test.
    """
    diffs = []
    verbose_diffs = []
    expected_keys = set(expected_dict.keys())
    actual_keys = set(actual_dict.keys())
    only_in_expected = expected_keys - actual_keys
    only_in_actual = actual_keys - expected_keys
    all_keys = expected_keys | actual_keys
    # This will be populated below: keys whose protos differ between golden
    # and actual (both present, contents changed).
    updated_keys = []
    for key in all_keys:
      diff_message = ''
      verbose_diff_message = ''
      # First check if the key is not found in one or the other.
      if key in only_in_expected:
        diff_message = 'Object %s expected but not found (removed). %s' % (
            key, additional_missing_object_message)
        verbose_diff_message = diff_message
      elif key in only_in_actual:
        diff_message = 'New object %s found (added).' % key
        verbose_diff_message = diff_message
      else:
        # Do not truncate diff
        self.maxDiff = None  # pylint: disable=invalid-name
        # Now we can run an actual proto diff.
        try:
          self.assertProtoEquals(expected_dict[key], actual_dict[key])
        except AssertionError as e:
          updated_keys.append(key)
          diff_message = 'Change detected in python object: %s.' % key
          verbose_diff_message = str(e)
      # All difference cases covered above. If any difference found, add to the
      # list.
      if diff_message:
        diffs.append(diff_message)
        verbose_diffs.append(verbose_diff_message)
    # If diffs are found, handle them based on flags.
    if diffs:
      diff_count = len(diffs)
      logging.error(self._test_readme_message)
      logging.error('%d differences found between API and golden.', diff_count)
      messages = verbose_diffs if verbose else diffs
      for i in range(diff_count):
        print('Issue %d\t: %s' % (i + 1, messages[i]), file=sys.stderr)
      if update_goldens:
        # Write files if requested.
        logging.warning(self._update_golden_warning)
        # If the keys are only in expected, some objects are deleted.
        # Remove files.
        for key in only_in_expected:
          filepath = _KeyToFilePath(key, api_version)
          file_io.delete_file(filepath)
        # If the files are only in actual (current library), these are new
        # modules. Write them to files. Also record all updates in files.
        for key in only_in_actual | set(updated_keys):
          filepath = _KeyToFilePath(key, api_version)
          file_io.write_string_to_file(
              filepath, text_format.MessageToString(actual_dict[key]))
      else:
        # Fail if we cannot fix the test by updating goldens.
        self.fail('%d differences found between API and golden.' % diff_count)
    else:
      logging.info('No differences found between API and golden.')

  def testNoSubclassOfMessage(self):
    """The default API must not expose subclasses of generated proto classes."""
    visitor = public_api.PublicAPIVisitor(_VerifyNoSubclassOfMessageVisitor)
    visitor.do_not_descend_map['tf'].append('contrib')
    # Skip compat.v1 and compat.v2 since they are validated in separate tests.
    visitor.private_map['tf.compat'] = ['v1', 'v2']
    traverse.traverse(tf, visitor)

  def testNoSubclassOfMessageV1(self):
    """Same proto-subclass check, applied to the compat.v1 API surface."""
    if not hasattr(tf.compat, 'v1'):
      return
    visitor = public_api.PublicAPIVisitor(_VerifyNoSubclassOfMessageVisitor)
    visitor.do_not_descend_map['tf'].append('contrib')
    if FLAGS.only_test_core_api:
      visitor.do_not_descend_map['tf'].extend(_NON_CORE_PACKAGES)
    traverse.traverse(tf.compat.v1, visitor)

  def testNoSubclassOfMessageV2(self):
    """Same proto-subclass check, applied to the compat.v2 API surface."""
    if not hasattr(tf.compat, 'v2'):
      return
    visitor = public_api.PublicAPIVisitor(_VerifyNoSubclassOfMessageVisitor)
    visitor.do_not_descend_map['tf'].append('contrib')
    if FLAGS.only_test_core_api:
      visitor.do_not_descend_map['tf'].extend(_NON_CORE_PACKAGES)
    visitor.private_map['tf.compat'] = ['v1', 'v2']
    traverse.traverse(tf.compat.v2, visitor)

  def _checkBackwardsCompatibility(self,
                                   root,
                                   golden_file_pattern,
                                   api_version,
                                   additional_private_map=None,
                                   omit_golden_symbols_map=None):
    """Traverse `root`, build API protos, and diff them against goldens.

    Args:
      root: module whose public API is traversed (e.g. tf, tf.compat.v1).
      golden_file_pattern: glob pattern matching the golden pbtxt files.
      api_version: TensorFlow API version being validated (1 or 2).
      additional_private_map: optional extra entries merged into the
        visitor's private_map to skip subtrees during traversal.
      omit_golden_symbols_map: optional map of golden key -> member names to
        drop from the goldens before comparison.
    """
    # Extract all API stuff.
    visitor = python_object_to_proto_visitor.PythonObjectToProtoVisitor()
    public_api_visitor = public_api.PublicAPIVisitor(visitor)
    public_api_visitor.private_map['tf'] = ['contrib']
    if api_version == 2:
      public_api_visitor.private_map['tf'].append('enable_v2_behavior')
    public_api_visitor.do_not_descend_map['tf.GPUOptions'] = ['Experimental']
    if FLAGS.only_test_core_api:
      public_api_visitor.do_not_descend_map['tf'].extend(_NON_CORE_PACKAGES)
    if additional_private_map:
      public_api_visitor.private_map.update(additional_private_map)
    traverse.traverse(root, public_api_visitor)
    proto_dict = visitor.GetProtos()
    # Read all golden files.
    golden_file_list = file_io.get_matching_files(golden_file_pattern)
    if FLAGS.only_test_core_api:
      golden_file_list = _FilterNonCoreGoldenFiles(golden_file_list)

    def _ReadFileToProto(filename):
      """Read a filename, create a protobuf from its contents."""
      ret_val = api_objects_pb2.TFAPIObject()
      text_format.Merge(file_io.read_file_to_string(filename), ret_val)
      return ret_val

    golden_proto_dict = {
        _FileNameToKey(filename): _ReadFileToProto(filename)
        for filename in golden_file_list
    }
    golden_proto_dict = _FilterGoldenProtoDict(golden_proto_dict,
                                               omit_golden_symbols_map)
    # Diff them. Do not fail if called with update.
    # If the test is run to update goldens, only report diffs but do not fail.
    self._AssertProtoDictEquals(
        golden_proto_dict,
        proto_dict,
        verbose=FLAGS.verbose_diffs,
        update_goldens=FLAGS.update_goldens,
        api_version=api_version)

  def testAPIBackwardsCompatibility(self):
    """Validates whichever major API version the linked package exposes."""
    # tf.bitwise's module name contains '_api.v2' only in a V2 build.
    api_version = 2 if '_api.v2' in tf.bitwise.__name__ else 1
    golden_file_pattern = os.path.join(
        resource_loader.get_root_dir_with_all_resources(),
        _KeyToFilePath('*', api_version))
    omit_golden_symbols_map = {}
    if api_version == 2 and FLAGS.only_test_core_api:
      # In TF 2.0 these summary symbols are imported from TensorBoard.
      omit_golden_symbols_map['tensorflow.summary'] = [
          'audio', 'histogram', 'image', 'scalar', 'text']
    self._checkBackwardsCompatibility(
        tf,
        golden_file_pattern,
        api_version,
        # Skip compat.v1 and compat.v2 since they are validated
        # in separate tests.
        additional_private_map={'tf.compat': ['v1', 'v2']},
        omit_golden_symbols_map=omit_golden_symbols_map)
    # Also check that V1 API has contrib
    self.assertTrue(
        api_version == 2 or
        'tensorflow.python.util.lazy_loader.LazyLoader'
        in str(type(tf.contrib)))
    # Check that V2 API does not have contrib
    self.assertTrue(api_version == 1 or not hasattr(tf, 'contrib'))

  def testAPIBackwardsCompatibilityV1(self):
    """Validates the compat.v1 API surface against the v1 goldens."""
    api_version = 1
    golden_file_pattern = os.path.join(
        resource_loader.get_root_dir_with_all_resources(),
        _KeyToFilePath('*', api_version))
    self._checkBackwardsCompatibility(
        tf.compat.v1, golden_file_pattern, api_version,
        additional_private_map={'tf': ['pywrap_tensorflow']},
        omit_golden_symbols_map={'tensorflow': ['pywrap_tensorflow']})

  def testAPIBackwardsCompatibilityV2(self):
    """Validates the compat.v2 API surface against the v2 goldens."""
    api_version = 2
    golden_file_pattern = os.path.join(
        resource_loader.get_root_dir_with_all_resources(),
        _KeyToFilePath('*', api_version))
    omit_golden_symbols_map = {}
    if FLAGS.only_test_core_api:
      # In TF 2.0 these summary symbols are imported from TensorBoard.
      omit_golden_symbols_map['tensorflow.summary'] = [
          'audio', 'histogram', 'image', 'scalar', 'text']
    self._checkBackwardsCompatibility(
        tf.compat.v2,
        golden_file_pattern,
        api_version,
        additional_private_map={'tf.compat': ['v1', 'v2']},
        omit_golden_symbols_map=omit_golden_symbols_map)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--update_goldens', type=bool, default=False, help=_UPDATE_GOLDENS_HELP)
# TODO(mikecase): Create Estimator's own API compatibility test or
# a more general API compatibility test for use for TF components.
parser.add_argument(
'--only_test_core_api',
type=bool,
default=True, # only_test_core_api default value
help=_ONLY_TEST_CORE_API_HELP)
parser.add_argument(
'--verbose_diffs', type=bool, default=True, help=_VERBOSE_DIFFS_HELP)
FLAGS, unparsed = parser.parse_known_args()
# Now update argv, so that unittest library does not get confused.
sys.argv = [sys.argv[0]] + unparsed
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/api/tests/api_compatibility_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
"""A visitor class that generates protobufs for each python object."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import enum
from google.protobuf import message
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.tools.api.lib import api_objects_pb2
# The following objects need to be handled individually. For each path, the
# inner dict maps member names to hand-written member specs; an empty inner
# dict for a member means that member is skipped entirely, and an empty dict
# for a path means the whole object is skipped (see __call__ below).
_CORNER_CASES = {
    '': {
        'tools': {}
    },
    'test.TestCase': {},
    'test.TestCase.failureException': {},
    'train.NanLossDuringTrainingError': {
        'message': {}
    },
    'estimator.NanLossDuringTrainingError': {
        'message': {}
    },
    'train.LooperThread': {
        'join': {},
        'native_id': {}
    }
}
# Python 2 vs. 3 differences: on Python 3 we map "<class 'x'>" type reprs
# back to the "<type 'x'>" spelling (the form recorded in the golden files —
# presumably generated under Python 2; confirm against the goldens).
if sys.version_info.major == 3:
  _NORMALIZE_TYPE = {}
  for t in ('property', 'object', 'getset_descriptor', 'int', 'str', 'type',
            'tuple', 'module', 'collections.defaultdict', 'set', 'dict',
            'NoneType', 'frozenset'):
    _NORMALIZE_TYPE["<class '%s'>" % t] = "<type '%s'>" % t
  for e in 'Exception', 'RuntimeError':
    _NORMALIZE_TYPE["<class '%s'>" % e] = "<type 'exceptions.%s'>" % e
  _NORMALIZE_TYPE["<class 'abc.ABCMeta'>"] = "<type 'type'>"
  # Maps nested-class reprs (Outer.Inner) to the flat spelling used in the
  # golden files.
  _NORMALIZE_ISINSTANCE = {
      "<class "
      "'tensorflow.lite.python.op_hint.OpHint.OpHintArgumentTracker'>":  # pylint: disable=line-too-long
          "<class "
          "'tensorflow.lite.python.op_hint.OpHintArgumentTracker'>",
      "<class "
      "'tensorflow.python.training.monitored_session._MonitoredSession.StepContext'>":  # pylint: disable=line-too-long
          "<class "
          "'tensorflow.python.training.monitored_session.StepContext'>",
      "<class "
      "'tensorflow.python.ops.variables.Variable.SaveSliceInfo'>":
          "<class "
          "'tensorflow.python.ops.variables.SaveSliceInfo'>"
  }

  def _SkipMember(cls, member):
    # Skip 'with_traceback', and 'name'/'value' when cls is an enum.Enum
    # subclass.
    return (member == 'with_traceback' or member in ('name', 'value') and
            isinstance(cls, type) and issubclass(cls, enum.Enum))
else:
  _NORMALIZE_TYPE = {"<class 'abc.ABCMeta'>": "<type 'type'>"}
  _NORMALIZE_ISINSTANCE = {}

  def _SkipMember(cls, member):  # pylint: disable=unused-argument
    # On Python 2 no members are skipped.
    return False

if sys.version_info.major == 3 and sys.version_info.minor >= 8:
  # Python 3.8+ implements namedtuple attributes with _tuplegetter instead
  # of property; normalize to the golden spelling.
  _NORMALIZE_TYPE["<class '_collections._tuplegetter'>"] = "<type 'property'>"
def _NormalizeType(ty):
  """Return the normalized (golden-file) spelling of a type repr, if any."""
  if ty in _NORMALIZE_TYPE:
    return _NORMALIZE_TYPE[ty]
  return ty
def _NormalizeIsInstance(ty):
  """Return the normalized spelling of a nested-class repr, if registered."""
  if ty in _NORMALIZE_ISINSTANCE:
    return _NORMALIZE_ISINSTANCE[ty]
  return ty
def _SanitizedArgSpec(obj):
  """Get an ArgSpec string that is free of memory addresses.

  Callables used as default argument values render with their address
  (e.g. '<f at 0x...>') in getargspec output; that address changes from run
  to run, so it is stripped here to keep the string stable.

  Args:
    obj: a python routine to describe.

  Returns:
    string, a string representation of the argspec.
  """
  arg_spec = tf_inspect.getargspec(obj)
  pieces = []
  for attr in ('args', 'varargs', 'keywords'):
    pieces.append('%s=%s, ' % (attr, getattr(arg_spec, attr)))
  if arg_spec.defaults:
    sanitized = []
    for default in arg_spec.defaults:
      text = str(default)
      # Sanitize argspecs that have hex code in them.
      if ' at 0x' in text:
        sanitized.append('%s instance>' % text.split(' at ')[0])
      else:
        sanitized.append(text)
    pieces.append('defaults=%s, ' % sanitized)
  else:
    pieces.append('defaults=None')
  return ''.join(pieces)
def _SanitizedMRO(obj):
  """Get a list of superclasses with minimal amount of non-TF classes.

  Based on many parameters like python version, OS, protobuf implementation
  or changes in google core libraries the list of superclasses of a class
  can change. We only return the first non-TF class to be robust to non API
  affecting changes. The Method Resolution Order returned by
  `tf_inspect.getmro` is still maintained in the return value.

  Args:
    obj: a python class whose MRO is summarized.

  Returns:
    list of strings, string representation of the class names.
  """
  sanitized = []
  for cls in tf_inspect.getmro(obj):
    if cls.__name__ == '_NewClass':
      # Ignore class created by @deprecated_alias decorator.
      continue
    cls_repr = _NormalizeType(str(cls))
    sanitized.append(cls_repr)
    if 'tensorflow' not in cls_repr:
      break
    # Hack - tensorflow.test.StubOutForTesting may or may not be type <object>
    # depending on the environment. To avoid inconsistency, break after we add
    # StubOutForTesting to the list.
    if 'StubOutForTesting' in cls_repr:
      break
  return sanitized
def _IsProtoClass(obj):
  """Returns whether the passed obj is a Protocol Buffer class."""
  if not isinstance(obj, type):
    return False
  return issubclass(obj, message.Message)
class PythonObjectToProtoVisitor(object):
  """A visitor that summarizes given python objects as protobufs."""

  def __init__(self):
    # A dict to store all protocol buffers.
    # Keyed by "path" to the object.
    self._protos = {}

  def GetProtos(self):
    """Return the list of protos stored."""
    return self._protos

  def __call__(self, path, parent, children):
    """Record a TFAPIObject proto describing `parent` at `path`.

    Args:
      path: dotted path of the object below the tensorflow root ('' for the
        root module itself).
      parent: the traversed python object (expected to be a module or class;
        anything else is logged as an error).
      children: list of (name, object) pairs for the members of parent.
    """
    # The path to the object.
    lib_path = 'tensorflow.%s' % path if path else 'tensorflow'
    _, parent = tf_decorator.unwrap(parent)

    # A small helper method to construct members(children) protos.
    def _AddMember(member_name, member_obj, proto):
      """Add the child object to the object being constructed."""
      _, member_obj = tf_decorator.unwrap(member_obj)
      if (_SkipMember(parent, member_name) or
          isinstance(member_obj, deprecation.HiddenTfApiAttribute)):
        return
      # Only __init__ and non-underscore-prefixed members are recorded.
      if member_name == '__init__' or not member_name.startswith('_'):
        if tf_inspect.isroutine(member_obj):
          new_method = proto.member_method.add()
          new_method.name = member_name
          # If member_obj is a python builtin, there is no way to get its
          # argspec, because it is implemented on the C side. It also has no
          # func_code.
          if hasattr(member_obj, '__code__'):
            new_method.argspec = _SanitizedArgSpec(member_obj)
        else:
          new_member = proto.member.add()
          new_member.name = member_name
          if tf_inspect.ismodule(member_obj):
            new_member.mtype = "<type \'module\'>"
          else:
            new_member.mtype = _NormalizeType(str(type(member_obj)))

    parent_corner_cases = _CORNER_CASES.get(path, {})

    # A path listed in _CORNER_CASES with an empty spec is skipped entirely.
    if path not in _CORNER_CASES or parent_corner_cases:
      # Decide if we have a module or a class.
      if tf_inspect.ismodule(parent):
        # Create a module object.
        module_obj = api_objects_pb2.TFAPIModule()
        for name, child in children:
          if name in parent_corner_cases:
            # If we have an empty entry, skip this object.
            if parent_corner_cases[name]:
              module_obj.member.add(**(parent_corner_cases[name]))
          else:
            _AddMember(name, child, module_obj)
        # Store the constructed module object.
        self._protos[lib_path] = api_objects_pb2.TFAPIObject(
            path=lib_path, tf_module=module_obj)
      elif _IsProtoClass(parent):
        # Generated proto classes are described by their descriptor rather
        # than by member enumeration.
        proto_obj = api_objects_pb2.TFAPIProto()
        parent.DESCRIPTOR.CopyToProto(proto_obj.descriptor)
        # Store the constructed proto object.
        self._protos[lib_path] = api_objects_pb2.TFAPIObject(
            path=lib_path, tf_proto=proto_obj)
      elif tf_inspect.isclass(parent):
        # Construct a class.
        class_obj = api_objects_pb2.TFAPIClass()
        class_obj.is_instance.extend(
            _NormalizeIsInstance(i) for i in _SanitizedMRO(parent))
        for name, child in children:
          if name in parent_corner_cases:
            # If we have an empty entry, skip this object.
            if parent_corner_cases[name]:
              class_obj.member.add(**(parent_corner_cases[name]))
          else:
            _AddMember(name, child, class_obj)
        # Store the constructed class object.
        self._protos[lib_path] = api_objects_pb2.TFAPIObject(
            path=lib_path, tf_class=class_obj)
      else:
        logging.error('Illegal call to ApiProtoDump::_py_obj_to_proto.'
                      'Object is neither a module nor a class: %s', path)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/api/lib/python_object_to_proto_visitor.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exposes the Python wrapper for graph transforms."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import, line-too-long
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import errors
from tensorflow.python.pywrap_tensorflow import TransformGraphWithStringInputs
from tensorflow.python.util import compat
def TransformGraph(input_graph_def, inputs, outputs, transforms):
  """Python wrapper for the Graph Transform Tool.

  Gives access to all graph transforms available through the command line tool.
  See documentation at https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/graph_transforms/README.md
  for full details of the options available.

  Args:
    input_graph_def: GraphDef object containing a model to be transformed.
    inputs: List of node names for the model inputs.
    outputs: List of node names for the model outputs.
    transforms: List of strings containing transform names and parameters.

  Returns:
    New GraphDef with transforms applied.
  """
  # The C++ tool takes serialized/joined byte strings rather than python
  # objects.
  serialized_input = input_graph_def.SerializeToString()
  joined_inputs = compat.as_bytes(",".join(inputs))
  joined_outputs = compat.as_bytes(",".join(outputs))
  joined_transforms = compat.as_bytes(" ".join(transforms))
  # A non-OK status coming back from the tool is raised as a Python exception
  # by this context manager.
  with errors.raise_exception_on_not_ok_status() as status:
    serialized_output = TransformGraphWithStringInputs(
        serialized_input, joined_inputs, joined_outputs, joined_transforms,
        status)
  result = graph_pb2.GraphDef()
  result.ParseFromString(serialized_output)
  return result
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/graph_transforms/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for StatSummarizer Python wrapper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.platform import test
from tensorflow.tools.graph_transforms import TransformGraph
class TransformGraphTest(test.TestCase):
  """Tests for the TransformGraph Python wrapper."""

  # This test constructs a graph with a relu op that's not used by the normal
  # inference path, and then tests that the strip_unused transform removes it
  # as expected.
  def testTransformGraph(self):
    input_graph_def = graph_pb2.GraphDef()
    # Every op in this test operates on float32.
    float_dtype = attr_value_pb2.AttrValue(
        type=dtypes.float32.as_datatype_enum)

    def _add_const(name, values):
      """Appends a 1x2 float32 Const node named `name` holding `values`."""
      node = input_graph_def.node.add()
      node.op = "Const"
      node.name = name
      node.attr["dtype"].CopyFrom(float_dtype)
      node.attr["value"].CopyFrom(
          attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(
              values, dtypes.float32, [1, 2])))

    _add_const("const_op1", [1, 2])
    _add_const("const_op2", [3, 4])

    # Create an add that has two constants as inputs.
    add_op = input_graph_def.node.add()
    add_op.op = "Add"
    add_op.attr["T"].CopyFrom(float_dtype)
    add_op.name = "add_op"
    add_op.input.extend(["const_op1", "const_op2"])

    # Create a relu that reads from the add.
    relu_op = input_graph_def.node.add()
    relu_op.op = "Relu"
    relu_op.attr["T"].CopyFrom(float_dtype)
    relu_op.name = "relu_op"
    relu_op.input.extend(["add_op"])

    # We're specifying that add_op is the final output, and so the relu isn't
    # needed.
    transformed_graph_def = TransformGraph(input_graph_def, [], ["add_op"],
                                           ["strip_unused_nodes"])

    # We expect that the relu is no longer present after running the transform.
    for node in transformed_graph_def.node:
      self.assertNotEqual("Relu", node.op)
if __name__ == "__main__":
  test.main()  # Run the graph-transform tests when executed as a script.
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/graph_transforms/python/transform_graph_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Multipurpose TensorFlow Docker Helper.
- Assembles Dockerfiles
- Builds images (and optionally runs image tests)
- Pushes images to Docker Hub (provided with credentials)
Logs are written to stderr; the list of successfully built images is
written to stdout.
Read README.md (in this directory) for instructions!
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import errno
import itertools
import json
import multiprocessing
import os
import platform
import re
import shutil
import sys
from absl import app
from absl import flags
import cerberus
import docker
import yaml
FLAGS = flags.FLAGS

# --- Docker Hub upload flags ---
flags.DEFINE_string('hub_username', None,
                    'Dockerhub username, only used with --upload_to_hub')
flags.DEFINE_string(
    'hub_password', None,
    ('Dockerhub password, only used with --upload_to_hub. Use from an env param'
     ' so your password isn\'t in your history.'))
flags.DEFINE_integer('hub_timeout', 3600,
                     'Abort Hub upload if it takes longer than this.')
flags.DEFINE_string(
    'repository', 'tensorflow',
    'Tag local images as {repository}:tag (in addition to the '
    'hub_repository, if uploading to hub)')
flags.DEFINE_string(
    'hub_repository', None,
    'Push tags to this Docker Hub repository, e.g. tensorflow/tensorflow')
flags.DEFINE_boolean(
    'upload_to_hub',
    False,
    ('Push built images to Docker Hub (you must also provide --hub_username, '
     '--hub_password, and --hub_repository)'),
    short_name='u',
)
# --- Dockerfile generation / build flags ---
# BUG FIX: the help strings for construct_dockerfiles and build_images were
# both the copy-pasted (and inverted) text 'Do not build images'.
flags.DEFINE_boolean(
    'construct_dockerfiles', False,
    'Generate Dockerfiles and write them to --dockerfile_dir.',
    short_name='d')
flags.DEFINE_boolean(
    'keep_temp_dockerfiles',
    False,
    'Retain .temp.Dockerfiles created while building images.',
    short_name='k')
flags.DEFINE_boolean(
    'build_images', False,
    'Build Docker images from the assembled Dockerfiles.', short_name='b')
flags.DEFINE_string(
    'run_tests_path', None,
    ('Execute test scripts on generated Dockerfiles before pushing them. '
     'Flag value must be a full path to the "tests" directory, which is usually'
     ' $(realpath ./tests). A failed tests counts the same as a failed build.'))
flags.DEFINE_boolean(
    'stop_on_failure', False,
    ('Stop processing tags if any one build fails. If False or not specified, '
     'failures are reported but do not affect the other images.'))
flags.DEFINE_boolean(
    'dry_run',
    False,
    'Do not build or deploy anything at all.',
    short_name='n',
)
# --- Tag filtering flags ---
flags.DEFINE_string(
    'exclude_tags_matching',
    None,
    ('Regular expression that skips processing on any tag it matches. Must '
     'match entire string, e.g. ".*gpu.*" ignores all GPU tags.'),
    short_name='x')
flags.DEFINE_string(
    'only_tags_matching',
    None,
    ('Regular expression that skips processing on any tag it does not match. '
     'Must match entire string, e.g. ".*gpu.*" includes only GPU tags.'),
    short_name='i')
# --- Input/output locations ---
flags.DEFINE_string(
    'dockerfile_dir',
    './dockerfiles', 'Path to an output directory for Dockerfiles.'
    ' Will be created if it doesn\'t exist.'
    ' Existing files in this directory will be deleted when new Dockerfiles'
    ' are made.',
    short_name='o')
flags.DEFINE_string(
    'partial_dir',
    './partials',
    'Path to a directory containing foo.partial.Dockerfile partial files.'
    ' can have subdirectories, e.g. "bar/baz.partial.Dockerfile".',
    short_name='p')
flags.DEFINE_multi_string(
    'release', [],
    'Set of releases to build and tag. Defaults to every release type.',
    short_name='r')
flags.DEFINE_multi_string(
    'arg', [],
    ('Extra build arguments. These are used for expanding tag names if needed '
     '(e.g. --arg _TAG_PREFIX=foo) and for using as build arguments (unused '
     'args will print a warning).'),
    short_name='a')
flags.DEFINE_boolean(
    'nocache', False,
    'Disable the Docker build cache; identical to "docker build --no-cache"')
flags.DEFINE_string(
    'spec_file',
    './spec.yml',
    'Path to the YAML specification file',
    short_name='s')
# Schema to verify the contents of tag-spec.yml with Cerberus.
# Must be converted to a dict from yaml to work.
# Note: can add python references with e.g.
# !!python/name:builtins.str
# !!python/name:__main__.funcname
# (but this may not be considered safe?)
SCHEMA_TEXT = """
header:
type: string
slice_sets:
type: dict
keyschema:
type: string
valueschema:
type: list
schema:
type: dict
schema:
add_to_name:
type: string
dockerfile_exclusive_name:
type: string
dockerfile_subdirectory:
type: string
partials:
type: list
schema:
type: string
ispartial: true
test_runtime:
type: string
required: false
tests:
type: list
default: []
schema:
type: string
args:
type: list
default: []
schema:
type: string
isfullarg: true
releases:
type: dict
keyschema:
type: string
valueschema:
type: dict
schema:
is_dockerfiles:
type: boolean
required: false
default: false
upload_images:
type: boolean
required: false
default: true
tag_specs:
type: list
required: true
schema:
type: string
"""
class TfDockerTagValidator(cerberus.Validator):
  """Custom Cerberus validator for TF tag spec.
  Note: Each _validate_foo function's docstring must end with a segment
  describing its own validation schema, e.g. "The rule's arguments are...". If
  you add a new validator, you can copy/paste that section.
  """
  def __init__(self, *args, **kwargs):
    # See http://docs.python-cerberus.org/en/stable/customize.html
    # Stash the set of known partial names so _validate_ispartial can check
    # that the spec only references partials that exist on disk.
    if 'partials' in kwargs:
      self.partials = kwargs['partials']
    # NOTE(review): this passes cerberus.Validator (not TfDockerTagValidator)
    # as the first super() argument, which starts the MRO lookup *above*
    # cerberus.Validator and so skips its own __init__. Presumably this is
    # deliberate per the customization docs above -- confirm before changing.
    super(cerberus.Validator, self).__init__(*args, **kwargs)
  def _validate_ispartial(self, ispartial, field, value):
    """Validate that a partial references an existing partial spec.
    Args:
      ispartial: Value of the rule, a bool
      field: The field being validated
      value: The field's value
    The rule's arguments are validated against this schema:
    {'type': 'boolean'}
    """
    if ispartial and value not in self.partials:
      self._error(field,
                  '{} is not present in the partials directory.'.format(value))
  def _validate_isfullarg(self, isfullarg, field, value):
    """Validate that a string is either a FULL=arg or NOT.
    Args:
      isfullarg: Value of the rule, a bool
      field: The field being validated
      value: The field's value
    The rule's arguments are validated against this schema:
    {'type': 'boolean'}
    """
    # Both directions are checked: isfullarg=True demands ARG=VALUE form,
    # isfullarg=False (used for tag-name expansion args) forbids '='.
    if isfullarg and '=' not in value:
      self._error(field, '{} should be of the form ARG=VALUE.'.format(value))
    if not isfullarg and '=' in value:
      self._error(field, '{} should be of the form ARG (no =).'.format(value))
def eprint(*args, **kwargs):
  """Print to stderr, flushing immediately so build logs stream live."""
  print(*args, file=sys.stderr, flush=True, **kwargs)
def aggregate_all_slice_combinations(spec, slice_set_names):
  """Compute every possible slice grouping for a tag spec.

  Each slice dict is annotated with the name of the set it came from, then
  the cartesian product across all named sets is returned.
  """
  slice_sets = copy.deepcopy(spec['slice_sets'])
  for set_name in slice_set_names:
    for entry in slice_sets[set_name]:
      entry['set_name'] = set_name
  groups = (slice_sets[set_name] for set_name in slice_set_names)
  return list(itertools.product(*groups))
def build_name_from_slices(format_string, slices, args, is_dockerfile=False):
  """Build the tag name (e.g. "cpu-devel...") from a list of slices.

  Fills `format_string`'s {placeholders} from `args` plus each slice's
  add_to_name; when is_dockerfile is set, a slice's dockerfile_exclusive_name
  (if present) takes precedence over its add_to_name.
  """
  fields = copy.deepcopy(args)
  for one_slice in slices:
    fields[one_slice['set_name']] = one_slice['add_to_name']
  if is_dockerfile:
    for one_slice in slices:
      if 'dockerfile_exclusive_name' in one_slice:
        fields[one_slice['set_name']] = one_slice['dockerfile_exclusive_name']
  return format_string.format(**fields)
def update_args_dict(args_dict, updater):
  """Merge more arg values into `args_dict` from a list or a dict.

  A list updater holds 'KEY=VALUE' strings (entries without '=' are
  ignored); a dict updater is merged key-by-key. Returns args_dict.
  """
  if isinstance(updater, dict):
    for key, value in updater.items():
      args_dict[key] = value
  elif isinstance(updater, list):
    for item in updater:
      key, sep, value = item.partition('=')
      if sep:
        args_dict[key] = value
  return args_dict
def get_slice_sets_and_required_args(slice_sets, tag_spec):
  """Extract used slice sets and required CLI arguments from a spec string.

  For example, {FOO}{bar}{bat} finds FOO, bar, and bat. Assuming bar and bat
  are both named slice sets, FOO must be specified on the command line.

  Args:
    slice_sets: Dict of named slice sets
    tag_spec: The tag spec string, e.g. {_FOO}{blep}

  Returns:
    (used_slice_sets, required_args), a tuple of lists
  """
  bracketed_names = re.findall(r'\{([^}]+)\}', tag_spec)
  used_slice_sets = [n for n in bracketed_names if n in slice_sets]
  required_args = [n for n in bracketed_names if n not in slice_sets]
  return (used_slice_sets, required_args)
def gather_tag_args(slices, cli_input_args, required_args):
  """Collect all CLI- and slice-specified args for one tag.

  Slice args are applied first, then CLI args (CLI wins on conflict).
  Exits the process if any required arg is still missing.
  """
  merged = {}
  for one_slice in slices:
    merged = update_args_dict(merged, one_slice['args'])
  merged = update_args_dict(merged, cli_input_args)
  for required in required_args:
    if required in merged:
      continue
    eprint(('> Error: {} is not a valid slice_set, and also isn\'t an arg '
            'provided on the command line. If it is an arg, please specify '
            'it with --arg. If not, check the slice_sets list.'.format(
                required)))
    exit(1)
  return merged
def gather_slice_list_items(slices, key):
  """Flatten the lists stored under `key` across all slices that have it."""
  flattened = []
  for one_slice in slices:
    if key in one_slice:
      flattened.extend(one_slice[key])
  return flattened
def find_first_slice_value(slices, key):
  """Return the first non-None value of `key` among the slices, else None."""
  return next(
      (s[key] for s in slices if key in s and s[key] is not None), None)
def assemble_tags(spec, cli_args, enabled_releases, all_partials):
  """Gather all the tags based on our spec.

  Args:
    spec: Nested dict containing full Tag spec
    cli_args: List of ARG=foo arguments to pass along to Docker build
    enabled_releases: List of releases to parse. Empty list = all
    all_partials: Dict of every partial, for reference

  Returns:
    Dict mapping each tag name to a list of build definitions (the same tag
    name can be produced by more than one release/tag_spec).
  """
  tag_data = collections.defaultdict(list)
  for name, release in spec['releases'].items():
    for tag_spec in release['tag_specs']:
      if enabled_releases and name not in enabled_releases:
        eprint('> Skipping release {}'.format(name))
        continue
      # Split the spec string's {placeholders} into named slice sets vs.
      # values that must be supplied via --arg on the command line.
      used_slice_sets, required_cli_args = get_slice_sets_and_required_args(
          spec['slice_sets'], tag_spec)
      # One tag is generated per element of the cartesian product of the
      # used slice sets.
      slice_combos = aggregate_all_slice_combinations(spec, used_slice_sets)
      for slices in slice_combos:
        tag_args = gather_tag_args(slices, cli_args, required_cli_args)
        tag_name = build_name_from_slices(tag_spec, slices, tag_args,
                                          release['is_dockerfiles'])
        used_partials = gather_slice_list_items(slices, 'partials')
        used_tests = gather_slice_list_items(slices, 'tests')
        test_runtime = find_first_slice_value(slices, 'test_runtime')
        dockerfile_subdirectory = find_first_slice_value(
            slices, 'dockerfile_subdirectory')
        # The Dockerfile body is the spec header plus the used partials,
        # joined in order.
        dockerfile_contents = merge_partials(spec['header'], used_partials,
                                             all_partials)
        tag_data[tag_name].append({
            'release': name,
            'tag_spec': tag_spec,
            'is_dockerfiles': release['is_dockerfiles'],
            'upload_images': release['upload_images'],
            'cli_args': tag_args,
            'dockerfile_subdirectory': dockerfile_subdirectory or '',
            'partials': used_partials,
            'tests': used_tests,
            'test_runtime': test_runtime,
            'dockerfile_contents': dockerfile_contents,
        })
  return tag_data
def merge_partials(header, used_partials, all_partials):
  """Join the header and each used partial's contents with newlines."""
  pieces = [header]
  for partial_name in used_partials:
    pieces.append(all_partials[partial_name])
  return '\n'.join(pieces)
def upload_in_background(hub_repository, dock, image, tag):
  """Upload a docker image (to be used by multiprocessing).

  Tags the already-built image into `hub_repository` and pushes it; the
  push's streamed result is echoed to stdout.

  Args:
    hub_repository: Docker Hub repository to push to, e.g. "tf/tf".
    dock: a docker client object (docker.from_env()).
    image: the built Image object to tag and push.
    tag: the tag string to publish under.
  """
  image.tag(hub_repository, tag=tag)
  print(dock.images.push(hub_repository, tag=tag))
def mkdir_p(path):
  """Create a directory and its parents; succeed silently if it exists."""
  try:
    os.makedirs(path)
  except OSError as err:
    # Only "already exists" is benign; re-raise everything else.
    if err.errno != errno.EEXIST:
      raise
def gather_existing_partials(partial_path):
  """Find and read all available partials.

  Args:
    partial_path (string): read partials from this directory.

  Returns:
    Dict[string, string] of partial short names (like "ubuntu/python" or
    "bazel") to the full contents of that partial.
  """
  suffix = '.partial.Dockerfile'
  partials = {}
  for directory, _, filenames in os.walk(partial_path):
    for filename in filenames:
      full_path = os.path.join(directory, filename)
      if suffix not in full_path:
        eprint(('> Probably not a problem: skipping {}, which is not a '
                'partial.').format(full_path))
        continue
      # partial_dir/foo/bar.partial.Dockerfile -> foo/bar
      short_name = full_path[len(partial_path) + 1:-len(suffix)]
      with open(full_path, 'r') as partial_file:
        partials[short_name] = partial_file.read()
  return partials
def main(argv):
  """Assemble Dockerfiles and optionally build, test, and upload images.

  Steps, each gated by its flag:
    1. Load and validate the YAML tag spec (--spec_file) against SCHEMA_TEXT.
    2. Write Dockerfiles (--construct_dockerfiles).
    3. Build images (--build_images), running tests if --run_tests_path.
    4. Upload built images to Docker Hub (--upload_to_hub).

  Successfully built tags are printed to stdout; logs go to stderr.

  Args:
    argv: positional command-line arguments; only the program name is allowed.

  Raises:
    app.UsageError: if extra positional arguments are supplied.
  """
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')
  # Read the full spec file, used for everything
  with open(FLAGS.spec_file, 'r') as spec_file:
    tag_spec = yaml.safe_load(spec_file)
  # Get existing partial contents
  partials = gather_existing_partials(FLAGS.partial_dir)
  # Abort if spec.yaml is invalid
  schema = yaml.safe_load(SCHEMA_TEXT)
  v = TfDockerTagValidator(schema, partials=partials)
  if not v.validate(tag_spec):
    eprint('> Error: {} is an invalid spec! The errors are:'.format(
        FLAGS.spec_file))
    eprint(yaml.dump(v.errors, indent=2))
    exit(1)
  tag_spec = v.normalized(tag_spec)
  # Assemble tags and images used to build them
  all_tags = assemble_tags(tag_spec, FLAGS.arg, FLAGS.release, partials)
  # Empty Dockerfile directory if building new Dockerfiles
  if FLAGS.construct_dockerfiles:
    eprint('> Emptying Dockerfile dir "{}"'.format(FLAGS.dockerfile_dir))
    shutil.rmtree(FLAGS.dockerfile_dir, ignore_errors=True)
    mkdir_p(FLAGS.dockerfile_dir)
  # Set up Docker helper
  dock = docker.from_env()
  # Login to Docker if uploading images
  if FLAGS.upload_to_hub:
    if not FLAGS.hub_username:
      eprint('> Error: please set --hub_username when uploading to Dockerhub.')
      exit(1)
    if not FLAGS.hub_repository:
      eprint(
          '> Error: please set --hub_repository when uploading to Dockerhub.')
      exit(1)
    if not FLAGS.hub_password:
      eprint('> Error: please set --hub_password when uploading to Dockerhub.')
      exit(1)
    dock.login(
        username=FLAGS.hub_username,
        password=FLAGS.hub_password,
    )
  # Each tag has a name ('tag') and a definition consisting of the contents
  # of its Dockerfile, its build arg list, etc.
  failed_tags = []
  succeeded_tags = []
  for tag, tag_defs in all_tags.items():
    for tag_def in tag_defs:
      eprint('> Working on {}'.format(tag))
      if FLAGS.exclude_tags_matching and re.match(FLAGS.exclude_tags_matching,
                                                  tag):
        eprint('>> Excluded due to match against "{}".'.format(
            FLAGS.exclude_tags_matching))
        continue
      if FLAGS.only_tags_matching and not re.match(FLAGS.only_tags_matching,
                                                   tag):
        eprint('>> Excluded due to failure to match against "{}".'.format(
            FLAGS.only_tags_matching))
        continue
      # Write releases marked "is_dockerfiles" into the Dockerfile directory
      if FLAGS.construct_dockerfiles and tag_def['is_dockerfiles']:
        path = os.path.join(FLAGS.dockerfile_dir,
                            tag_def['dockerfile_subdirectory'],
                            tag + '.Dockerfile')
        eprint('>> Writing {}...'.format(path))
        if not FLAGS.dry_run:
          mkdir_p(os.path.dirname(path))
          with open(path, 'w') as f:
            f.write(tag_def['dockerfile_contents'])
      # Don't build any images for dockerfile-only releases
      if not FLAGS.build_images:
        continue
      # Only build images for host architecture
      proc_arch = platform.processor()
      is_x86 = proc_arch.startswith('x86')
      if (is_x86 and any([arch in tag for arch in ['ppc64le']]) or
          not is_x86 and proc_arch not in tag):
        continue
      # Generate a temporary Dockerfile to use to build, since docker-py
      # needs a filepath relative to the build context (i.e. the current
      # directory)
      dockerfile = os.path.join(FLAGS.dockerfile_dir, tag + '.temp.Dockerfile')
      if not FLAGS.dry_run:
        with open(dockerfile, 'w') as f:
          f.write(tag_def['dockerfile_contents'])
      eprint('>> (Temporary) writing {}...'.format(dockerfile))
      repo_tag = '{}:{}'.format(FLAGS.repository, tag)
      eprint('>> Building {} using build args:'.format(repo_tag))
      for arg, value in tag_def['cli_args'].items():
        eprint('>>> {}={}'.format(arg, value))
      # Note that we are NOT using cache_from, which appears to limit
      # available cache layers to those from explicitly specified layers. Many
      # of our layers are similar between local builds, so we want to use the
      # implied local build cache.
      tag_failed = False
      image, logs = None, []
      if not FLAGS.dry_run:
        try:
          # Use low level APIClient in order to stream log output
          resp = dock.api.build(
              timeout=FLAGS.hub_timeout,
              path='.',
              nocache=FLAGS.nocache,
              dockerfile=dockerfile,
              buildargs=tag_def['cli_args'],
              tag=repo_tag)
          last_event = None
          image_id = None
          # Manually process log output extracting build success and image id
          # in order to get built image
          while True:
            try:
              output = next(resp).decode('utf-8')
              json_output = json.loads(output.strip('\r\n'))
              if 'stream' in json_output:
                eprint(json_output['stream'], end='')
                match = re.search(r'(^Successfully built |sha256:)([0-9a-f]+)$',
                                  json_output['stream'])
                if match:
                  image_id = match.group(2)
                last_event = json_output['stream']
                # collect all log lines into the logs object
                logs.append(json_output)
            except StopIteration:
              eprint('Docker image build complete.')
              break
            except ValueError:
              eprint('Error parsing from docker image build: {}'.format(output))
          # If Image ID is not set, the image failed to built properly. Raise
          # an error in this case with the last log line and all logs
          if image_id:
            image = dock.images.get(image_id)
          else:
            raise docker.errors.BuildError(last_event or 'Unknown', logs)
          # Run tests if requested, and dump output
          # Could be improved by backgrounding, but would need better
          # multiprocessing support to track failures properly.
          if FLAGS.run_tests_path:
            if not tag_def['tests']:
              eprint('>>> No tests to run.')
            for test in tag_def['tests']:
              eprint('>> Testing {}...'.format(test))
              # BUG FIX: the original wrapped this call in a stray one-element
              # tuple ("container, = ...),"); containers.run() already returns
              # the container when detach=True, so no unpacking is needed.
              container = dock.containers.run(
                  image,
                  '/tests/' + test,
                  working_dir='/',
                  log_config={'type': 'journald'},
                  detach=True,
                  stderr=True,
                  stdout=True,
                  volumes={
                      FLAGS.run_tests_path: {
                          'bind': '/tests',
                          'mode': 'ro'
                      }
                  },
                  runtime=tag_def['test_runtime'])
              ret = container.wait()
              code = ret['StatusCode']
              out = container.logs(stdout=True, stderr=False)
              err = container.logs(stdout=False, stderr=True)
              container.remove()
              if out:
                eprint('>>> Output stdout:')
                eprint(out.decode('utf-8'))
              else:
                eprint('>>> No test standard out.')
              if err:
                eprint('>>> Output stderr:')
                # BUG FIX: the original printed stdout (out) here instead of
                # the captured stderr.
                eprint(err.decode('utf-8'))
              else:
                eprint('>>> No test standard err.')
              if code != 0:
                eprint('>> {} failed tests with status: "{}"'.format(
                    repo_tag, code))
                failed_tags.append(tag)
                tag_failed = True
                if FLAGS.stop_on_failure:
                  eprint('>> ABORTING due to --stop_on_failure!')
                  exit(1)
              else:
                eprint('>> Tests look good!')
        except docker.errors.BuildError as e:
          eprint('>> {} failed to build with message: "{}"'.format(
              repo_tag, e.msg))
          eprint('>> Build logs follow:')
          log_lines = [l.get('stream', '') for l in e.build_log]
          eprint(''.join(log_lines))
          failed_tags.append(tag)
          tag_failed = True
          if FLAGS.stop_on_failure:
            eprint('>> ABORTING due to --stop_on_failure!')
            exit(1)
        # Clean temporary dockerfiles if they were created earlier
        if not FLAGS.keep_temp_dockerfiles:
          os.remove(dockerfile)
      # Upload new images to DockerHub as long as they built + passed tests
      if FLAGS.upload_to_hub:
        if not tag_def['upload_images']:
          continue
        if tag_failed:
          continue
        eprint('>> Uploading to {}:{}'.format(FLAGS.hub_repository, tag))
        if not FLAGS.dry_run:
          p = multiprocessing.Process(
              target=upload_in_background,
              args=(FLAGS.hub_repository, dock, image, tag))
          p.start()
      if not tag_failed:
        succeeded_tags.append(tag)
  if failed_tags:
    eprint(
        '> Some tags failed to build or failed testing, check scrollback for '
        'errors: {}'.format(','.join(failed_tags)))
    exit(1)
  eprint('> Writing built{} tags to standard out.'.format(
      ' and tested' if FLAGS.run_tests_path else ''))
  for tag in succeeded_tags:
    print('{}:{}'.format(FLAGS.repository, tag))
# Script entry point: absl parses the flags defined above, then runs main().
if __name__ == '__main__':
  app.run(main)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/dockerfiles/assembler.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Help include git hash in tensorflow bazel build.
This creates symlinks from the internal git repository directory so
that the build system can see changes in the version state. We also
remember what branch git was on so when the branch changes we can
detect that the ref file is no longer correct (so we can suggest users
run ./configure again).
NOTE: this script is only used in opensource.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from builtins import bytes # pylint: disable=redefined-builtin
import argparse
import json
import os
import shutil
import subprocess
def parse_branch_ref(filename):
  """Given a filename of a .git/HEAD file return ref path.

  In particular, if git is in detached head state, this will
  return None. If git is in attached head, it will return
  the branch reference. E.g. if on 'master', the HEAD will
  contain 'ref: refs/heads/master' so 'refs/heads/master'
  will be returned.

  Example: parse_branch_ref(".git/HEAD")

  Args:
    filename: file to treat as a git HEAD file
  Returns:
    None if detached head, otherwise ref subpath
  Raises:
    RuntimeError: if the HEAD file is unparseable.
  """
  with open(filename) as head_file:
    content = head_file.read().strip()
  fields = content.split(" ")
  if len(fields) == 1:
    # Detached HEAD: the file holds a bare commit hash.
    return None
  if len(fields) == 2 and fields[0] == "ref:":
    return fields[1].strip()
  raise RuntimeError("Git directory has unparseable HEAD")
def configure(src_base_path, gen_path, debug=False):
  """Configure `src_base_path` to embed git hashes if available.

  Recreates `gen_path` and populates it with "head" and "branch_ref" files
  (symlinks into .git where possible, empty files otherwise) plus a
  spec.json describing the repository state.

  Args:
    src_base_path: base path of the source tree (where .git may live).
    gen_path: output directory for the generated git-state files.
    debug: if True, print the generated layout to stdout.

  Raises:
    RuntimeError: if gen_path cannot be recreated.
  """
  # TODO(aselle): No files generated or symlinked here are deleted by
  # the build system. I don't know of a way to do it in bazel. It
  # should only be a problem if somebody moves a sandbox directory
  # without running ./configure again.
  git_path = os.path.join(src_base_path, ".git")
  # Remove and recreate the path
  if os.path.exists(gen_path):
    if os.path.isdir(gen_path):
      try:
        shutil.rmtree(gen_path)
      except OSError:
        raise RuntimeError("Cannot delete directory %s due to permission "
                           "error, inspect and remove manually" % gen_path)
    else:
      # BUG FIX: the original passed two arguments to RuntimeError (stray
      # comma) and applied % to a string with no format specifier, which
      # raised TypeError instead of the intended message.
      raise RuntimeError("Cannot delete non-directory %s, inspect "
                         "and remove manually" % gen_path)
  os.makedirs(gen_path)
  if not os.path.isdir(gen_path):
    raise RuntimeError("gen_git_source.py: Failed to create dir")
  # file that specifies what the state of the git repo is
  spec = {}
  # value file names will be mapped to the keys
  link_map = {"head": None, "branch_ref": None}
  if not os.path.isdir(git_path):
    # No git directory
    spec["git"] = False
    open(os.path.join(gen_path, "head"), "w").write("")
    open(os.path.join(gen_path, "branch_ref"), "w").write("")
  else:
    # Git directory, possibly detached or attached
    spec["git"] = True
    spec["path"] = src_base_path
    git_head_path = os.path.join(git_path, "HEAD")
    spec["branch"] = parse_branch_ref(git_head_path)
    link_map["head"] = git_head_path
    if spec["branch"] is not None:
      # attached method
      link_map["branch_ref"] = os.path.join(git_path, *
                                            os.path.split(spec["branch"]))
  # Create symlinks or dummy files
  for target, src in link_map.items():
    if src is None:
      open(os.path.join(gen_path, target), "w").write("")
    elif not os.path.exists(src):
      # Git repo is configured in a way we don't support such as having
      # packed refs. Even though in a git repo, tf.__git_version__ will not
      # be accurate.
      # TODO(mikecase): Support grabbing git info when using packed refs.
      open(os.path.join(gen_path, target), "w").write("")
      spec["git"] = False
    else:
      try:
        # In python 3.5, symlink function exists even on Windows. But requires
        # Windows Admin privileges, otherwise an OSError will be thrown.
        if hasattr(os, "symlink"):
          os.symlink(src, os.path.join(gen_path, target))
        else:
          shutil.copy2(src, os.path.join(gen_path, target))
      except OSError:
        shutil.copy2(src, os.path.join(gen_path, target))
  json.dump(spec, open(os.path.join(gen_path, "spec.json"), "w"), indent=2)
  if debug:
    print("gen_git_source.py: list %s" % gen_path)
    # BUG FIX: the original used '+' here, printing a literal "%s" followed
    # by the repr instead of substituting it.
    print("gen_git_source.py: %s" % repr(os.listdir(gen_path)))
    print("gen_git_source.py: spec is %r" % spec)
def get_git_version(git_base_path, git_tag_override):
  """Get the git version from the repository.

  This function runs `git describe ...` in the path given as `git_base_path`.
  This will return a string of the form:
  <base-tag>-<number of commits since tag>-<shortened sha hash>

  For example, 'v0.10.0-1585-gbb717a6' means v0.10.0 was the last tag when
  compiled. 1585 commits are after that commit tag, and we can get back to this
  version by running `git checkout gbb717a6`.

  Args:
    git_base_path: where the .git directory is located
    git_tag_override: Override the value for the git tag. This is useful for
      releases where we want to build the release before the git tag is
      created.
  Returns:
    A bytestring representing the git version
  """
  unknown_label = b"unknown"
  try:
    # Force to bytes so this works on python 2 and python 3
    val = bytes(
        subprocess.check_output([
            "git",
            str("--git-dir=%s/.git" % git_base_path),
            str("--work-tree=" + git_base_path),
            "describe",
            "--long",
            "--tags",
        ]).strip())
    if git_tag_override and val:
      split_val = val.split(b"-")
      if len(split_val) < 3:
        raise Exception(
            ("Expected git version in format 'TAG-COMMITS AFTER TAG-HASH' "
             "but got '%s'") % val)
      # There might be "-" in the tag name. But we can be sure that the final
      # two "-" are those inserted by the git describe command.
      val = b"-".join(
          [bytes(git_tag_override, "utf-8"), b"0", split_val[-1]])
    return val if val else unknown_label
  except (subprocess.CalledProcessError, OSError):
    # git missing, or the path is not a usable repository.
    return unknown_label
def write_version_info(filename, git_version):
  """Write a c file that defines the version functions.

  Args:
    filename: filename to write to.
    git_version: the result of a git describe.
  """
  # A version containing a quote or backslash would break the generated C
  # string literal; substitute a sentinel rather than failing the build.
  if b"\"" in git_version or b"\\" in git_version:
    git_version = b"git_version_is_invalid"  # do not cause build to fail!
  contents = """/* Generated by gen_git_source.py */
#include <string>
const char* tf_git_version() {return "%s";}
const char* tf_compiler_version() {
#ifdef _MSC_VER
#define STRINGIFY(x) #x
#define TOSTRING(x) STRINGIFY(x)
  return "MSVC " TOSTRING(_MSC_FULL_VER);
#else
  return __VERSION__;
#endif
}
int tf_cxx11_abi_flag() {
#ifdef _GLIBCXX_USE_CXX11_ABI
  return _GLIBCXX_USE_CXX11_ABI;
#else
  return 0;
#endif
}
int tf_monolithic_build() {
#ifdef TENSORFLOW_MONOLITHIC_BUILD
  return 1;
#else
  return 0;
#endif
}
""" % git_version.decode("utf-8")
  with open(filename, "w") as version_file:
    version_file.write(contents)
def generate(arglist, git_tag_override=None):
  """Generate version_info.cc as given `destination_file`.

  Args:
    arglist: should be a sequence that contains
             spec, head_symlink, ref_symlink, destination_file.

      `destination_file` is the filename where version_info.cc will be written

      `spec` is a filename where the file contains a JSON dictionary
        'git' bool that is true if the source is in a git repo
        'path' base path of the source code
        'branch' the name of the ref specification of the current branch/tag

      `head_symlink` is a filename to HEAD that is cross-referenced against
        what is contained in the json branch designation.

      `ref_symlink` is unused in this script but passed, because the build
        system uses that file to detect when commits happen.

    git_tag_override: Override the value for the git tag. This is useful for
      releases where we want to build the release before the git tag is
      created.

  Raises:
    RuntimeError: If ./configure needs to be run, RuntimeError will be raised.
  """
  spec_path, head_symlink, _, dest_file = arglist  # ref_symlink is unused
  with open(spec_path) as spec_file:
    data = json.load(spec_file)
  if not data["git"]:
    # Not a git checkout; embed a placeholder version.
    git_version = b"unknown"
  else:
    # If HEAD has moved to a different branch since ./configure, the
    # symlinked branch_ref no longer tracks the right file -- bail out.
    old_branch = data["branch"]
    new_branch = parse_branch_ref(head_symlink)
    if new_branch != old_branch:
      raise RuntimeError(
          "Run ./configure again, branch was '%s' but is now '%s'" %
          (old_branch, new_branch))
    git_version = get_git_version(data["path"], git_tag_override)
  write_version_info(dest_file, git_version)
def raw_generate(output_file, source_dir, git_tag_override=None):
  """Simple generator used for cmake/make build systems.

  This does not create any symlinks. It requires the build system
  to build unconditionally.

  Args:
    output_file: Output filename for the version info cc
    source_dir: Base path of the source code
    git_tag_override: Override the value for the git tag. This is useful for
      releases where we want to build the release before the git tag is
      created.
  """
  write_version_info(output_file,
                     get_git_version(source_dir, git_tag_override))
def _str_to_bool(value):
  """Parse a CLI boolean string.

  BUG FIX: the original used `type=bool`, under which any non-empty string
  (including "False") is truthy, so `--debug False` enabled debugging.
  """
  return value.lower() in ("true", "t", "1", "yes", "y")


parser = argparse.ArgumentParser(description="""Git hash injection into bazel.
If used with --configure <path> will search for git directory and put symlinks
into source so that a bazel genrule can call --generate""")

parser.add_argument(
    "--debug",
    type=_str_to_bool,
    help="print debugging information about paths",
    default=False)

parser.add_argument(
    "--configure", type=str,
    help="Path to configure as a git repo dependency tracking sentinel")

parser.add_argument(
    "--gen_root_path", type=str,
    help="Root path to place generated git files (created by --configure).")

parser.add_argument(
    "--git_tag_override", type=str,
    help="Override git tag value in the __git_version__ string. Useful when "
         "creating release builds before the release tag is created.")

parser.add_argument(
    "--generate",
    type=str,
    help="Generate given spec-file, HEAD-symlink-file, ref-symlink-file",
    nargs="+")

parser.add_argument(
    "--raw_generate",
    type=str,
    help="Generate version_info.cc (simpler version used for cmake/make)")

parser.add_argument(
    "--source_dir",
    type=str,
    help="Base path of the source code (used for cmake/make)")

args = parser.parse_args()

# Dispatch to exactly one mode; --configure and --generate require their
# companion arguments, --raw_generate defaults to the current directory.
if args.configure is not None:
  if args.gen_root_path is None:
    raise RuntimeError("Must pass --gen_root_path arg when running --configure")
  configure(args.configure, args.gen_root_path, debug=args.debug)
elif args.generate is not None:
  generate(args.generate, args.git_tag_override)
elif args.raw_generate is not None:
  source_path = "."
  if args.source_dir is not None:
    source_path = args.source_dir
  raw_generate(args.raw_generate, source_path, args.git_tag_override)
else:
  raise RuntimeError("--configure or --generate or --raw_generate "
                     "must be used")
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/tools/git/gen_git_source.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to help with the TensorFlow 2.0 transition.
This module is meant for TensorFlow internal implementation, not for users of
the TensorFlow library. For that see tf.compat instead.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
_force_enable = None
def enable():
  """Enables v2 behaviors.

  Sets the module-level override so enabled() returns True regardless of
  the TF2_BEHAVIOR environment variable.
  """
  global _force_enable
  _force_enable = True
def disable():
  """Disables v2 behaviors.

  Sets the module-level override so enabled() returns False regardless of
  the TF2_BEHAVIOR environment variable.
  """
  global _force_enable
  _force_enable = False
def enabled():
  """Returns True iff TensorFlow 2.0 behavior should be enabled.

  The explicit enable()/disable() override wins; otherwise the TF2_BEHAVIOR
  environment variable decides (any value other than "0" enables v2).
  """
  if _force_enable is not None:
    return _force_enable
  return os.getenv("TF2_BEHAVIOR", "0") != "0"
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/tf2.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Import core names of TensorFlow.
Programs that want to build TensorFlow Ops and Graphs without having to import
the constructors and utilities individually can import this file:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
"""
import ctypes
import importlib
import sys
import traceback
# TODO(drpng): write up instructions for editing this file in a doc and point to
# the doc instead.
# If you want to edit this file to expose modules in public tensorflow API, you
# need to follow these steps:
# 1. Consult with tensorflow team and get approval for adding a new API to the
# public interface.
# 2. Document the module in the gen_docs_combined.py.
# 3. Import the module in the main tensorflow namespace by adding an import
# statement in this file.
# 4. Sanitize the entry point by making sure that your module does not expose
# transitively imported modules used for implementation, such as os, sys.
# go/tf-wildcard-import
# pylint: disable=wildcard-import,g-bad-import-order,g-import-not-at-top
import numpy as np
from tensorflow.python import pywrap_tensorflow
# Protocol buffers
from tensorflow.core.framework.graph_pb2 import *
from tensorflow.core.framework.node_def_pb2 import *
from tensorflow.core.framework.summary_pb2 import *
from tensorflow.core.framework.attr_value_pb2 import *
from tensorflow.core.protobuf.meta_graph_pb2 import TensorInfo
from tensorflow.core.protobuf.meta_graph_pb2 import MetaGraphDef
from tensorflow.core.protobuf.config_pb2 import *
from tensorflow.core.protobuf.tensorflow_server_pb2 import *
from tensorflow.core.util.event_pb2 import *
# Framework
from tensorflow.python.framework.framework_lib import * # pylint: disable=redefined-builtin
from tensorflow.python.framework.versions import *
from tensorflow.python.framework import config
from tensorflow.python.framework import errors
from tensorflow.python.framework import graph_util
# Session
from tensorflow.python.client.client_lib import *
# Ops
from tensorflow.python.ops.standard_ops import *
# Namespaces
from tensorflow.python.ops import initializers_ns as initializers
# pylint: enable=wildcard-import
# Bring in subpackages.
from tensorflow.python import data
from tensorflow.python import distribute
from tensorflow.python import keras
from tensorflow.python.feature_column import feature_column_lib as feature_column
from tensorflow.python.layers import layers
from tensorflow.python.module import module
from tensorflow.python.ops import bitwise_ops as bitwise
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import image_ops as image
from tensorflow.python.ops import manip_ops as manip
from tensorflow.python.ops import metrics
from tensorflow.python.ops import nn
from tensorflow.python.ops import ragged
from tensorflow.python.ops import sets
from tensorflow.python.ops import stateful_random_ops
from tensorflow.python.ops.distributions import distributions
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.ops.losses import losses
from tensorflow.python.ops.signal import signal
from tensorflow.python.profiler import profiler
from tensorflow.python.saved_model import saved_model
from tensorflow.python.summary import summary
from tensorflow.python.tpu import api
from tensorflow.python.user_ops import user_ops
from tensorflow.python.util import compat
# Import audio ops to make sure the ops are registered.
from tensorflow.python.ops import gen_audio_ops as _
# Import boosted trees ops to make sure the ops are registered (but unused).
from tensorflow.python.ops import gen_boosted_trees_ops as _gen_boosted_trees_ops
# Import cudnn rnn ops to make sure their ops are registered.
from tensorflow.python.ops import gen_cudnn_rnn_ops as _
# Import rnn_ops to make sure their ops are registered.
from tensorflow.python.ops import gen_rnn_ops as _
# Import the names from python/training.py as train.Name.
from tensorflow.python.training import training as train
# Sub-package for performing i/o directly instead of via ops in a graph.
from tensorflow.python.lib.io import python_io
# Make some application and test modules available.
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import sysconfig
from tensorflow.python.platform import test
from tensorflow.python.compat import v2_compat
from tensorflow.python.util.all_util import make_all
from tensorflow.python.util.tf_export import tf_export
# Eager execution
from tensorflow.python.eager.context import executing_eagerly
from tensorflow.python.eager.remote import connect_to_remote_host
from tensorflow.python.eager.def_function import function
from tensorflow.python.framework.ops import enable_eager_execution
# Check whether TF2_BEHAVIOR is turned on.
from tensorflow.python.eager import monitoring as _monitoring
from tensorflow.python import tf2 as _tf2
# Gauge tracking whether TF2 behavior is active for this process, as
# reported by tf2.enabled() at import time. The description string
# previously contained a stray '"' before the period; fixed here.
_tf2_gauge = _monitoring.BoolGauge('/tensorflow/api/tf2_enable',
                                   'Environment variable TF2_BEHAVIOR is set.')
_tf2_gauge.get_cell().set(_tf2.enabled())
# Necessary for the symbols in this module to be taken into account by
# the namespace management system (API decorators).
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
# XLA JIT compiler APIs.
from tensorflow.python.compiler.xla import jit
from tensorflow.python.compiler.xla import xla
# Required due to `rnn` and `rnn_cell` not being imported in `nn` directly
# (due to a circular dependency issue: rnn depends on layers).
# These assignments expose the RNN entry points under the `tf.nn` namespace
# by aliasing them onto the already-imported `nn` module object.
nn.dynamic_rnn = rnn.dynamic_rnn
nn.static_rnn = rnn.static_rnn
nn.raw_rnn = rnn.raw_rnn
nn.bidirectional_dynamic_rnn = rnn.bidirectional_dynamic_rnn
nn.static_state_saving_rnn = rnn.static_state_saving_rnn
nn.rnn_cell = rnn_cell
# Export protos
# The proto classes below are brought into this namespace by the wildcard
# protobuf imports near the top of the file (graph_pb2, config_pb2, etc.),
# hence the undefined-variable suppression. tf_export registers each class
# under the listed v1 API name(s).
# pylint: disable=undefined-variable
tf_export(v1=['AttrValue'])(AttrValue)
tf_export(v1=['ConfigProto'])(ConfigProto)
tf_export(v1=['Event', 'summary.Event'])(Event)
tf_export(v1=['GPUOptions'])(GPUOptions)
tf_export(v1=['GraphDef'])(GraphDef)
tf_export(v1=['GraphOptions'])(GraphOptions)
tf_export(v1=['HistogramProto'])(HistogramProto)
tf_export(v1=['LogMessage'])(LogMessage)
tf_export(v1=['MetaGraphDef'])(MetaGraphDef)
tf_export(v1=['NameAttrList'])(NameAttrList)
tf_export(v1=['NodeDef'])(NodeDef)
tf_export(v1=['OptimizerOptions'])(OptimizerOptions)
tf_export(v1=['RunMetadata'])(RunMetadata)
tf_export(v1=['RunOptions'])(RunOptions)
tf_export(v1=['SessionLog', 'summary.SessionLog'])(SessionLog)
tf_export(v1=['Summary', 'summary.Summary'])(Summary)
tf_export(v1=['summary.SummaryDescription'])(SummaryDescription)
tf_export(v1=['SummaryMetadata'])(SummaryMetadata)
tf_export(v1=['summary.TaggedRunMetadata'])(TaggedRunMetadata)
tf_export(v1=['TensorInfo'])(TensorInfo)
# pylint: enable=undefined-variable
# Special dunders that we choose to export:
# Idiom fix: use a set literal instead of set([...]), which builds a
# throwaway list first (flake8-comprehensions C405).
_exported_dunders = {
    '__version__',
    '__git_version__',
    '__compiler_version__',
    '__cxx11_abi_flag__',
    '__monolithic_build__',
}

# Expose symbols minus dunders, unless they are whitelisted above.
# This is necessary to export our dunders.
__all__ = [s for s in dir() if s in _exported_dunders or not s.startswith('_')]
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""If possible, exports all symbols with RTLD_GLOBAL.
Note that this file is only imported by pywrap_tensorflow.py if this is a static
build (meaning there is no explicit framework cc_binary shared object dependency
of _pywrap_tensorflow_internal.so). For regular (non-static) builds, RTLD_GLOBAL
is not necessary, since the dynamic dependencies of custom/contrib ops are
explicit.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ctypes
import sys
# On UNIX-based platforms, pywrap_tensorflow is a SWIG-generated python library
# that dynamically loads _pywrap_tensorflow.so. The default mode for loading
# keeps all the symbol private and not visible to other libraries that may be
# loaded. Setting the mode to RTLD_GLOBAL to make the symbols visible, so that
# custom op libraries imported using `tf.load_op_library()` can access symbols
# defined in _pywrap_tensorflow.so.
# dlopen-flag manipulation is only available on UNIX-like platforms that
# expose sys.{get,set}dlopenflags; elsewhere both helpers are no-ops.
_use_rtld_global = (hasattr(sys, 'getdlopenflags')
                    and hasattr(sys, 'setdlopenflags'))
if _use_rtld_global:
  # Snapshot the interpreter-default flags at import time so that
  # reset_dlopen_flags() can restore them later.
  _default_dlopen_flags = sys.getdlopenflags()


def set_dlopen_flags():
  """Adds RTLD_GLOBAL to the dlopen flags so extension symbols are visible."""
  if not _use_rtld_global:
    return
  sys.setdlopenflags(_default_dlopen_flags | ctypes.RTLD_GLOBAL)


def reset_dlopen_flags():
  """Restores the dlopen flags captured at import time."""
  if not _use_rtld_global:
    return
  sys.setdlopenflags(_default_dlopen_flags)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/pywrap_dlopen_global_flags.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""A wrapper for TensorFlow SWIG-generated bindings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ctypes
import sys
import traceback
from tensorflow.python.platform import self_check
# Perform pre-load sanity checks in order to produce a more actionable error
# than we get from an error during SWIG import.
self_check.preload_check()

# pylint: disable=wildcard-import,g-import-not-at-top,unused-import,line-too-long

try:
  # This import is expected to fail if there is an explicit shared object
  # dependency (with_framework_lib=true), since we do not need RTLD_GLOBAL.
  from tensorflow.python import pywrap_dlopen_global_flags
  _use_dlopen_global_flags = True
except ImportError:
  # Non-static build: custom-op symbol visibility is handled by explicit
  # shared-object dependencies instead of RTLD_GLOBAL.
  _use_dlopen_global_flags = False

# On UNIX-based platforms, pywrap_tensorflow is a SWIG-generated
# python library that dynamically loads _pywrap_tensorflow.so.
_can_set_rtld_local = (hasattr(sys, 'getdlopenflags')
                       and hasattr(sys, 'setdlopenflags'))
if _can_set_rtld_local:
  # Capture the interpreter-default flags so they can be restored after the
  # native extension has been imported below.
  _default_dlopen_flags = sys.getdlopenflags()

try:
  if _use_dlopen_global_flags:
    pywrap_dlopen_global_flags.set_dlopen_flags()
  elif _can_set_rtld_local:
    # Ensure RTLD_LOCAL behavior for platforms where it isn't the default
    # (macOS). On Linux RTLD_LOCAL is 0, so this does nothing (and would not
    # override an RTLD_GLOBAL in _default_dlopen_flags).
    sys.setdlopenflags(_default_dlopen_flags | ctypes.RTLD_LOCAL)

  # This is the import that actually loads the native runtime; everything
  # above and below only manages dlopen flags around it.
  from tensorflow.python.pywrap_tensorflow_internal import *
  from tensorflow.python.pywrap_tensorflow_internal import __version__
  from tensorflow.python.pywrap_tensorflow_internal import __git_version__
  from tensorflow.python.pywrap_tensorflow_internal import __compiler_version__
  from tensorflow.python.pywrap_tensorflow_internal import __cxx11_abi_flag__
  from tensorflow.python.pywrap_tensorflow_internal import __monolithic_build__

  if _use_dlopen_global_flags:
    pywrap_dlopen_global_flags.reset_dlopen_flags()
  elif _can_set_rtld_local:
    sys.setdlopenflags(_default_dlopen_flags)
except ImportError:
  # Re-raise with an actionable message; the original traceback is embedded
  # so users can report the underlying loader error.
  msg = """%s\n\nFailed to load the native TensorFlow runtime.\n
See https://www.tensorflow.org/install/errors\n
for some common reasons and solutions. Include the entire stack trace
above this error message when asking for help.""" % traceback.format_exc()
  raise ImportError(msg)

# pylint: enable=wildcard-import,g-import-not-at-top,unused-import,line-too-long
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/pywrap_tensorflow.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Conversion of plain Python into TensorFlow graph code.
NOTE: In TensorFlow 2.0, AutoGraph is automatically applied when using
`tf.function`. This module contains lower-level APIs for advanced use.
For more information, see the
[AutoGraph guide](https://www.tensorflow.org/guide/autograph).
By equivalent graph code we mean code that generates a TensorFlow graph when
run. The generated graph has the same effects as the original code when executed
(for example with `tf.function` or `tf.compat.v1.Session.run`). In other words,
using AutoGraph can be thought of as running Python in TensorFlow.
"""
# TODO(b/119833526): Link to the new tf.function + autograph tutorial.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# TODO(mdan): Bring only the relevant symbols to the top level.
from tensorflow.python.autograph import operators
from tensorflow.python.autograph import utils
from tensorflow.python.autograph.core.converter import ConversionOptions
from tensorflow.python.autograph.core.converter import Feature
from tensorflow.python.autograph.impl.api import AutoGraphError
from tensorflow.python.autograph.impl.api import convert
from tensorflow.python.autograph.impl.api import converted_call
from tensorflow.python.autograph.impl.api import do_not_convert
from tensorflow.python.autograph.impl.api import StackTraceMapper
from tensorflow.python.autograph.impl.api import to_code
from tensorflow.python.autograph.impl.api import to_graph
from tensorflow.python.autograph.lang.directives import set_element_type
from tensorflow.python.autograph.lang.directives import set_loop_options
from tensorflow.python.autograph.lang.special_functions import stack
from tensorflow.python.autograph.utils import ag_logging
from tensorflow.python.util.all_util import remove_undocumented
# TODO(mdan): Revisit this list once we finalize the generated code mechanism.
# Symbols preserved in the public `tf.autograph` namespace; everything else in
# this module is stripped by remove_undocumented() below.
_allowed_symbols = [
    # Main API
    'AutoGraphError',
    'ConversionOptions',
    'Feature',
    'StackTraceMapper',
    'convert',
    'converted_call',
    'do_not_convert',
    'to_code',
    'to_graph',
    # Overloaded operators
    'operators',
    # Python language "extensions"
    'set_element_type',
    'set_loop_options',
    'stack',
    # NOTE(review): 'tensor_list' is listed here but no matching import is
    # visible in this module -- confirm whether it is exported elsewhere.
    'tensor_list',
    # Utilities: to be removed
    'utils',
]

remove_undocumented(__name__, _allowed_symbols)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Core conversion logic, serves as main point of access."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import imp
import sys
import threading
import types
import unittest
import weakref
import gast
from tensorflow.python.autograph import operators
from tensorflow.python.autograph import utils
from tensorflow.python.autograph.converters import arg_defaults
from tensorflow.python.autograph.converters import asserts
from tensorflow.python.autograph.converters import break_statements
from tensorflow.python.autograph.converters import call_trees
from tensorflow.python.autograph.converters import conditional_expressions
from tensorflow.python.autograph.converters import continue_statements
from tensorflow.python.autograph.converters import control_flow
from tensorflow.python.autograph.converters import directives
from tensorflow.python.autograph.converters import function_scopes
from tensorflow.python.autograph.converters import lists
from tensorflow.python.autograph.converters import logical_expressions
from tensorflow.python.autograph.converters import return_statements
from tensorflow.python.autograph.converters import slices
from tensorflow.python.autograph.core import config
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.core import function_wrappers
from tensorflow.python.autograph.core import naming
from tensorflow.python.autograph.core import unsupported_features_checker
from tensorflow.python.autograph.lang import special_functions
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.autograph.pyct import inspect_utils
from tensorflow.python.autograph.pyct import origin_info
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import pretty_printer
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.utils import ag_logging as logging
from tensorflow.python.util import tf_inspect
class _ConvertedEntityFactoryInfo(
    collections.namedtuple(
        '_ConvertedEntityFactoryInfo',
        ('module_name', 'converted_name', 'factory_factory_name', 'source_map'))
):
  """Holds metadata about a converted entity stored as a dynamic factory.

  The dynamic factory is assumed to be created by _wrap_into_dynamic_factory,
  be named `factory_factory_name` and located inside the module named as
  `module_name`.

  Attributes:
    module_name: Text, the name of the module containing the entity.
    converted_name: Text, the name of the converted entity.
    factory_factory_name: Text, the name of the dynamic factory.
    source_map: Dict.
  """

  def __str__(self):
    return '_ConvertedEntityFactoryInfo(%s in %s)' % (self.converted_name,
                                                      self.module_name)

  def get_module(self):
    """Returns the (already imported) module that holds the converted code."""
    return sys.modules[self.module_name]

  def get_factory(self):
    """Looks up the outer factory in its module and calls it."""
    assert self.module_name in sys.modules
    owning_module = sys.modules[self.module_name]
    factory_of_factory = getattr(owning_module, self.factory_factory_name)
    return factory_of_factory()
# TODO(mdan): Add a garbage collection hook for cleaning up modules.
class _ConversionCache(object):
  """A two-level cache keyed by a weakly-referenced object plus a subkey.

  The primary keys are held as weak references (entries disappear when the
  key object is destroyed); subkeys are ordinary hashable values.
  This class is generic - see the call site for how the keys and values are
  defined.
  """

  def __init__(self):
    self._cache = weakref.WeakKeyDictionary()

  def has(self, key, subkey):
    """Returns True iff a value is stored under (key, subkey)."""
    if key in self._cache:
      return subkey in self._cache[key]
    return False

  def __getitem__(self, key):
    """Returns the subkey->value bucket for key, creating it if absent."""
    if key not in self._cache:
      # Initializing the bucket here supports the `cache[key][subkey] = value`
      # usage pattern.
      self._cache[key] = {}
    return self._cache[key]
# Using a re-entrant lock to guard against the unlikely possibility that the
# conversion process triggers additional code execution.
_CACHE_LOCK = threading.RLock()

# Process-wide caches: _CACHE maps entities to _ConvertedEntityFactoryInfo
# values; _UNCONVERTED_CACHE records entities known not to be convertible.
_CACHE = _ConversionCache()
_UNCONVERTED_CACHE = _ConversionCache()
# Note: strictly speaking, a simple factory might have been sufficient for
# functions. But the double factory approach allows us to control the closure
# and globals of the converted code in a cleaner fashion.
# TODO(mdan): A simple factory may be sufficient.
def _wrap_into_dynamic_factory(nodes, entity_name, factory_factory_name,
                               factory_name, closure_vars, future_features):
  """Wraps an AST into the body of a dynamic factory.

  This uses the dynamic factory (factory of factory) pattern to achieve the
  following:

  1. The inner factory, dynamically creates the entity represented by nodes.
  2. The entity is parametrized by `ag__`, the internal AutoGraph module.
  3. The outer factory creates the inner factory with a lexical scope
     in which `closure_vars` are bound local variables. This in turn allows
     the caller to control the exact closure (i.e. non-global free variables)
     for the inner factory.

  The AST is expected to define some symbol named by `entity_name`.

  Args:
    nodes: ast.AST
    entity_name: Union[Text, ast.AST]
    factory_factory_name: Text
    factory_name: Text
    closure_vars: Iterable[Text]
    future_features: Iterable[Text], see EntityInfo.future_features.

  Returns:
    ast.AST
  """
  if not isinstance(nodes, (list, tuple)):
    nodes = (nodes,)

  dummy_closure_defs = []
  for var_name in closure_vars:
    template = """
      var_name = None
    """
    dummy_closure_defs.extend(templates.replace(template, var_name=var_name))

  if future_features:
    future_imports = gast.ImportFrom(
        module='__future__',
        names=[gast.alias(name=name, asname=None) for name in future_features],
        level=0)
  else:
    # templates.replace drops an empty list, leaving no import statement.
    future_imports = []

  # These dummy symbol declarations create local variables in a function scope,
  # so that the Python parser correctly marks them as free non-global variables
  # upon load (that is, it creates cell slots for each symbol). Their values are
  # not used, as the cells are swapped with the original entity's cells after
  # the code has been loaded.
  template = """
    future_imports
    def factory_factory_name():
      dummy_closure_defs
      def factory_name(ag__, ag_source_map__, ag_module__):
        entity_defs
        entity_name.ag_source_map = ag_source_map__
        entity_name.ag_module = ag_module__
        entity_name.autograph_info__ = {}
        return entity_name
      return factory_name
  """
  return templates.replace(
      template,
      future_imports=future_imports,
      factory_factory_name=factory_factory_name,
      factory_name=factory_name,
      dummy_closure_defs=dummy_closure_defs,
      entity_defs=nodes,
      entity_name=entity_name)
def _convert_with_cache(entity, program_ctx, free_nonglobal_var_names):
  """Returns a (possibly cached) factory for the converted result of entity.

  Args:
    entity: The Python entity (function, method or class) to convert.
    program_ctx: A ProgramContext object; its options form part of the
      cache subkey.
    free_nonglobal_var_names: Names of the entity's free non-global
      variables; also part of the cache subkey.

  Returns:
    A _ConvertedEntityFactoryInfo describing the conversion result.
  """
  # The cache key is the entity's code object if it defined one, otherwise it's
  # the entity itself. Keying by the code object allows caching of functions
  # that are dynamically created e.g. in a loop.
  if hasattr(entity, '__code__'):
    key = entity.__code__
  else:
    key = entity

  # The cache subkey encompasses any conversion options on which the generated
  # code may depend.
  # The cached factory includes the necessary definitions to distinguish
  # between the global and non-global free variables. For this reason, the
  # cache subkey includes the names of the free non-globals.
  subkey = (program_ctx.options, frozenset(free_nonglobal_var_names))

  with _CACHE_LOCK:
    # The cache values are _ConvertedEntityFactoryInfo objects.
    if _CACHE.has(key, subkey):
      # TODO(mdan): Check whether the module is still loaded.
      converted_entity_info = _CACHE[key][subkey]
      logging.log(3, 'Cache hit for entity %s key %s subkey %s: %s', entity,
                  key, subkey, converted_entity_info)
      return converted_entity_info

    logging.log(1, 'Entity %s is not cached for key %s subkey %s', entity, key,
                subkey)

    nodes, converted_name, entity_info = convert_entity_to_ast(
        entity, program_ctx)

    # Generate fresh names for the two factory levels within the entity's
    # namespace to avoid collisions.
    namer = naming.Namer(entity_info.namespace)
    factory_factory_name = namer.new_symbol('create_converted_entity_factory',
                                            ())
    factory_name = namer.new_symbol('create_converted_entity', ())
    nodes = _wrap_into_dynamic_factory(nodes, converted_name,
                                       factory_factory_name, factory_name,
                                       free_nonglobal_var_names,
                                       entity_info.future_features)

    # Load the generated AST as a real module so it can be cached and
    # re-instantiated later.
    module, _, source_map = compiler.ast_to_object(
        nodes, include_source_map=True)
    module_name = module.__name__

    converted_entity_info = _ConvertedEntityFactoryInfo(
        module_name=module_name,
        converted_name=converted_name,
        factory_factory_name=factory_factory_name,
        source_map=source_map)
    _CACHE[key][subkey] = converted_entity_info
    return converted_entity_info
def _instantiate(entity, converted_entity_info, free_nonglobal_var_names):
  """Creates a converted instance and binds it to match original entity.

  Args:
    entity: The original function, method or class that was converted.
    converted_entity_info: _ConvertedEntityFactoryInfo for the conversion.
    free_nonglobal_var_names: Names of the entity's free (closure) variables,
      in the order they appear in entity.__code__.co_freevars.

  Returns:
    The converted entity, with globals and closure rebound to the original's.
  """
  factory = converted_entity_info.get_factory()

  # `factory` is currently bound to the empty module it was loaded from.
  # It must instead be bound to the globals and closure from the original
  # entity.
  if tf_inspect.isfunction(entity) or tf_inspect.ismethod(entity):
    entity_globals = entity.__globals__
    entity_closure = entity.__closure__ or ()
  elif hasattr(entity, '__module__'):
    entity_globals = sys.modules[entity.__module__].__dict__
    entity_closure = ()
  # NOTE(review): there is no else branch here; an entity that is neither a
  # function/method nor has __module__ would raise NameError at the assert
  # below -- confirm callers never pass such entities.
  assert len(entity_closure) == len(free_nonglobal_var_names)

  # Fit the original entity's cells to match the order of factory's cells.
  original_names_and_cells = dict(zip(free_nonglobal_var_names, entity_closure))
  new_factory_cells = tuple(
      original_names_and_cells[name] for name in factory.__code__.co_freevars)

  bound_factory = types.FunctionType(
      code=factory.__code__,
      globals=entity_globals,
      name=factory.__name__,
      argdefs=(),
      closure=new_factory_cells)

  # Two other free vars: the internal "ag__" module and the source
  # map. These are wired via the parameters of the factory.
  converted_entity = bound_factory(  # pylint:disable=not-callable
      ag_internal, converted_entity_info.source_map,
      converted_entity_info.get_module())

  if tf_inspect.isfunction(entity) or tf_inspect.ismethod(entity):
    # Attach the default argument to the converted function.
    converted_entity.__defaults__ = entity.__defaults__
    if hasattr(entity, '__kwdefaults__'):
      converted_entity.__kwdefaults__ = entity.__kwdefaults__

  return converted_entity
def convert(entity, program_ctx):
  """Converts an entity into an equivalent entity.

  The entity's free (closure) variables are preserved; a ValueError is raised
  if the entity shadows the reserved `ag__` symbol with something other than
  the internal AutoGraph module.
  """
  is_function_like = (
      tf_inspect.isfunction(entity) or tf_inspect.ismethod(entity))
  free_nonglobal_var_names = (
      entity.__code__.co_freevars if is_function_like else ())

  for cell_index, cell_name in enumerate(free_nonglobal_var_names):
    if cell_name != 'ag__':
      continue
    if entity.__closure__[cell_index].cell_contents is not ag_internal:
      raise ValueError('entity {} uses the reserved symbol "{}"'.format(
          entity, cell_name))
    # TODO(mdan): In extreme cases, other ag__ symbols may also be clobbered.

  converted_entity_info = _convert_with_cache(entity, program_ctx,
                                              free_nonglobal_var_names)
  return _instantiate(entity, converted_entity_info, free_nonglobal_var_names)
# TODO(mdan): allow_namedtuple_subclass should be hardcoded to True.
def is_whitelisted_for_graph(
    o, check_call_override=True, allow_namedtuple_subclass=False):
  """Checks whether an entity is whitelisted for use in graph mode.

  Examples of whitelisted entities include all members of the tensorflow
  package.

  Args:
    o: A Python entity.
    check_call_override: Reserved for internal use. When set to `False`, it
      disables the rule according to which classes are whitelisted if their
      __call__ method is whitelisted.
    allow_namedtuple_subclass: Reserved for internal use. When `True`,
      namedtuple subclasses are not whitelisted.

  Returns:
    Boolean
  """
  # TODO(b/120224672): Fix this.
  if isinstance(o, functools.partial):
    # tf_inspect.getmodule(functools.partial(...)) otherwise returns None since
    # functools.partial objects do not have a __module__ attribute.
    m = functools
  else:
    m = tf_inspect.getmodule(o)

  # Examples of callables that lack a __module__ property include builtins.
  if hasattr(m, '__name__'):
    # Module-based CONVERSION_RULES take precedence over all heuristics below;
    # the first matching rule decides.
    for rule in config.CONVERSION_RULES:
      action = rule.get_action(m)
      if action == config.Action.CONVERT:
        logging.log(2, 'Not whitelisted: %s: %s', o, rule)
        return False
      elif action == config.Action.DO_NOT_CONVERT:
        logging.log(2, 'Whitelisted: %s: %s', o, rule)
        return True

  if tf_inspect.isgeneratorfunction(o):
    # Generator functions are never converted; warn so users know why.
    logging.warn(
        'Entity %s appears to be a generator function. It will not be converted'
        ' by AutoGraph.', o)
    logging.log(2, 'Whitelisted: %s: generator functions are not converted', o)
    return True

  if (check_call_override and not tf_inspect.isclass(o) and
      hasattr(o, '__call__')):
    # Callable objects: whitelisted if their __call__ method is.
    # The type check avoids infinite recursion around the __call__ method
    # of function objects.
    if (type(o) != type(o.__call__)) and is_whitelisted_for_graph(o.__call__):  # pylint: disable=unidiomatic-typecheck
      logging.log(2, 'Whitelisted: %s: object __call__ whitelisted', o)
      return True

  owner_class = None
  if tf_inspect.ismethod(o):
    # Methods of whitelisted classes are also whitelisted, even if they are
    # bound via user subclasses.
    #
    # For example, suppose `tf.Foo` has a method called `bar`, and `baz` is
    # defined as below. `tf.Foo` is whitelisted. Then `baz.bar` is also
    # whitelisted.
    #
    #   class Custom(tf.Foo):
    #     pass
    #
    #   baz = Custom()
    #
    # For the example above, if `Custom` did overload `bar`, then it would no
    # longer be whitelisted.
    owner_class = inspect_utils.getmethodclass(o)
    if owner_class is not None:
      if issubclass(owner_class, unittest.TestCase):
        logging.log(2, 'Whitelisted: %s: method of TestCase subclass', o)
        return True

      owner_class = inspect_utils.getdefiningclass(o, owner_class)
      if is_whitelisted_for_graph(
          owner_class,
          check_call_override=False,
          allow_namedtuple_subclass=True):
        logging.log(2, 'Whitelisted: %s: owner is whitelisted %s', o,
                    owner_class)
        return True

  if inspect_utils.isnamedtuple(o):
    # Due to the way they're constructed, namedtuple types cannot be converted
    # because they don't expose source code. But we assume they are safe for
    # graph mode since they are just containers.
    if allow_namedtuple_subclass:
      # Only direct namedtuples qualify; subclasses (which may add behavior)
      # fall through to the default rule.
      if not any(inspect_utils.isnamedtuple(base) for base in o.__bases__):
        logging.log(2, 'Whitelisted: %s: named tuple', o)
        return True
    else:
      logging.log(2, 'Whitelisted: %s: named tuple or subclass', o)
      return True

  logging.log(2, 'Not whitelisted: %s: default rule', o)
  return False
def check_cached_unconverted(entity, options):
  """Returns True if entity was previously recorded as unconvertible."""
  try:
    return _UNCONVERTED_CACHE.has(entity, options)
  except TypeError:
    # Entities that are unhashable or don't allow weakrefs can never have
    # been cached in the first place.
    return False
def cache_unconverted(entity, options):
  """Records that entity should not be converted under the given options."""
  try:
    _UNCONVERTED_CACHE[entity][options] = True
  except TypeError:
    # Entities that are unhashable or don't allow weakrefs simply aren't
    # cached; this is a best-effort optimization.
    pass
# TODO(mdan): Rename to convert_*_node to avoid confusion with convert.
def convert_entity_to_ast(o, program_ctx):
  """Compile a Python entity into equivalent TensorFlow.

  Args:
    o: A Python entity.
    program_ctx: A ProgramContext object.

  Returns:
    A tuple (ast, new_name, namespace):
      * ast: An AST representing an entity with interface equivalent to `o`,
        but which when executed it creates TF a graph.
      * new_name: The symbol name under which the new entity can be found.
      * namespace: A dict mapping all symbols visible to the converted entity,
        keyed by their symbol name.

  Raises:
    ValueError: if the entity type is not supported.
  """
  logging.log(1, 'Converting %s', o)

  if tf_inspect.isclass(o):
    nodes, name, entity_info = convert_class_to_ast(o, program_ctx)
  elif tf_inspect.isfunction(o):
    nodes, name, entity_info = convert_func_to_ast(o, program_ctx)
  elif tf_inspect.ismethod(o):
    nodes, name, entity_info = convert_func_to_ast(o, program_ctx)
  elif hasattr(o, '__class__'):
    # Note: this should only be raised when attempting to convert the object
    # directly. converted_call should still support it.
    # NOTE(review): in Python 3 every object has __class__, which appears to
    # make the ValueError branch below unreachable -- confirm intended.
    raise NotImplementedError(
        'cannot convert entity "{}": object conversion is not yet'
        ' supported.'.format(o))
  else:
    raise ValueError(
        'Entity "%s" has unsupported type "%s". Only functions and classes are '
        'supported for now.' % (o, type(o)))

  if logging.has_verbosity(2):
    logging.log(2, 'Compiled output of %s:\n\n%s\n', o,
                compiler.ast_to_source(nodes))
  if logging.has_verbosity(4):
    for n in nodes:
      logging.log(4, 'Compiled AST of %s:\n\n%s\n\n', o,
                  pretty_printer.fmt(n, color=False))

  return nodes, name, entity_info
def convert_class_to_ast(c, program_ctx):
  """Specialization of `convert_entity_to_ast` for classes.

  Converts each method defined directly on `c`, then emits a new ClassDef
  wrapping the converted methods, preceded by import nodes for any
  whitelisted base classes.

  Args:
    c: class, the class to convert.
    program_ctx: A ProgramContext object.

  Returns:
    Same shape as `convert_entity_to_ast`: (nodes, new_name, entity_info).

  Raises:
    ValueError: if the class has no member methods, or its methods were built
      with mismatched `__future__` imports.
    NotImplementedError: if a base class is neither `object` nor whitelisted.
  """
  # TODO(mdan): Revisit this altogether. Not sure we still need it.
  converted_members = {}
  method_filter = lambda m: tf_inspect.isfunction(m) or tf_inspect.ismethod(m)
  members = tf_inspect.getmembers(c, predicate=method_filter)
  if not members:
    raise ValueError('cannot convert %s: no member methods' % c)

  # TODO(mdan): Don't clobber namespaces for each method in one class namespace.
  # The assumption that one namespace suffices for all methods only holds if
  # all methods were defined in the same module.
  # If, instead, functions are imported from multiple modules and then spliced
  # into the class, then each function has its own globals and __future__
  # imports that need to stay separate.
  # For example, C's methods could both have `global x` statements referring to
  # mod1.x and mod2.x, but using one namespace for C would cause a conflict.
  #   from mod1 import f1
  #   from mod2 import f2
  #   class C(object):
  #     method1 = f1
  #     method2 = f2
  class_namespace = {}
  future_features = None
  for _, m in members:
    # Only convert the members that are directly defined by the class.
    if inspect_utils.getdefiningclass(m, c) is not c:
      continue
    (node,), _, entity_info = convert_func_to_ast(
        m, program_ctx=program_ctx, do_rename=False)
    class_namespace.update(entity_info.namespace)
    converted_members[m] = node

    # TODO(mdan): Similarly check the globals.
    # All methods must agree on their __future__ imports, since they end up
    # sharing a single generated module.
    if future_features is None:
      future_features = entity_info.future_features
    elif frozenset(future_features) ^ frozenset(entity_info.future_features):
      # Note: we can support this case if ever needed.
      raise ValueError(
          'cannot convert {}: if has methods built with mismatched future'
          ' features: {} and {}'.format(c, future_features,
                                        entity_info.future_features))
  namer = naming.Namer(class_namespace)
  class_name = namer.class_name(c.__name__)

  # Process any base classes: if the superclass is of a whitelisted type, an
  # absolute import line is generated.
  output_nodes = []
  renames = {}
  base_names = []
  for base in c.__bases__:
    # isinstance(object, base) is True for bases that `object` itself is an
    # instance of (notably `object`); those are emitted as a plain 'object'.
    if isinstance(object, base):
      base_names.append('object')
      continue
    if is_whitelisted_for_graph(base):
      alias = namer.new_symbol(base.__name__, ())
      output_nodes.append(
          gast.ImportFrom(
              module=base.__module__,
              names=[gast.alias(name=base.__name__, asname=alias)],
              level=0))
    else:
      raise NotImplementedError(
          'Conversion of classes that do not directly extend classes from'
          ' whitelisted modules is temporarily suspended. If this breaks'
          ' existing code please notify the AutoGraph team immediately.')
    base_names.append(alias)
    renames[qual_names.QN(base.__name__)] = qual_names.QN(alias)

  # Generate the definition of the converted class.
  bases = [
      gast.Name(n, ctx=gast.Load(), annotation=None, type_comment=None)
      for n in base_names]
  class_def = gast.ClassDef(
      class_name,
      bases=bases,
      keywords=[],
      body=list(converted_members.values()),
      decorator_list=[])
  # Make a final pass to replace references to the class or its base classes.
  # Most commonly, this occurs when making super().__init__() calls.
  # TODO(mdan): Making direct references to superclass' superclass will fail.
  class_def = qual_names.resolve(class_def)
  renames[qual_names.QN(c.__name__)] = qual_names.QN(class_name)
  class_def = ast_util.rename_symbols(class_def, renames)

  output_nodes.append(class_def)

  # TODO(mdan): Find a way better than forging this object.
  entity_info = transformer.EntityInfo(
      source_code=None,
      source_file=None,
      future_features=future_features,
      namespace=class_namespace)

  return output_nodes, class_name, entity_info
def _add_reserved_symbol(namespace, name, entity):
if name not in namespace:
namespace[name] = entity
elif namespace[name] != entity:
raise ValueError('The name "%s" is reserved and may not be used.' % name)
# Lazily-built module object exposed to generated code under the name 'ag__'.
ag_internal = None


# TODO(mdan): Move into core or replace with an actual importable module.
def _add_self_references(namespace, autograph_module):
  """Adds namespace references to the module that exposes the api itself.

  Args:
    namespace: dict, symbol table into which the 'ag__' module is inserted.
    autograph_module: module, the module whose public API is re-exposed to
      generated code.
  """
  global ag_internal
  if ag_internal is None:
    # Craft a module that exposes parts of the external API as well as certain
    # internal modules. Built once and reused for all conversions.
    ag_internal = imp.new_module('autograph')
    ag_internal.__dict__.update(autograph_module.__dict__)
    ag_internal.ConversionOptions = converter.ConversionOptions
    ag_internal.STD = converter.STANDARD_OPTIONS
    ag_internal.Feature = converter.Feature
    ag_internal.utils = utils
    ag_internal.FunctionScope = function_wrappers.FunctionScope
    ag_internal.with_function_scope = function_wrappers.with_function_scope
    # TODO(mdan): Add safeguards against name clashes.
    # We don't want to create a submodule because we want the operators to be
    # accessible as ag__.<operator>
    ag_internal.__dict__.update(special_functions.__dict__)
    ag_internal.__dict__.update(operators.__dict__)

  _add_reserved_symbol(namespace, 'ag__', ag_internal)
def convert_func_to_ast(f, program_ctx, do_rename=True):
  """Specialization of `convert_entity_to_ast` for callable functions.

  Args:
    f: function or lambda, the entity to convert.
    program_ctx: A ProgramContext object.
    do_rename: bool, whether to give the converted function a new name. When
      False, the converted node keeps `f`'s original name.

  Returns:
    A tuple ((node,), new_name, entity_info); see `convert_entity_to_ast`.

  Raises:
    ValueError: if the source of a lambda cannot be unambiguously identified.
  """
  future_features = inspect_utils.getfutureimports(f)
  node, source = parser.parse_entity(f, future_features=future_features)
  logging.log(3, 'Source code of %s:\n\n%s\n', f, source)
  # Parsed AST should contain future imports and one function def node.

  # In general, the output of inspect.getsource is inexact for lambdas because
  # it uses regex matching to adjust the exact location around the line number
  # that CPython records. Then, the entire containing line is returned, which
  # we may have trouble disambiguating. For example:
  # x, y = lambda: 1, lambda: 2
  if f.__name__ == '<lambda>':
    nodes = ast_util.find_matching_definitions(node, f)
    if len(nodes) != 1:
      raise ValueError(
          'Unable to identify source code of lambda function {}. It was'
          ' defined on this line: {}, which must contain a single lambda with'
          ' matching signature. To avoid ambiguity, define each lambda'
          ' in a separate expression.'.format(f, source))
    node, = nodes

  # TODO(znado): Place inside standard_analysis.
  origin_info.resolve_entity(node, source, f)

  namespace = inspect_utils.getnamespace(f)
  _add_self_references(namespace, program_ctx.autograph_module)
  namer = naming.Namer(namespace)

  if isinstance(node, gast.Lambda):
    new_name = namer.new_symbol('tf__lambda', ())
  elif do_rename:
    new_name = namer.function_name(f.__name__)
  else:
    new_name = f.__name__

  entity_info = transformer.EntityInfo(
      source_code=source,
      source_file='<fragment>',
      future_features=future_features,
      namespace=namespace)
  context = converter.EntityContext(namer, entity_info, program_ctx, new_name)
  node = node_to_graph(node, context)

  if isinstance(node, gast.Lambda):
    # Lambdas have no name of their own; bind the converted lambda to a fresh
    # symbol via an assignment node.
    node = gast.Assign(
        targets=[
            gast.Name(
                new_name, ctx=gast.Store(), annotation=None, type_comment=None)
        ],
        value=node)
  elif do_rename:
    node.name = new_name
  else:
    assert node.name == new_name

  return (node,), new_name, entity_info
def node_to_graph(node, context):
  """Convert Python code to equivalent TF graph mode code.

  Args:
    node: AST, the code to convert.
    context: converter.EntityContext

  Returns:
    A Python AST node, representing the converted code.
  """
  # TODO(mdan): Insert list_comprehensions somewhere.
  unsupported_features_checker.verify(node)

  # Note: the transformation passes below are order-sensitive; several later
  # passes rely on canonicalizations performed by earlier ones.
  node = converter.standard_analysis(node, context, is_initial=True)
  node = converter.apply_(node, context, function_scopes)
  node = converter.apply_(node, context, arg_defaults)
  node = converter.apply_(node, context, directives)
  node = converter.apply_(node, context, break_statements)
  if context.program.options.uses(converter.Feature.ASSERT_STATEMENTS):
    node = converter.apply_(node, context, asserts)
  # Note: sequencing continue canonicalization before for loop one avoids
  # dealing with the extra loop increment operation that the for
  # canonicalization creates.
  node = converter.apply_(node, context, continue_statements)
  node = converter.apply_(node, context, return_statements)
  if context.program.options.uses(converter.Feature.LISTS):
    node = converter.apply_(node, context, lists)
    node = converter.apply_(node, context, slices)
  node = converter.apply_(node, context, call_trees)
  node = converter.apply_(node, context, control_flow)
  node = converter.apply_(node, context, conditional_expressions)
  node = converter.apply_(node, context, logical_expressions)
  return node
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/impl/conversion.py
|
# python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for api module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.impl import api
from tensorflow.python.framework import constant_op
from tensorflow.python.platform import test
class ApiTest(test.TestCase):
  """Tests for AutoGraph API behavior that requires Python 3-only syntax."""

  def test_converted_call_kwonly_args(self):

    # Keyword-only parameters (`*, a`) are Python 3-only syntax, which is why
    # this test lives in the py3-specific test file.
    def test_fn(*, a):
      return a

    x = api.converted_call(test_fn, converter.ConversionOptions(recursive=True),
                           (), {'a': constant_op.constant(-1)})
    self.assertEqual(-1, self.evaluate(x))

  def test_super_with_no_arg(self):
    test_case_self = self

    class TestBase:

      def plus_three(self, x):
        return x + 3

    class TestSubclass(TestBase):

      def plus_three(self, x):
        # super().plus_three must resolve to TestBase's method, never this
        # override; reaching here means conversion broke super() dispatch.
        test_case_self.fail('This should never be called.')

      def no_arg(self, x):
        # Zero-argument super() is Python 3-only syntax.
        return super().plus_three(x)

    tc = api.converted_call(TestSubclass,
                            converter.ConversionOptions(recursive=True), (), {})

    self.assertEqual(5, tc.no_arg(2))
if __name__ == '__main__':
  # Run the tests in strict conversion mode: conversion failures raise instead
  # of silently falling back to executing the unconverted code.
  os.environ['AUTOGRAPH_STRICT_CONVERSION'] = '1'
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/impl/api_py3_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This module contains the user-facing API for AutoGraph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import functools
import inspect
import os
import pdb
import re
import sys
import textwrap
import traceback
# pylint:disable=g-bad-import-order
import six
# pylint:enable=g-bad-import-order
from tensorflow.python.autograph.core import ag_ctx
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.impl import conversion
from tensorflow.python.autograph.operators import py_builtins
from tensorflow.python.autograph.pyct import errors
from tensorflow.python.autograph.pyct import inspect_utils
from tensorflow.python.autograph.pyct import origin_info
from tensorflow.python.autograph.utils import ag_logging as logging
from tensorflow.python.framework import errors_impl
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util import tf_stack
from tensorflow.python.util.tf_export import tf_export
def is_autograph_strict_conversion_mode():
  """Returns True if AUTOGRAPH_STRICT_CONVERSION is set to a positive integer."""
  flag_value = os.environ.get('AUTOGRAPH_STRICT_CONVERSION', '0')
  return int(flag_value) > 0
# TODO(mdan): Export this symbol.
class AutoGraphError(Exception):
  """Base class for all AutoGraph exceptions."""
class ConversionError(AutoGraphError):
  """Raised during the conversion process."""
class StagingError(AutoGraphError):
  """Raised during the staging (i.e. Python execution) of converted code."""
class _ErrorMetadata(errors.ErrorMetadataBase):
  """AutoGraph-specific error metadata. See base class."""

  def create_exception(self, source_error):
    """Creates a new exception mirroring `source_error`'s type where possible.

    Args:
      source_error: Exception, the error raised while executing converted
        code.

    Returns:
      An exception, ideally of the same type as `source_error`, carrying the
      AutoGraph-augmented message. Falls back to `StagingError` when the
      original type cannot be reconstructed safely.
    """
    preferred_type = type(source_error)
    if issubclass(preferred_type, errors_impl.OpError):
      # Best-effort unpacking of OpError exceptions.
      # TODO(mdan): Use a mechanism that is more future-proof.
      init_argspec = tf_inspect.getfullargspec(preferred_type.__init__)
      message = self.get_message()
      init_args = tuple(init_argspec.args)
      # At the time of this writing, TF errors either take 3 or 4 arguments,
      # with the fourth being error_code.
      if init_args == ('self', 'node_def', 'op', 'message', 'error_code'):
        return preferred_type(
            node_def=source_error.node_def,
            op=source_error.op,
            message=message,
            error_code=self.error_code)
      elif init_args == ('self', 'node_def', 'op', 'message'):
        if 'error_code' in init_argspec.kwonlyargs:
          # Bug fix: this keyword was previously misspelled 'errro_code',
          # which would raise TypeError instead of building the exception.
          return preferred_type(
              node_def=source_error.node_def,
              op=source_error.op,
              message=message,
              error_code=self.error_code)
        else:
          return preferred_type(
              node_def=source_error.node_def,
              op=source_error.op,
              message=message)

    elif preferred_type in (AutoGraphError, ConversionError, StagingError,
                            errors_impl.InaccessibleTensorError,
                            errors_impl.OperatorNotAllowedInGraphError):
      return preferred_type(self.get_message())

    exc = super(_ErrorMetadata, self).create_exception(source_error)
    if exc is not None:
      return exc

    # Note: While changing an error's message property to change the message it
    # displays will probably work a lot of times, there is no standard way in
    # Python to do that. The safest way is therefore to create a new exception.
    # For user defined exceptions, we could define an interface that allowed
    # them to work under this mechanism.
    return StagingError(self.get_message())
class StackTraceMapper(tf_stack.StackTraceMapper):
  """Remaps generated code to code it originated from."""

  def __init__(self, converted_fn):
    # Maps generated-code locations to the original user-code locations.
    # NOTE(review): super().__init__() is not called here; presumably the base
    # class initializes `_effective_source_map` and `parent` — confirm.
    self._source_map = converted_fn.ag_source_map

  def get_effective_source_map(self):
    """Returns a dict of (filename, lineno) -> (filename, lineno, func_name).

    The result merges this mapper's source map with the parent mapper's, with
    entries from this map taking precedence. Computed lazily and cached in
    `_effective_source_map`.
    """
    effective_source_map = self._effective_source_map
    if effective_source_map is None:
      if self.parent is not None:
        parent_map = self.parent.get_effective_source_map()
      else:
        parent_map = {}

      effective_source_map = {}
      for loc, origin in self._source_map.items():
        effective_source_map[(loc.filename, loc.lineno)] = (
            origin.loc.filename, origin.loc.lineno, origin.function_name)

      for key, value in parent_map.items():
        filename, lineno, _ = value
        value_loc = origin_info.LineLocation(filename=filename, lineno=lineno)
        if value_loc in self._source_map:
          # The parent's target is itself generated code; chain through to the
          # true origin recorded in this map.
          origin = self._source_map[value_loc]
          effective_source_map[key] = (
              origin.loc.filename, origin.loc.lineno, origin.function_name)
        else:
          effective_source_map[key] = value
      self._effective_source_map = effective_source_map
    return effective_source_map
def tf_convert(f, ctx, convert_by_default=True, user_requested=False):
  """Decorator that applies AutoGraph to a function.

  Use in internal APIs.

  This API is suitable for high order functions internal to the TensorFlow API,
  and more generally any function to which Autograph is not applied.

  Guidance: convert was a decorator meant for use directly by developers, and
  will be soon deprecated in favor of tf.function. tf_convert is to be called
  from high order functions internal to TF.

  Args:
    f: Callable.
    ctx: ag_ctx.ControlStatusCtx, the Autograph context in which `f` is used.
    convert_by_default: bool, whether to use AutoGraph when the context doesn't
      specify.
    user_requested: bool, whether to ignore the conversion whitelist. See
      ConversionOptions.user_requested.

  Returns:
    Either `f` or the converted version of `f`.
  """
  # Functions already processed by AutoGraph are returned unchanged.
  if hasattr(f, '__ag_compiled'):
    return f

  f_wrapper = f
  decorators, f = tf_decorator.unwrap(f)

  # TODO(mdan): Grab features from context.
  # Pick the wrapper based on the context status; UNSPECIFIED defers to
  # `convert_by_default`.
  if ctx.status == ag_ctx.Status.ENABLED:
    wrapper = convert(recursive=True, user_requested=user_requested)(f)
  elif ctx.status == ag_ctx.Status.DISABLED:
    wrapper = do_not_convert(f)
  elif ctx.status == ag_ctx.Status.UNSPECIFIED:
    if convert_by_default:
      wrapper = convert(recursive=True, user_requested=user_requested)(f)
    else:
      wrapper = call_with_unspecified_conversion_status(f)
  else:
    raise ValueError(ctx.status)

  if decorators:
    # If `f` was decorated, reapply the original decorator chain on top of the
    # converted wrapper.
    wrapper = tf_decorator.rewrap(f_wrapper, f, wrapper)

  setattr(wrapper, '__ag_compiled', True)
  return wrapper
# TODO(mdan): Make private.
def convert(recursive=False, optional_features=None, user_requested=True):
  """Decorator that compiles a function to use TensorFlow ops.

  The decorator is dynamic - it recompiles the target whenever the decorated
  function is called. This means the parameter values are known at conversion.
  It also means that repeated calls with different types of parameters will be
  correctly processed.

  Args:
    recursive: bool, whether to recursively convert any functions or classes
      that the converted function may use.
    optional_features: converted.Feature, allows toggling optional or
      experimental features. When set to None, only the core features are
      enabled.
    user_requested: bool, whether to ignore the conversion whitelist. See
      ConversionOptions.user_requested.

  Returns:
    Callable, a decorator that converts the given function into an equivalent
    function that uses TensorFlow ops.
  """

  def decorator(f):
    """Decorator implementation."""

    def wrapper(*args, **kwargs):
      """Wrapper that calls the converted version of f."""
      options = converter.ConversionOptions(
          recursive=recursive,
          user_requested=user_requested,
          optional_features=optional_features)
      try:
        return converted_call(f, options, args, kwargs)
      except Exception as e:  # pylint:disable=broad-except
        # Errors that carry AutoGraph metadata are re-raised in their
        # rewritten (user-facing) form.
        if hasattr(e, 'ag_error_metadata'):
          raise e.ag_error_metadata.to_exception(e)
        else:
          raise

    if inspect.isfunction(f) or inspect.ismethod(f):
      wrapper = functools.update_wrapper(wrapper, f)

    decorated_wrapper = tf_decorator.make_decorator(f, wrapper)

    # Sometimes the decorator is just desugared, making it impossible to detect.
    # This attribute makes detection easier.
    setattr(decorated_wrapper, '__ag_compiled', True)
    return decorated_wrapper

  return decorator
def call_with_unspecified_conversion_status(func):
  """Decorator that resets the conversion context to the unspecified status."""

  def wrapper(*args, **kwargs):
    # Execute `func` under a fresh UNSPECIFIED context, letting downstream
    # calls re-evaluate whether conversion should apply.
    unspecified_ctx = ag_ctx.ControlStatusCtx(status=ag_ctx.Status.UNSPECIFIED)
    with unspecified_ctx:
      return func(*args, **kwargs)

  if inspect.isfunction(func) or inspect.ismethod(func):
    wrapper = functools.update_wrapper(wrapper, func)

  # Mark the wrapper so AutoGraph does not attempt to convert it again.
  setattr(wrapper, '__ag_compiled', True)
  return wrapper
def do_not_convert_internal(f):
  """Decorator that marks internal functions which do not need conversion."""
  # The marker attribute is checked by converted_call, which bypasses
  # conversion for anything carrying it.
  f.__ag_compiled = True
  return f
@tf_export('autograph.experimental.do_not_convert')
def do_not_convert(func=None):
  """Decorator that suppresses the conversion of a function.

  Args:
    func: function to decorate.

  Returns:
    If `func` is not None, returns a `Callable` which is equivalent to
    `func`, but is not converted by AutoGraph.
    If `func` is None, returns a decorator that, when invoked with a
    single `func` argument, returns a `Callable` equivalent to the
    above case.
  """
  if func is None:
    # Invoked as `@do_not_convert()`: hand back the decorator itself.
    return do_not_convert

  def wrapper(*args, **kwargs):
    # Run the wrapped function with conversion explicitly disabled.
    disabled_ctx = ag_ctx.ControlStatusCtx(status=ag_ctx.Status.DISABLED)
    with disabled_ctx:
      return func(*args, **kwargs)

  if inspect.isfunction(func) or inspect.ismethod(func):
    wrapper = functools.update_wrapper(wrapper, func)

  # Mark the wrapper so converted_call does not attempt conversion again.
  setattr(wrapper, '__ag_compiled', True)
  return wrapper
def _attach_metadata(e, f, converted):
  """Augments an error with the metadata necessary for rewrite.

  Args:
    e: Exception, the error to augment (mutated in place).
    f: Callable, the function during whose execution the error was caught.
    converted: bool, whether `f` is AutoGraph-converted code (and thus has an
      `ag_source_map`).
  """
  if hasattr(e, 'ag_pass_through'):
    # Errors explicitly flagged as pass-through are left untouched.
    return

  metadata = getattr(e, 'ag_error_metadata', None)
  source_map = f.ag_source_map if converted else {}

  if metadata is None:
    # First attachment: record the original error class in the message.
    logging.log(
        1, 'Caught error in %s (converted=%s)', f, converted, exc_info=True)
    message = '{}: {}'.format(e.__class__.__name__, e)
  else:
    message = None

  # Drop the first traceback frame (the call site doing the attaching),
  # keeping the frames relevant to the user.
  cause_tb = traceback.extract_tb(sys.exc_info()[2])[1:]
  e.ag_error_metadata = _ErrorMetadata(cause_tb, metadata, message, source_map)
def _call_unconverted(f, args, kwargs, options, update_cache=True):
  """Calls the original function without converting with AutoGraph."""
  if update_cache:
    conversion.cache_unconverted(f, options)

  if inspect_utils.istfmethodtarget(f):
    # TF method targets are dispatched through their owner's call().
    return f.__self__.call(args, kwargs)

  effective_kwargs = kwargs if kwargs is not None else {}
  try:
    return f(*args, **effective_kwargs)
  except Exception as e:  # pylint:disable=broad-except
    _attach_metadata(e, f, False)
    raise
def _is_known_loaded_type(f, module_name, entity_name):
"""Tests whether the function or method is an instance of a known type."""
if (module_name not in sys.modules or
not hasattr(sys.modules[module_name], entity_name)):
return False
type_entity = getattr(sys.modules[module_name], entity_name)
if isinstance(f, type_entity):
# The method if of this type. Example:
#
# o = ClassType()
# function(o.method)()
return True
# Note: inspect is required here, to avoid unpacking tf.function decorators.
if inspect.ismethod(f):
# The the unbound method if of this type. Example:
#
# class ClassType:
# @function
# def method(self):
# ...
# o = ClassType()
# o.method()
if isinstance(f.__func__, type_entity):
return True
return False
def converted_call(f, options, args, kwargs, caller_fn_scope=None):
  """Compiles a function call inline.

  For internal use only.

  Args:
    f: The function to convert.
    options: converter.ConversionOptions
    args: Tuple, the original positional arguments of f
    kwargs: Dict, the original keyword arguments of f
    caller_fn_scope: Optional[function_wrappers.FunctionScope], the function
      scope of the converted function in which this call was originally made.

  Returns:
    Any, the result of executing a possibly-converted `f` with the given
      arguments.
  """
  logging.log(1, 'Converted call: %s\n args: %s\n kwargs: %s\n', f, args,
              kwargs)

  # Entities previously determined to be unconvertible skip the checks below.
  if conversion.check_cached_unconverted(f, options):
    return _call_unconverted(f, args, kwargs, options, False)

  if inspect_utils.isbuiltin(f):
    # eval and super require the caller's scope; other builtins are replaced
    # with their AutoGraph overloads.
    if f is eval:
      return py_builtins.eval_in_original_context(f, args, caller_fn_scope)
    if f is super:
      return py_builtins.super_in_original_context(f, args, caller_fn_scope)
    if kwargs:
      return py_builtins.overload_of(f)(*args, **kwargs)
    else:
      return py_builtins.overload_of(f)(*args)

  # TODO(mdan): Clean up the naming inconsistency.
  if hasattr(f, 'autograph_info__') or hasattr(f, '__ag_compiled'):
    logging.log(2, 'Permanently whitelisted: %s: already converted', f)
    return _call_unconverted(f, args, kwargs, options)

  # TODO(b/122265385): Remove this bypass.
  if (_is_known_loaded_type(f, 'wrapt', 'FunctionWrapper') or
      _is_known_loaded_type(f, 'wrapt', 'BoundFunctionWrapper')):
    logging.warn(
        'Entity {} appears to be decorated by wrapt, which is not yet supported'
        ' by AutoGraph. The function will be called without transformation.'
        ' You may however apply AutoGraph before the decorator.'.format(f))
    logging.log(2, 'Permanently whitelisted: %s: wrapt decorated', f)
    return _call_unconverted(f, args, kwargs, options)

  if _is_known_loaded_type(f, 'functools', '_lru_cache_wrapper'):
    logging.log(2, 'Permanently whitelisted: %s: lru_cache', f)
    return _call_unconverted(f, args, kwargs, options)

  # Constructors are permanently whitelisted.
  # TODO(mdan): Toggle as experimental feature instead.
  # TODO(b/124016764): Remove this limitation.
  if tf_inspect.isclass(f):
    logging.log(2, 'Permanently whitelisted: %s: constructor', f)
    return _call_unconverted(f, args, kwargs, options)

  # Other built-in modules are permanently whitelisted.
  # TODO(mdan): Figure out how to do this consistently for all stdlib modules.
  if any(
      f in m.__dict__.values() for m in (collections, pdb, copy, inspect, re)):
    logging.log(2, 'Permanently whitelisted: %s: part of builtin module', f)
    return _call_unconverted(f, args, kwargs, options)

  # Custom ops and kernels are also permanently whitelisted.
  # See tensorflow.framework.load_library.
  if (hasattr(f, '__module__') and
      hasattr(f.__module__, '_IS_TENSORFLOW_PLUGIN')):
    logging.log(2, 'Permanently whitelisted: %s: TensorFlow plugin', f)
    return _call_unconverted(f, args, kwargs, options)

  if not options.user_requested and conversion.is_whitelisted_for_graph(f):
    return _call_unconverted(f, args, kwargs, options)

  # internal_convert_user_code is for example turned off when issuing a dynamic
  # call conversion from generated code while in nonrecursive mode. In that
  # case we evidently don't want to recurse, but we still have to convert
  # things like builtins.
  if not options.internal_convert_user_code:
    return _call_unconverted(f, args, kwargs, options)

  # TODO(mdan): Move this entire block inside to_graph.
  try:  # Begin of transformation error guards

    # Unwrap functools.partial objects
    # TODO(mdan): Consider sharing unwrapping logic with tf_inspect.
    # TODO(b/120224672): This unwrapping should be done before the checks above.
    while isinstance(f, functools.partial):
      args = f.args + args
      new_kwargs = {}
      if f.keywords is not None:
        new_kwargs.update(f.keywords)
      if kwargs is not None:
        new_kwargs.update(kwargs)
      kwargs = new_kwargs
      f = f.func

    # Determine the conversion target and the effective positional args,
    # prepending `self` for bound methods and the object for callables.
    if tf_inspect.isfunction(f) or tf_inspect.ismethod(f):
      # Regular functions
      target_entity = f
      f_self = inspect_utils.getmethodself(f)

      # TODO(b/119246461): This may be more elegantly handled using __get__?
      if f_self is not None:
        effective_args = (f_self,) + args
      else:
        effective_args = args

    elif hasattr(f, '__call__') and hasattr(f, '__class__'):
      # Callable objects
      target_entity = f.__call__
      effective_args = (f,) + args

    elif tf_inspect.isclass(f):
      # Constructors
      # Note: Until we support class constructurs, and enable whole-class
      # conversion with an experimental flag, this branch is dead code.
      # TODO(mdan): Consider removing unless there is a compelling use case.
      target_entity = f
      effective_args = args

    else:
      target_entity = f
      raise NotImplementedError('unknown callable type "%s"' % type(f))

    if not tf_inspect.isclass(target_entity):
      if not hasattr(target_entity, '__code__'):
        logging.log(2, 'Permanently whitelisted: %s: native binding',
                    target_entity)
        return _call_unconverted(f, args, kwargs, options)
      elif (hasattr(target_entity.__code__, 'co_filename') and
            target_entity.__code__.co_filename == '<string>'):
        # TODO(mdan): __globals__['txt'] might work in Py3.
        logging.log(2, 'Permanently whitelisted: %s: dynamic code (exec?)',
                    target_entity)
        return _call_unconverted(f, args, kwargs, options)

    program_ctx = converter.ProgramContext(
        options=options, autograph_module=tf_inspect.getmodule(converted_call))
    converted_f = conversion.convert(target_entity, program_ctx)

    if logging.has_verbosity(2):
      logging.log(2, 'Defaults of %s : %s', converted_f,
                  converted_f.__defaults__)
      if six.PY3:
        logging.log(2, 'KW defaults of %s : %s',
                    converted_f, converted_f.__kwdefaults__)

      if kwargs is not None:
        callargs = tf_inspect.getcallargs(converted_f, *effective_args,
                                          **kwargs)
      else:
        callargs = tf_inspect.getcallargs(converted_f, *effective_args)

      formatted_callargs = '\n'.join(
          '    {}: {}'.format(k, v) for k, v in callargs.items())
      logging.log(2, 'Calling %s with\n%s\n', converted_f, formatted_callargs)

  except Exception as e:  # pylint:disable=broad-except
    logging.log(1, 'Error transforming entity %s', target_entity, exc_info=True)
    if is_autograph_strict_conversion_mode():
      raise
    # Outside strict mode, conversion failures fall back to running the
    # original, unconverted entity.
    logging.warn(
        'Entity %s could not be transformed and will be executed as-is.'
        ' Please report this to the AutoGraph team. When filing the bug, set'
        ' the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and'
        ' attach the full output. Cause: %s', target_entity, e)
    return _call_unconverted(f, args, kwargs, options)

  with StackTraceMapper(converted_f), tf_stack.CurrentModuleFilter():
    try:
      if kwargs is not None:
        result = converted_f(*effective_args, **kwargs)
      else:
        result = converted_f(*effective_args)
    except Exception as e:
      _attach_metadata(e, converted_f, True)
      raise

  return result
@tf_export('autograph.to_graph', v1=[])
def to_graph(entity, recursive=True, experimental_optional_features=None):
  """Converts a Python entity into a TensorFlow graph.

  Also see: `tf.autograph.to_code`, `tf.function`.

  Unlike `tf.function`, `to_graph` is a low-level transpiler that converts
  Python code to TensorFlow graph code. It does not implement any caching,
  variable management or create any actual ops, and is best used where greater
  control over the generated TensorFlow graph is desired. Another difference
  from `tf.function` is that `to_graph` will not wrap the graph into a
  TensorFlow function or a Python callable. Internally, `tf.function` uses
  `to_graph`.

  _Example Usage_

  ```python
    def foo(x):
      if x > 0:
        y = x * x
      else:
        y = -x
      return y

    converted_foo = to_graph(foo)

    x = tf.constant(1)
    y = converted_foo(x)  # converted_foo is a TensorFlow Op-like.
    assert is_tensor(y)
  ```

  Supported Python entities include:
    * functions
    * classes
    * object methods

  Functions are converted into new functions with converted code.

  Classes are converted by generating a new class whose methods use converted
  code.

  Methods are converted into unbound function that have an additional first
  argument called `self`.

  Args:
    entity: Python callable or class to convert.
    recursive: Whether to recursively convert any functions that the converted
      function may call.
    experimental_optional_features: `None`, a tuple of, or a single
      `tf.autograph.experimental.Feature` value. Controls the use of optional
      features in the conversion process.

  Returns:
    Same as `entity`, the converted Python function or class.

  Raises:
    ValueError: If the entity could not be converted.
  """
  try:
    program_ctx = converter.ProgramContext(
        options=converter.ConversionOptions(
            recursive=recursive,
            user_requested=True,
            optional_features=experimental_optional_features),
        autograph_module=tf_inspect.getmodule(to_graph))
    return conversion.convert(entity, program_ctx)
  except (ValueError, AttributeError, KeyError, NameError, AssertionError) as e:
    # Conversion failures are logged with the full stack trace and re-raised
    # as ConversionError carrying the offending entity in the message.
    logging.error(1, 'Error converting %s', entity, exc_info=True)
    raise ConversionError('converting {}: {}: {}'.format(
        entity, e.__class__.__name__, str(e)))
@tf_export(v1=['autograph.to_graph'])
def to_graph_v1(entity,
                recursive=True,
                arg_values=None,
                arg_types=None,
                experimental_optional_features=None):
  """Converts a Python entity into a TensorFlow graph.

  Also see: `tf.autograph.to_code`, `tf.function`.

  Unlike `tf.function`, `to_graph` is a low-level transpiler that converts
  Python code to TensorFlow graph code. It does not implement any caching,
  variable management or create any actual ops, and is best used where greater
  control over the generated TensorFlow graph is desired. Another difference
  from `tf.function` is that `to_graph` will not wrap the graph into a
  TensorFlow function or a Python callable. Internally, `tf.function` uses
  `to_graph`.

  _Example Usage_

  ```python
    def foo(x):
      if x > 0:
        y = x * x
      else:
        y = -x
      return y

    converted_foo = to_graph(foo)

    x = tf.constant(1)
    y = converted_foo(x)  # converted_foo is a TensorFlow Op-like.
    assert is_tensor(y)
  ```

  Supported Python entities include:
    * functions
    * classes
    * object methods

  Functions are converted into new functions with converted code.

  Classes are converted by generating a new class whose methods use converted
  code.

  Methods are converted into unbound function that have an additional first
  argument called `self`.

  Args:
    entity: Python callable or class to convert.
    recursive: Whether to recursively convert any functions that the converted
      function may call.
    arg_values: Deprecated.
    arg_types: Deprecated.
    experimental_optional_features: `None`, a tuple of, or a single
      `tf.autograph.experimental.Feature` value. Controls the use of optional
      features in the conversion process.

  Returns:
    Same as `entity`, the converted Python function or class.

  Raises:
    ValueError: If the entity could not be converted.
  """
  # The deprecated arguments are accepted for backward compatibility but have
  # no effect; the call is forwarded to the v2 implementation.
  del arg_types
  del arg_values
  return to_graph(
      entity,
      recursive=recursive,
      experimental_optional_features=experimental_optional_features)
@tf_export(v1=['autograph.to_code'])
def to_code_v1(entity,
               recursive=True,
               arg_values=None,
               arg_types=None,
               indentation=' ',
               experimental_optional_features=None):
  """Similar to `to_graph`, but returns Python source code as a string.

  Also see: `tf.autograph.to_graph`.

  `to_graph` returns the Python source code that can be used to generate a
  TensorFlow graph that is functionally identical to the input Python code.

  Args:
    entity: Python callable or class to convert.
    recursive: Whether to recursively convert any functions that the converted
      function may call.
    arg_values: Deprecated.
    arg_types: Deprecated.
    indentation: Deprecated.
    experimental_optional_features: `None`, a tuple of, or a single
      `tf.autograph.experimental.Feature` value. Controls the use of optional
      features in the conversion process.

  Returns:
    The converted code as string.
  """
  # Deprecated arguments are accepted for v1 API compatibility and ignored.
  del arg_values
  del arg_types
  del indentation
  return to_code(
      entity,
      recursive=recursive,
      experimental_optional_features=experimental_optional_features)
@tf_export('autograph.to_code', v1=[])
def to_code(entity, recursive=True, experimental_optional_features=None):
  """Similar to `to_graph`, but returns Python source code as a string.

  Also see: `tf.autograph.to_graph`.

  `to_graph` returns the Python source code that can be used to generate a
  TensorFlow graph that is functionally identical to the input Python code.

  Args:
    entity: Python callable or class to convert.
    recursive: Whether to recursively convert any functions that the converted
      function may call.
    experimental_optional_features: `None`, a tuple of, or a single
      `tf.autograph.experimental.Feature` value. Controls the use of optional
      features in the conversion process.

  Returns:
    The converted code as string.
  """
  # Convert first, then read back the generated function's source. The dedent
  # normalizes indentation in case the generated code was nested.
  source = tf_inspect.getsource(
      to_graph(
          entity,
          recursive=recursive,
          experimental_optional_features=experimental_optional_features))
  return textwrap.dedent(source)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/impl/api.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for conversion module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import imp
import sys
import threading
import gast
import six
from tensorflow.python.autograph import utils
from tensorflow.python.autograph.core import config
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.impl import api
from tensorflow.python.autograph.impl import conversion
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.framework import constant_op
from tensorflow.python.keras.engine import training
from tensorflow.python.platform import test
class ConversionTest(test.TestCase):
  """Tests for the conversion module (entity -> converted AST)."""

  def _simple_program_ctx(self):
    # Shared ProgramContext with recursive conversion enabled.
    return converter.ProgramContext(
        options=converter.ConversionOptions(recursive=True),
        autograph_module=api)

  def test_is_whitelisted_for_graph(self):

    def test_fn():
      return constant_op.constant(1)

    self.assertFalse(conversion.is_whitelisted_for_graph(test_fn))
    self.assertTrue(conversion.is_whitelisted_for_graph(utils))
    self.assertTrue(conversion.is_whitelisted_for_graph(constant_op.constant))

  def test_is_whitelisted_for_graph_tensorflow_like(self):
    tf_like = imp.new_module('tensorflow_foo')

    def test_fn():
      pass

    tf_like.test_fn = test_fn
    # NOTE(review): __module__ is normally a string; assigning the module
    # object itself appears deliberate here (it exercises the name-based
    # whitelisting check) — confirm against is_whitelisted_for_graph.
    test_fn.__module__ = tf_like

    self.assertFalse(conversion.is_whitelisted_for_graph(tf_like.test_fn))

  def test_is_whitelisted_for_graph_callable_whitelisted_call(self):
    whitelisted_mod = imp.new_module('test_whitelisted_call')
    sys.modules['test_whitelisted_call'] = whitelisted_mod
    # NOTE(review): mutates the global conversion rules without restoring
    # them afterwards; this can leak into later tests in the same process.
    config.CONVERSION_RULES = ((config.DoNotConvert('test_whitelisted_call'),) +
                               config.CONVERSION_RULES)

    class TestClass(object):

      def __call__(self):
        pass

      def whitelisted_method(self):
        pass

    TestClass.__module__ = 'test_whitelisted_call'
    if six.PY2:
      TestClass.__call__.__func__.__module__ = 'test_whitelisted_call'
    else:
      TestClass.__call__.__module__ = 'test_whitelisted_call'

    class Subclass(TestClass):

      def converted_method(self):
        pass

    tc = Subclass()

    self.assertTrue(conversion.is_whitelisted_for_graph(TestClass.__call__))
    self.assertTrue(conversion.is_whitelisted_for_graph(tc))
    self.assertTrue(conversion.is_whitelisted_for_graph(tc.__call__))
    self.assertTrue(conversion.is_whitelisted_for_graph(tc.whitelisted_method))
    self.assertFalse(conversion.is_whitelisted_for_graph(Subclass))
    self.assertFalse(conversion.is_whitelisted_for_graph(tc.converted_method))

  def test_convert_entity_to_ast_unsupported_types(self):
    with self.assertRaises(NotImplementedError):
      program_ctx = self._simple_program_ctx()
      conversion.convert_entity_to_ast('dummy', program_ctx)

  def test_convert_entity_to_ast_callable(self):
    b = 2

    def f(a):
      return a + b

    program_ctx = self._simple_program_ctx()
    nodes, name, info = conversion.convert_entity_to_ast(f, program_ctx)
    fn_node, = nodes
    self.assertIsInstance(fn_node, gast.FunctionDef)
    self.assertEqual('tf__f', name)
    # Free variables captured by the closure end up in the namespace.
    self.assertIs(info.namespace['b'], b)

  def test_convert_entity_to_ast_function_with_defaults(self):
    b = 2
    c = 1

    def f(a, d=c + 1):
      return a + b + d

    program_ctx = self._simple_program_ctx()
    nodes, name, _ = conversion.convert_entity_to_ast(f, program_ctx)
    fn_node, = nodes
    self.assertIsInstance(fn_node, gast.FunctionDef)
    self.assertEqual('tf__f', name)
    # Default values are replaced by None in the converted signature.
    self.assertEqual(
        compiler.ast_to_source(
            fn_node.args.defaults[0], include_encoding_marker=False).strip(),
        'None')

  def test_convert_entity_to_ast_call_tree(self):

    def g(a):
      return a

    def f(a):
      return g(a)

    program_ctx = self._simple_program_ctx()
    nodes, _, _ = conversion.convert_entity_to_ast(f, program_ctx)
    f_node, = nodes
    self.assertEqual('tf__f', f_node.name)

  def test_convert_entity_to_ast_class_hierarchy(self):

    class TestBase(object):

      def __init__(self, x='base'):
        self.x = x

      def foo(self):
        return self.x

      def bar(self):
        return self.x

    class TestSubclass(TestBase):

      def __init__(self, y):
        super(TestSubclass, self).__init__('sub')
        self.y = y

      def foo(self):
        return self.y

      def baz(self):
        return self.y

    program_ctx = self._simple_program_ctx()
    # Converting a class with a non-whitelisted base is unsupported.
    with self.assertRaisesRegex(NotImplementedError, 'classes.*whitelisted'):
      conversion.convert_entity_to_ast(TestSubclass, program_ctx)

  def test_convert_entity_to_ast_class_hierarchy_whitelisted(self):

    class TestSubclass(training.Model):

      def __init__(self, y):
        super(TestSubclass, self).__init__()
        self.built = False

      def call(self, x):
        return 3 * x

    program_ctx = self._simple_program_ctx()
    (import_node, class_node), name, _ = conversion.convert_entity_to_ast(
        TestSubclass, program_ctx)
    # The whitelisted base is imported rather than converted.
    self.assertEqual(import_node.names[0].name, 'Model')
    self.assertEqual(name, 'TfTestSubclass')
    self.assertEqual(class_node.name, 'TfTestSubclass')

  def test_convert_entity_to_ast_lambda(self):
    b = 2
    f = lambda x: b * x if x > 0 else -x
    program_ctx = self._simple_program_ctx()
    (fn_node,), name, entity_info = conversion.convert_entity_to_ast(
        f, program_ctx)
    self.assertIsInstance(fn_node, gast.Assign)
    self.assertIsInstance(fn_node.value, gast.Lambda)
    self.assertEqual('tf__lambda', name)
    self.assertIs(entity_info.namespace['b'], b)

  def test_convert_entity_to_ast_multiple_lambdas(self):
    a, b = 1, 2
    f, _ = (lambda x: a * x, lambda y: b * y)
    program_ctx = self._simple_program_ctx()
    (fn_node,), name, entity_info = conversion.convert_entity_to_ast(
        f, program_ctx)
    self.assertIsInstance(fn_node, gast.Assign)
    self.assertIsInstance(fn_node.value, gast.Lambda)
    self.assertEqual('tf__lambda', name)
    self.assertIs(entity_info.namespace['a'], a)

  def test_convert_entity_to_ast_multiple_lambdas_ambiguous_definitions(self):
    a, b = 1, 2
    # Two lambdas with identical signatures on one line are ambiguous.
    f, _ = (lambda x: a * x, lambda x: b * x)
    program_ctx = self._simple_program_ctx()
    with self.assertRaises(ValueError):
      conversion.convert_entity_to_ast(f, program_ctx)

  def test_convert_entity_to_ast_lambda_code_with_garbage(self):
    # pylint:disable=g-long-lambda
    f = (  # intentional wrap
        lambda x: (
            x  # intentional wrap
            + 1),)[0]
    # pylint:enable=g-long-lambda
    program_ctx = self._simple_program_ctx()
    (fn_node,), name, _ = conversion.convert_entity_to_ast(f, program_ctx)
    self.assertIsInstance(fn_node, gast.Assign)
    self.assertIsInstance(fn_node.value, gast.Lambda)
    self.assertEqual('tf__lambda', name)

  def test_convert_entity_to_ast_nested_functions(self):
    b = 2

    def f(x):

      def g(x):
        return b * x

      return g(x)

    program_ctx = self._simple_program_ctx()
    (fn_node,), name, entity_info = conversion.convert_entity_to_ast(
        f, program_ctx)
    self.assertIsInstance(fn_node, gast.FunctionDef)
    self.assertEqual(fn_node.name, 'tf__f')
    self.assertEqual('tf__f', name)
    self.assertIs(entity_info.namespace['b'], b)

  def test_convert_concurrency(self):

    def test_fn():
      pass

    generated_file_names = []

    def conversion_thread():
      new_f = conversion.convert(test_fn, self._simple_program_ctx())
      generated_file_names.append(new_f.__code__.co_filename)

    threads = tuple(
        threading.Thread(target=conversion_thread) for _ in range(10))
    for t in threads:
      t.start()
    for t in threads:
      t.join()

    # Races would potentially create multiple files (non-deterministically,
    # but with high likelihood).
    self.assertEqual(len(set(generated_file_names)), 1)

  def test_convert_reentrance(self):

    def test_fn():
      pass

    # There are no known ways to cause convert to re-enter. So we instrument
    # an internal function to do that instead.
    old_node_to_graph = conversion.node_to_graph
    self.num_conversions = 0

    def node_to_graph_wrapper(node, context):
      self.num_conversions += 1
      if self.num_conversions < 2:
        conversion.convert(test_fn, self._simple_program_ctx())
      return old_node_to_graph(node, context)

    try:
      conversion.node_to_graph = node_to_graph_wrapper
      new_f = conversion.convert(test_fn, self._simple_program_ctx())
      self.assertIsNotNone(new_f)
    finally:
      # Always restore the patched internal, even if conversion fails.
      conversion.node_to_graph = old_node_to_graph
# Standard TF test entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/impl/conversion_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for api module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import gc
import imp
import os
import re
import textwrap
import types
import numpy as np
from tensorflow.python.autograph import utils
from tensorflow.python.autograph.core import ag_ctx
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.impl import api
from tensorflow.python.autograph.pyct import inspect_utils
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.layers import core
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.util import function_utils
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
# Fake `tf` namespace exposing the symbols these tests reference, without
# depending on the full TensorFlow API surface.
tf = utils.fake_tf()
# Module-level value read and reassigned by test_to_graph_with_globals.
global_n = 2
class TestResource(object):
  """Trivial payload object used by the memory-leak regression tests."""

  def __init__(self):
    # Arbitrary attribute read by converted closures in the leak tests.
    self.x = 3
class ApiTest(test.TestCase):
@test_util.run_deprecated_v1
def test_decorator_recursive(self):
  """convert(recursive=True) also converts called helper methods."""

  class TestClass(object):

    def called_member(self, a):
      if a < 0:
        a = -a
      return a

    @api.convert(recursive=True)
    def test_method(self, x, s, a):
      while tf.reduce_sum(x) > s:
        x //= self.called_member(a)
      return x

  tc = TestClass()
  with self.cached_session() as sess:
    x = tc.test_method(
        constant_op.constant([2, 4]), constant_op.constant(1),
        constant_op.constant(-2))
    self.assertListEqual([0, 1], self.evaluate(x).tolist())


@test_util.run_deprecated_v1
def test_decorator_not_recursive(self):
  """convert(recursive=False) leaves called members unconverted."""

  class TestClass(object):

    def called_member(self, a):
      return tf.negative(a)

    @api.convert(recursive=False)
    def test_method(self, x, s, a):
      while tf.reduce_sum(x) > s:
        x //= self.called_member(a)
      return x

  tc = TestClass()
  with self.cached_session() as sess:
    x = tc.test_method(
        constant_op.constant([2, 4]), constant_op.constant(1),
        constant_op.constant(-2))
    self.assertListEqual([0, 1], self.evaluate(x).tolist())


@test_util.run_deprecated_v1
def test_convert_then_do_not_convert(self):
  """do_not_convert members are left alone during recursive conversion."""

  class TestClass(object):

    @api.do_not_convert
    def called_member(self, a):
      return tf.negative(a)

    @api.convert(recursive=True)
    def test_method(self, x, s, a):
      while tf.reduce_sum(x) > s:
        x //= self.called_member(a)
      return x

  tc = TestClass()
  x = tc.test_method(
      constant_op.constant((2, 4)), constant_op.constant(1),
      constant_op.constant(-2))
  self.assertAllEqual((0, 1), self.evaluate(x))


@test_util.run_deprecated_v1
def test_decorator_calls_decorated(self):
  """A converted method may call another explicitly converted method."""

  class TestClass(object):

    @api.convert()
    def called_member(self, a):
      if a < 0:
        a = -a
      return a

    @api.convert(recursive=True)
    def test_method(self, x, s, a):
      while tf.reduce_sum(x) > s:
        x //= self.called_member(a)
      return x

  tc = TestClass()
  with self.cached_session() as sess:
    x = tc.test_method(
        constant_op.constant([2, 4]), constant_op.constant(1),
        constant_op.constant(-2))
    self.assertListEqual([0, 1], self.evaluate(x).tolist())
def test_decorator_preserves_argspec(self):
  """Conversion must not alter the visible argspec of the target."""

  class TestClass(object):

    def test_method(self, a):
      if a < 0:
        a = -a
      return a

    # Converted alias created in the class body; must share the spec.
    test_method_converted = api.convert()(test_method)

  tc = TestClass()
  self.assertListEqual(
      list(tf_inspect.getfullargspec(tc.test_method)),
      list(tf_inspect.getfullargspec(tc.test_method_converted)))


def test_do_not_convert_argspec(self):
  """do_not_convert wrappers do not preserve the original argspec."""

  class TestClass(object):

    def test_method(self, x, y):
      z = x + y
      return z

    test_method_whitelisted = api.do_not_convert(test_method)

  tc = TestClass()
  self.assertTrue(tf_inspect.ismethod(tc.test_method_whitelisted))
  # Because the wrapped function is not generated, we can't preserve its
  # arg spec.
  self.assertEqual((),
                   tuple(function_utils.fn_args(tc.test_method_whitelisted)))


def test_do_not_convert_callable_object(self):
  """do_not_convert accepts callable objects, not just functions."""

  class TestClass(object):

    def __call__(self):
      return 1

  tc = TestClass()
  self.assertEqual(1, api.do_not_convert(tc)())
@test_util.run_deprecated_v1
def test_convert_call_site_decorator(self):
  """converted_call may be used explicitly inside converted code."""

  class TestClass(object):

    def called_member(self, a):
      if a < 0:
        a = -a
      return a

    @api.convert(recursive=True)
    def test_method(self, x, s, a):
      while tf.reduce_sum(x) > s:
        x //= api.converted_call(self.called_member,
                                 converter.ConversionOptions(recursive=True),
                                 (a,), {})
      return x

  tc = TestClass()
  x = tc.test_method(
      constant_op.constant([2, 4]), constant_op.constant(1),
      constant_op.constant(-2))
  self.assertListEqual([0, 1], self.evaluate(x).tolist())


def test_converted_call_builtin(self):
  """Builtins pass through converted_call unconverted."""
  x = api.converted_call(range, converter.ConversionOptions(recursive=True),
                         (3,), {})
  self.assertEqual((0, 1, 2), tuple(x))

  x = api.converted_call(re.compile,
                         converter.ConversionOptions(recursive=True),
                         ('mnas_v4_a.*\\/.*(weights|kernel):0$',), {})
  self.assertIsNotNone(x.match('mnas_v4_a/weights:0'))


def test_converted_call_function(self):

  def test_fn(x):
    if x < 0:
      return -x
    return x

  x = api.converted_call(test_fn, converter.ConversionOptions(recursive=True),
                         (constant_op.constant(-1),), {})
  self.assertEqual(1, self.evaluate(x))


@test_util.run_v1_only('b/120545219')
def test_converted_call_functools_partial(self):
  """functools.partial wrappers are unwrapped before conversion."""

  def test_fn(x, y, z):
    if x < 0:
      return -x, -y, -z
    return x, y, z

  x = api.converted_call(
      functools.partial(test_fn, constant_op.constant(-1), z=-3),
      converter.ConversionOptions(recursive=True),
      (constant_op.constant(-2),), {})
  self.assertEqual((1, 2, 3), self.evaluate(x))

  # Nested partials must be unwrapped recursively.
  x = api.converted_call(
      functools.partial(
          functools.partial(test_fn, constant_op.constant(-1)), z=-3),
      converter.ConversionOptions(recursive=True),
      (constant_op.constant(-2),), {})
  self.assertEqual((1, 2, 3), self.evaluate(x))
def test_converted_call_method(self):

  class TestClass(object):

    def __init__(self, x):
      self.x = x

    def test_method(self):
      if self.x < 0:
        return -self.x
      return self.x

  tc = TestClass(constant_op.constant(-1))
  x = api.converted_call(tc.test_method,
                         converter.ConversionOptions(recursive=True), (), {})
  self.assertEqual(1, self.evaluate(x))


def test_converted_call_synthetic_method(self):

  class TestClass(object):

    def __init__(self, x):
      self.x = x

  # Defined outside the class and bound afterwards ("synthetic" method).
  def test_function(self):
    if self.x < 0:
      return -self.x
    return self.x

  tc = TestClass(constant_op.constant(-1))
  test_method = types.MethodType(test_function, tc)

  x = api.converted_call(test_method,
                         converter.ConversionOptions(recursive=True), (), {})
  self.assertEqual(1, self.evaluate(x))


def test_converted_call_method_wrapper(self):

  class TestClass(object):

    def foo(self):
      pass

  tc = TestClass()

  # `method.__get__()` returns a so-called method-wrapper.
  wrapper = api.converted_call(tc.foo.__get__,
                               converter.ConversionOptions(recursive=True),
                               (tc,), {})
  self.assertEqual(wrapper, tc.foo)


def test_converted_call_method_as_object_attribute(self):

  class AnotherClass(object):

    def __init__(self):
      self.another_class_attr = constant_op.constant(1)

    def method(self):
      if self.another_class_attr > 0:
        return self.another_class_attr + 1
      return self.another_class_attr + 10

  class TestClass(object):

    def __init__(self, another_obj_method):
      # A bound method of a different object, stored as an attribute.
      self.another_obj_method = another_obj_method

  obj = AnotherClass()
  tc = TestClass(obj.method)

  x = api.converted_call(tc.another_obj_method,
                         converter.ConversionOptions(recursive=True), (), {})
  self.assertEqual(self.evaluate(x), 2)


def test_converted_call_method_converts_recursively(self):

  class TestClass(object):

    def __init__(self, x):
      self.x = x

    def other_method(self):
      if self.x < 0:
        return -self.x
      return self.x

    def test_method(self):
      return self.other_method()

  tc = TestClass(constant_op.constant(-1))
  x = api.converted_call(tc.test_method,
                         converter.ConversionOptions(recursive=True), (), {})
  self.assertEqual(1, self.evaluate(x))


def test_converted_call_method_by_class(self):

  class TestClass(object):

    def __init__(self, x):
      self.x = x

    def test_method(self):
      if self.x < 0:
        return -self.x
      return self.x

  tc = TestClass(constant_op.constant(-1))
  # Unbound method: the instance is passed explicitly as first argument.
  x = api.converted_call(TestClass.test_method,
                         converter.ConversionOptions(recursive=True), (tc,),
                         {})
  self.assertEqual(1, self.evaluate(x))


def test_converted_call_callable_object(self):

  class TestClass(object):

    def __init__(self, x):
      self.x = x

    def __call__(self):
      if self.x < 0:
        return -self.x
      return self.x

  tc = TestClass(constant_op.constant(-1))
  x = api.converted_call(tc, converter.ConversionOptions(recursive=True), (),
                         {})
  self.assertEqual(1, self.evaluate(x))
def test_converted_call_callable_metaclass(self):

  class TestMetaclass(type):

    x = constant_op.constant(-1)

    def __call__(cls):
      if cls.x < 0:
        cls.x = -cls.x
      return cls

  tc = TestMetaclass('TestClass', (), {})
  # This functools.partial will hide the class from the constructor
  # check. Not ideal. See b/120224672.
  tc = functools.partial(tc)
  converted_tc = api.converted_call(
      tc, converter.ConversionOptions(recursive=True), (), {})
  self.assertIsInstance(converted_tc, TestMetaclass)
  self.assertEqual(1, self.evaluate(converted_tc.x))


@test_util.run_deprecated_v1
def test_converted_call_constructor(self):

  class TestClass(object):

    def __init__(self, x):
      self.x = x

    def test_method(self):
      if self.x < 0:
        return -self.x
      return self.x

  tc = api.converted_call(TestClass,
                          converter.ConversionOptions(recursive=True),
                          (constant_op.constant(-1),), {})
  # tc is still a TestClass - constructors are whitelisted.
  # TODO(b/124016764): Support this use case.
  # The error below is specific to the `if` statement not being converted.
  with self.assertRaises(TypeError):
    tc.test_method()


def test_converted_call_mangled_properties(self):

  class TestClass(object):

    def __init__(self, x):
      # Name-mangled attribute (becomes _TestClass__private).
      self.__private = x

    def test_method(self):
      if self.__private < 0:
        return self.__private
      return self.__private

  tc = TestClass(constant_op.constant(-1))
  # The error below is specific to the `if` statement not being converted.
  with self.assertRaisesRegex(NotImplementedError, 'Mangled names'):
    api.converted_call(tc.test_method,
                       converter.ConversionOptions(recursive=True), (), {})
    tc.test_method()


def test_converted_call_already_converted(self):

  def f(x):
    return x == 0

  x = api.converted_call(f, converter.ConversionOptions(recursive=True),
                         (constant_op.constant(0),), {})
  self.assertTrue(self.evaluate(x))

  # Re-converting already-converted code must be a no-op.
  converted_f = api.to_graph(
      f, experimental_optional_features=converter.Feature.ALL)
  x = api.converted_call(converted_f,
                         converter.ConversionOptions(recursive=True),
                         (constant_op.constant(0),), {})
  self.assertTrue(self.evaluate(x))
def test_converted_call_then_already_converted_dynamic(self):

  @api.convert()
  def g(x):
    if x > 0:
      return x
    else:
      return -x

  def f(g, x):
    # g is passed dynamically; it is already converted at this point.
    return g(x)

  x = api.converted_call(f, converter.ConversionOptions(recursive=True),
                         (g, constant_op.constant(1)), {})
  self.assertEqual(self.evaluate(x), 1)


def test_converted_call_forced_when_explicitly_whitelisted(self):

  @api.do_not_convert()
  def f(x):
    return x + 1

  # user_requested=True overrides the do_not_convert whitelisting.
  x = api.converted_call(
      f, converter.ConversionOptions(recursive=True, user_requested=True),
      (constant_op.constant(0),), {})
  self.assertTrue(self.evaluate(x))

  converted_f = api.to_graph(
      f, experimental_optional_features=converter.Feature.ALL)
  x = api.converted_call(converted_f,
                         converter.ConversionOptions(recursive=True), (0,),
                         {})
  self.assertEqual(x, 1)


@test_util.run_deprecated_v1
def test_converted_call_no_user_code(self):

  def f(x):
    return len(x)

  opts = converter.ConversionOptions(internal_convert_user_code=False)

  # f should not be converted, causing len to error out.
  with self.assertRaisesRegexp(Exception, 'len is not well defined'):
    api.converted_call(f, opts, (constant_op.constant([0]),), {})

  # len on the other hand should work fine.
  x = api.converted_call(len, opts, (constant_op.constant([0]),), {})
  # The constant has static shape so the result is a primitive not a Tensor.
  self.assertEqual(x, 1)


def test_converted_call_no_kwargs_allowed(self):

  def f(*args):
    # Note: np.broadcast rejects any **kwargs, even *{}
    return np.broadcast(args[:1])

  opts = converter.ConversionOptions(internal_convert_user_code=False)

  # Passing kwargs=None must be supported for callees rejecting **kwargs.
  self.assertIsNotNone(api.converted_call(f, opts, (1, 2, 3, 4), None))
def test_converted_call_whitelisted_method(self):
  opts = converter.ConversionOptions(recursive=True)

  model = sequential.Sequential([core.Dense(2)])

  # Keras methods are whitelisted; the call must still execute correctly.
  x = api.converted_call(model.call, opts, (constant_op.constant([[0.0]]),),
                         {'training': True})

  self.evaluate(variables.global_variables_initializer())
  self.assertAllEqual([[0.0, 0.0]], self.evaluate(x))


def test_converted_call_whitelisted_method_via_owner(self):
  # NOTE(review): body is currently identical to
  # test_converted_call_whitelisted_method; presumably intended to resolve
  # the method via its owner object — confirm intent upstream.
  opts = converter.ConversionOptions(recursive=True)

  model = sequential.Sequential([core.Dense(2)])

  x = api.converted_call(model.call, opts, (constant_op.constant([[0.0]]),),
                         {'training': True})

  self.evaluate(variables.global_variables_initializer())
  self.assertAllEqual([[0.0, 0.0]], self.evaluate(x))


def test_converted_call_numpy(self):
  opts = converter.ConversionOptions(recursive=True)

  # numpy functions are whitelisted and called through unchanged.
  x = api.converted_call(np.arange, opts, (5,), {})

  self.assertAllEqual(x, list(range(5)))


def test_converted_call_tf_op_forced(self):
  # TODO(mdan): Add the missing level of support to LOGICAL_EXPRESSIONS.
  opts = converter.ConversionOptions(
      user_requested=True, optional_features=None)

  x = api.converted_call(gen_math_ops.add, opts, (1, 1), {})

  self.assertAllEqual(self.evaluate(x), 2)


def test_converted_call_exec_generated_code(self):
  temp_mod = imp.new_module('test_module')
  dynamic_code = """
    def foo(x):
      return x + 1
  """
  exec(textwrap.dedent(dynamic_code), temp_mod.__dict__)  # pylint:disable=exec-used
  opts = converter.ConversionOptions(optional_features=None)

  # Functions with no retrievable source (exec'd) must still be callable.
  x = api.converted_call(temp_mod.foo, opts, (1,), {})

  self.assertAllEqual(x, 2)
def test_converted_call_namedtuple(self):
  opts = converter.ConversionOptions(recursive=True)

  # namedtuple factories are whitelisted; the result is a real namedtuple.
  x = api.converted_call(collections.namedtuple, opts,
                         ('TestNamedtuple', ('a', 'b')), {})
  self.assertTrue(inspect_utils.isnamedtuple(x))


def test_converted_call_namedtuple_via_collections(self):
  opts = converter.ConversionOptions(recursive=True)

  x = api.converted_call(collections.namedtuple, opts,
                         ('TestNamedtuple', ('a', 'b')), {})
  self.assertTrue(inspect_utils.isnamedtuple(x))


def test_converted_call_namedtuple_subclass_bound_method(self):

  class TestClass(collections.namedtuple('TestNamedtuple', ('a', 'b'))):

    def test_method(self, x):
      while tf.reduce_sum(x) > self.a:
        x //= self.b
      return x

  opts = converter.ConversionOptions(recursive=True)

  obj = TestClass(5, 2)
  x = api.converted_call(obj.test_method, opts,
                         (constant_op.constant([2, 4]),), {})
  self.assertAllEqual(self.evaluate(x), [1, 2])


def test_converted_call_namedtuple_method(self):

  class TestClass(collections.namedtuple('TestNamedtuple', ('a', 'b'))):
    pass

  opts = converter.ConversionOptions(recursive=True)

  obj = TestClass(5, 2)
  # _asdict is a documented method of namedtuple.
  x = api.converted_call(obj._asdict, opts, (), {})
  self.assertDictEqual(x, {'a': 5, 'b': 2})


def test_converted_call_namedtuple_subclass_unbound_method(self):

  class TestClass(collections.namedtuple('TestNamedtuple', ('a', 'b'))):

    def test_method(self, x):
      while tf.reduce_sum(x) > self.a:
        x //= self.b
      return x

  opts = converter.ConversionOptions(recursive=True)

  obj = TestClass(5, 2)
  # Unbound form: the instance is supplied explicitly.
  x = api.converted_call(TestClass.test_method, opts,
                         (obj, constant_op.constant([2, 4])), {})
  self.assertAllEqual(self.evaluate(x), [1, 2])


def test_converted_call_lambda(self):
  opts = converter.ConversionOptions(recursive=True)

  l = lambda x: x == 0

  x = api.converted_call(l, opts, (constant_op.constant(0),), {})

  self.evaluate(variables.global_variables_initializer())
  self.assertAllEqual(True, self.evaluate(x))
def test_converted_call_defun_object_method(self):
  opts = converter.ConversionOptions(recursive=True)

  # pylint:disable=method-hidden
  class TestClass(object):

    def method(self):
      return 1

    def prepare(self):
      # Shadows the class-level method with a defun-wrapped instance attr.
      self.method = function.defun(self.method)
  # pylint:enable=method-hidden

  tc = TestClass()
  tc.prepare()

  x = api.converted_call(tc.method, opts, (), {})

  self.assertAllEqual(1, self.evaluate(x))


def test_converted_call_through_tf_dataset(self):

  def other_fn(x):
    if x > 0:
      return x
    return -x

  def f():
    return dataset_ops.Dataset.range(-3, 3).map(other_fn)

  # Dataset iteration only works inside tf.
  @def_function.function
  def graph_fn():
    opts = converter.ConversionOptions(recursive=True)
    ds = api.converted_call(f, opts, (), {})
    itr = iter(ds)
    return next(itr), next(itr), next(itr)

  self.assertAllEqual(self.evaluate(graph_fn()), (3, 2, 1))


def assertNoMemoryLeaks(self, f):
  """Asserts that calling f() leaves no new TestResource objects alive."""
  object_ids_before = {id(o) for o in gc.get_objects()}
  f()
  gc.collect()
  objects_after = tuple(
      o for o in gc.get_objects() if id(o) not in object_ids_before)
  self.assertEmpty(
      tuple(o for o in objects_after if isinstance(o, TestResource)))


def test_converted_call_no_leaks_via_closure(self):

  def test_fn():
    res = TestResource()

    def f(y):
      # Captures res; conversion must not pin it past the call.
      return res.x + y

    opts = converter.ConversionOptions(recursive=True)
    api.converted_call(f, opts, (1,), {})

  self.assertNoMemoryLeaks(test_fn)


def test_converted_call_no_leaks_via_inner_function_closure(self):

  def test_fn():
    res = TestResource()

    def f(y):

      def inner_f():
        return res.x + y

      return inner_f

    opts = converter.ConversionOptions(recursive=True)
    api.converted_call(f, opts, (1,), {})()

  self.assertNoMemoryLeaks(test_fn)
def test_context_tracking_direct_calls(self):
  """The AutoGraph control-status context follows direct calls."""

  @api.do_not_convert()
  def unconverted_fn():
    self.assertEqual(ag_ctx.control_status_ctx().status,
                     ag_ctx.Status.DISABLED)

  @api.convert()
  def converted_fn():
    self.assertEqual(ag_ctx.control_status_ctx().status,
                     ag_ctx.Status.ENABLED)
    unconverted_fn()
    # Status is restored after the unconverted callee returns.
    self.assertEqual(ag_ctx.control_status_ctx().status,
                     ag_ctx.Status.ENABLED)

  self.assertEqual(ag_ctx.control_status_ctx().status,
                   ag_ctx.Status.UNSPECIFIED)
  converted_fn()
  self.assertEqual(ag_ctx.control_status_ctx().status,
                   ag_ctx.Status.UNSPECIFIED)

  @api.call_with_unspecified_conversion_status
  def unspecified_fn():
    self.assertEqual(ag_ctx.control_status_ctx().status,
                     ag_ctx.Status.UNSPECIFIED)

  unspecified_fn()
def test_to_graph_basic(self):

  def test_fn(x, s):
    while tf.reduce_sum(x) > s:
      x //= 2
    return x

  compiled_fn = api.to_graph(test_fn)

  with tf.Graph().as_default():
    x = compiled_fn(constant_op.constant((4, 8)), 4)
    self.assertAllEqual(self.evaluate(x), (1, 2))


@test_util.run_deprecated_v1
def test_to_graph_with_defaults(self):

  foo = 4

  def test_fn(x, s=foo):
    while tf.reduce_sum(x) > s:
      x //= 2
    return x

  compiled_fn = api.to_graph(test_fn)

  with self.cached_session() as sess:
    x = compiled_fn(constant_op.constant([4, 8]))
    self.assertListEqual([1, 2], self.evaluate(x).tolist())


def test_to_graph_with_globals(self):

  def test_fn(x):
    global global_n
    global_n = x + global_n
    return global_n

  converted_fn = api.to_graph(test_fn)
  prev_val = global_n
  converted_fn(10)
  # The converted function must mutate the original module global.
  self.assertGreater(global_n, prev_val)


def test_to_graph_with_kwargs_clashing_converted_call(self):

  def called_fn(**kwargs):
    return kwargs['f'] + kwargs['owner']

  def test_fn():
    # These arg names intentionally match converted_call's
    return called_fn(f=1, owner=2)

  compiled_fn = api.to_graph(test_fn)

  self.assertEqual(compiled_fn(), 3)


def test_to_graph_with_kwargs_clashing_unconverted_call(self):

  @api.do_not_convert
  def called_fn(**kwargs):
    return kwargs['f'] + kwargs['owner']

  def test_fn():
    # These arg names intentionally match _call_unconverted's
    return called_fn(f=1, owner=2)

  compiled_fn = api.to_graph(test_fn)

  self.assertEqual(compiled_fn(), 3)
def test_to_graph_caching(self):

  def test_fn(x):
    if x > 0:
      return x
    else:
      return -x

  converted_functions = tuple(api.to_graph(test_fn) for _ in (-1, 0, 1))

  # All outputs are from the same module. We can't use __module__ because
  # that's reset when we instantiate the function (see conversion.py).
  # TODO(mdan): Can and should we overwrite __module__ instead?
  module_names = frozenset(f.ag_module for f in converted_functions)
  self.assertEqual(len(module_names), 1)
  self.assertNotIn('__main__', module_names)

  # Each call still yields a distinct function object.
  self.assertEqual(len(frozenset(id(f) for f in converted_functions)), 3)


def test_to_graph_caching_different_options(self):

  def called_fn():
    pass

  def test_fn():
    return called_fn()

  converted_recursive = api.to_graph(test_fn, recursive=True)
  converted_non_recursive = api.to_graph(test_fn, recursive=False)

  # Different conversion options must not share a cached module.
  self.assertNotEqual(converted_recursive.ag_module,
                      converted_non_recursive.ag_module)
  self.assertRegex(tf_inspect.getsource(converted_recursive),
                   'FunctionScope(.*recursive=True.*)')
  self.assertRegex(tf_inspect.getsource(converted_non_recursive),
                   'FunctionScope(.*recursive=False.*)')


def test_to_graph_preserves_bindings(self):
  y = 3

  def test_fn():
    return y

  converted = api.to_graph(test_fn)

  self.assertEqual(converted(), 3)

  # The converted function sees updates to the captured variable.
  y = 7

  self.assertEqual(converted(), 7)
def test_to_graph_source_map(self):

  def test_fn(y):
    return y**2

  # Converted functions carry a source map for error reporting.
  self.assertTrue(hasattr(api.to_graph(test_fn), 'ag_source_map'))


def test_to_graph_sets_conversion_context(self):

  def g():
    self.assertEqual(ag_ctx.control_status_ctx().status,
                     ag_ctx.Status.ENABLED)
    return 0

  # Note: the autograph=False sets the context to Status.DISABLED. The test
  # verifies that to_graph overrides that.
  @def_function.function(autograph=False)
  def f():
    converted_g = api.to_graph(g)
    converted_g()

  f()


def test_to_code_basic(self):

  def test_fn(x, s):
    while tf.reduce_sum(x) > s:
      x /= 2
    return x

  # Just check that the output is parseable Python code.
  self.assertIsNotNone(parser.parse_str(api.to_code(test_fn)))
  def test_tf_convert_direct(self):

    def f():
      # This Tensor-dependent `if` only works when converted by AutoGraph.
      if tf.reduce_sum([1, 2]) > 0:
        return -1
      return 1

    # Note: the autograph setting of tf.function has nothing to do with the
    # test case. We just disable it to avoid confusion.
    @def_function.function(autograph=False)
    def test_fn(ctx):
      return api.tf_convert(f, ctx)()

    # ENABLED status: conversion happens and the function evaluates.
    self.assertEqual(
        self.evaluate(
            test_fn(ag_ctx.ControlStatusCtx(status=ag_ctx.Status.ENABLED))), -1)

    with self.assertRaisesRegex(TypeError, 'tf.Tensor.*bool'):
      # The code in `f` is only valid with AutoGraph.
      test_fn(ag_ctx.ControlStatusCtx(status=ag_ctx.Status.DISABLED))
  def test_tf_convert_unspecified_not_converted_by_default(self):

    def f():
      # With convert_by_default=False, tf_convert must leave the status as
      # UNSPECIFIED instead of forcing conversion on.
      self.assertEqual(ag_ctx.control_status_ctx().status,
                       ag_ctx.Status.UNSPECIFIED)
      if tf.reduce_sum([1, 2]) > 0:
        return -1
      return 1

    @def_function.function
    def test_fn(ctx):
      return api.tf_convert(f, ctx, convert_by_default=False)()

    with self.assertRaisesRegex(TypeError, 'tf.Tensor.*bool'):
      # The code in `f` is only valid with AutoGraph.
      test_fn(ag_ctx.ControlStatusCtx(status=ag_ctx.Status.UNSPECIFIED))
  def test_tf_convert_whitelisted_method(self):
    # Keras model methods are whitelisted (never converted); tf_convert must
    # leave the underlying function object untouched.
    model = sequential.Sequential([core.Dense(2)])
    converted_call = api.tf_convert(
        model.call, ag_ctx.ControlStatusCtx(status=ag_ctx.Status.ENABLED))
    _, converted_target = tf_decorator.unwrap(converted_call)
    self.assertIs(converted_target.__func__, model.call.__func__)
  def test_tf_convert_wrapped(self):
    # tf_convert must see through tf_decorator wrappers and convert the
    # wrapped target, not the wrapper.

    def f():
      if tf.reduce_sum([1, 2]) > 0:
        return -1
      return 1

    @functools.wraps(f)
    def wrapper(*args, **kwargs):
      return wrapper.__wrapped__(*args, **kwargs)

    decorated_f = tf_decorator.make_decorator(f, wrapper)

    # Note: the autograph setting of tf has nothing to do with the
    # test case. We just disable it to avoid confusion.
    @def_function.function(autograph=False)
    def test_fn(ctx):
      return api.tf_convert(decorated_f, ctx)()

    self.assertEqual(
        self.evaluate(
            test_fn(ag_ctx.ControlStatusCtx(status=ag_ctx.Status.ENABLED))), -1)

    # tf_convert mutates the decorator, so we need to create a new one for
    # another test.
    decorated_f = tf_decorator.make_decorator(f, wrapper)
    with self.assertRaisesRegex(TypeError, 'tf.Tensor.*bool'):
      # The code in `f` is only valid with AutoGraph.
      test_fn(ag_ctx.ControlStatusCtx(status=ag_ctx.Status.DISABLED))
def test_super_with_one_arg(self):
test_case_self = self
class TestBase(object):
def plus_three(self, x):
return x + 3
class TestSubclass(TestBase):
def plus_three(self, x):
test_case_self.fail('This should never be called.')
def one_arg(self, x):
test_base_unbound = super(TestSubclass)
test_base = test_base_unbound.__get__(self, TestSubclass)
return test_base.plus_three(x)
tc = api.converted_call(TestSubclass,
converter.ConversionOptions(recursive=True), (), {})
self.assertEqual(5, tc.one_arg(2))
def test_super_with_two_args(self):
test_case_self = self
class TestBase(object):
def plus_three(self, x):
return x + 3
class TestSubclass(TestBase):
def plus_three(self, x):
test_case_self.fail('This should never be called.')
def two_args(self, x):
return super(TestSubclass, self).plus_three(x)
tc = api.converted_call(TestSubclass,
converter.ConversionOptions(recursive=True), (), {})
self.assertEqual(5, tc.two_args(2))
if __name__ == '__main__':
  # Strict conversion makes conversion failures raise instead of silently
  # falling back to running the unconverted code.
  os.environ['AUTOGRAPH_STRICT_CONVERSION'] = '1'
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/impl/api_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Checkers for detecting unsupported Python features."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
class UnsupportedFeaturesChecker(gast.NodeVisitor):
  """Quick check for Python features we know we don't support.

  Any features detected will cause AutoGraph to not compile a function.
  """

  def visit_Attribute(self, node):
    """Rejects class-private ("mangled") attribute names like `obj.__attr`."""
    if (node.attr is not None
        and node.attr.startswith('__') and not node.attr.endswith('__')):
      raise NotImplementedError(
          'Mangled names are not yet supported by AutoGraph')
    # Bug fix: continue walking the subtree. NodeVisitor only descends
    # automatically for node types without an explicit visitor, so without
    # this call nested attributes (e.g. `a.__b.c`) and any other construct
    # inside an Attribute's value expression would escape checking.
    self.generic_visit(node)

  # These checks could potentially be replaced with inspect.isgeneratorfunction
  # to avoid a getsource/parse/ast-walk round trip.
  def visit_Yield(self, node):
    raise NotImplementedError('Generators are not supported by AutoGraph')

  def visit_YieldFrom(self, node):
    raise NotImplementedError('Generators are not supported by AutoGraph')
def verify(node):
  """Raises NotImplementedError if `node` uses features AutoGraph rejects."""
  checker = UnsupportedFeaturesChecker()
  checker.visit(node)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/core/unsupported_features_checker.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Global configuration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.core import config_lib
# Re-exported for convenience; the implementations live in config_lib.
Action = config_lib.Action
Convert = config_lib.Convert
DoNotConvert = config_lib.DoNotConvert


# This list is evaluated in order and stops at the first rule that tests True
# for a definitely_convert or definitely_bypass call.
CONVERSION_RULES = (
    # Builtin modules
    DoNotConvert('collections'),
    DoNotConvert('copy'),
    DoNotConvert('cProfile'),
    DoNotConvert('inspect'),
    DoNotConvert('ipdb'),
    DoNotConvert('linecache'),
    DoNotConvert('mock'),
    DoNotConvert('pathlib'),
    DoNotConvert('pdb'),
    DoNotConvert('posixpath'),
    DoNotConvert('pstats'),
    DoNotConvert('re'),
    DoNotConvert('threading'),

    # Known libraries
    DoNotConvert('numpy'),
    DoNotConvert('tensorflow'),

    # TODO(b/133417201): Remove.
    DoNotConvert('tensorflow_probability'),

    # TODO(b/133842282): Remove.
    DoNotConvert('tensorflow_datasets.core'),
)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/core/config.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Global configuration support."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
# TODO(mdan): For better performance, allow each rule to take a set names.
class Rule(object):
  """Base class for conversion rules, keyed on a module name prefix."""

  def __init__(self, module_prefix):
    self._prefix = module_prefix

  def matches(self, module_name):
    """Returns True for the prefix module itself or any of its submodules."""
    prefix = self._prefix
    if module_name == prefix:
      return True
    return module_name.startswith(prefix + '.')
class Action(enum.Enum):
  """Outcome of applying a rule to a module: no opinion, convert, or bypass."""
  NONE = 0
  CONVERT = 1
  DO_NOT_CONVERT = 2
class DoNotConvert(Rule):
  """Indicates that this module should be not converted."""

  def __str__(self):
    return 'DoNotConvert rule for {}'.format(self._prefix)

  def get_action(self, module):
    """Returns DO_NOT_CONVERT for matching modules, NONE otherwise."""
    if not self.matches(module.__name__):
      return Action.NONE
    return Action.DO_NOT_CONVERT
class Convert(Rule):
  """Indicates that this module should be converted."""

  def __str__(self):
    return 'Convert rule for {}'.format(self._prefix)

  def get_action(self, module):
    """Returns CONVERT for matching modules, NONE otherwise."""
    if not self.matches(module.__name__):
      return Action.NONE
    return Action.CONVERT
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/core/config_lib.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Symbol naming utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.utils import misc
class _NamingStyle(enum.Enum):
  # Conventions used when flattening a qualified name into a symbol name:
  # SNAKE joins pieces with underscores, CAMEL capitalizes and concatenates.
  SNAKE = 1
  CAMEL = 2
class Namer(object):
  """Symbol name generator.

  Generates names that avoid collisions with a given global namespace, with
  locally reserved symbols, and with names this instance already produced.

  Attributes:
    global_namespace: Mapping[Text, Any], names to avoid; only key membership
      is used.
    generated_names: Set[Text], every name handed out so far.
  """

  def __init__(self, global_namespace):
    self.global_namespace = global_namespace
    self.generated_names = set()

  def _as_symbol_name(self, fqn, style=_NamingStyle.SNAKE):
    """Returns a symbol name that matches a fully-qualified name.

    The returned name is safe to use for Python symbols. Any special characters
    present in fqn are replaced according to the style argument.

    Examples:

      self._as_symbol_name('foo.bar', style=_NamingStyle.CAMEL) == 'FooBar'
      self._as_symbol_name('foo.bar', style=_NamingStyle.SNAKE) == 'foo_bar'

    See the unit tests for more examples.

    Args:
      fqn: Union[Text, Tuple[Text]] a fully-qualified symbol name. The qualifier
        may include module, class names, attributes, etc.
      style: _NamingStyle

    Returns:
      Text
    """
    assert style in _NamingStyle

    if isinstance(fqn, tuple):
      cn = '.'.join(fqn)
    else:
      cn = fqn

    # Until we clean up the whole FQN mechanism, `fqn` may not be
    # canonical, that is, in can appear as ('foo.bar', 'baz')
    # This replaces any characters that might remain because of that.
    pieces = cn.split('.')

    if style == _NamingStyle.CAMEL:
      pieces = tuple(misc.capitalize_initial(p) for p in pieces)
      return ''.join(pieces)
    elif style == _NamingStyle.SNAKE:
      return '_'.join(pieces)

  def _avoid_global_conflicts(self, name_root):
    """Appends a numeric suffix until name_root avoids the global namespace.

    Shared by class_name and function_name, which previously duplicated this
    loop. Note that, by design, only the global namespace is consulted here
    (not generated_names), so repeated requests for the same root are stable.

    Args:
      name_root: Text, the desired name.

    Returns:
      Text, name_root possibly suffixed with '_<n>' to make it unique.
    """
    new_name = name_root
    n = 0
    while new_name in self.global_namespace:
      n += 1
      new_name = '%s_%d' % (name_root, n)
    self.generated_names.add(new_name)
    return new_name

  def class_name(self, original_fqn):
    """Returns the name of a converted class."""
    canonical_name = self._as_symbol_name(
        original_fqn, style=_NamingStyle.CAMEL)
    return self._avoid_global_conflicts('Tf%s' % canonical_name)

  def function_name(self, original_fqn):
    """Returns the name of a converted function."""
    canonical_name = self._as_symbol_name(
        original_fqn, style=_NamingStyle.SNAKE)
    return self._avoid_global_conflicts('tf__%s' % canonical_name)

  def new_symbol(self, name_root, reserved_locals):
    """See control_flow.SymbolNamer.new_symbol."""
    # reserved_locals may contain QNs.
    all_reserved_locals = set()
    for s in reserved_locals:
      if isinstance(s, qual_names.QN):
        all_reserved_locals.update(s.qn)
      elif isinstance(s, str):
        all_reserved_locals.add(s)
      else:
        raise ValueError('Unexpected symbol type "%s"' % type(s))

    # If the root already carries a numeric suffix, resume counting from it
    # rather than generating e.g. 'x_1_1'.
    pieces = name_root.split('_')
    if pieces[-1].isdigit():
      name_root = '_'.join(pieces[:-1])
      n = int(pieces[-1])
    else:
      n = 0

    # Unlike _avoid_global_conflicts, this also avoids reserved locals and
    # previously generated names.
    new_name = name_root
    while (new_name in self.global_namespace or
           new_name in all_reserved_locals or new_name in self.generated_names):
      n += 1
      new_name = '%s_%d' % (name_root, n)

    self.generated_names.add(new_name)
    return new_name
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/core/naming.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Support for wrapping converted functions bodies with auxiliary logic."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.core import ag_ctx
from tensorflow.python.autograph.core import converter
from tensorflow.python.framework import auto_control_deps
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.util import nest
class FunctionScope(object):
  """Context manager that wraps the body of a converted function.

  This context manager handles various operations related to the scope of a
  function:
    * optional TF name scopes - these name scopes match the name of the
        function, for easy visualization in TensorBoard;
    * optional automatic control dependencies - this adds the same mechanism
        for control dependencies that is used by `@tf.function`; it can be
        optionally enabled when using `tf.autograph.to_graph`;
    * tracking of autograph conversion state (whether it's enabled by the user,
        conversion options).
  """

  def __init__(self, function_name, scope_name, options):
    """Initializes the scope.

    Args:
      function_name: Text, used to name the optional TF name scope.
      scope_name: Text, name of this scope as seen by the generated code.
      options: converter.ConversionOptions
    """
    self.name = scope_name
    self.options = options

    # The autograph context is only installed for user-requested conversions;
    # internal (recursive) conversions must not override the user's setting.
    if options.user_requested:
      self.autograph_ctx = ag_ctx.ControlStatusCtx(ag_ctx.Status.ENABLED,
                                                   options)
    self.callopts = options.call_options()

    use_name_scope = options.uses(converter.Feature.NAME_SCOPES)
    self.use_name_scope = use_name_scope
    if use_name_scope:
      self.name_scope = ops.name_scope(self._sanitize(function_name))

    use_auto_deps = self.options.uses(converter.Feature.AUTO_CONTROL_DEPS)
    self.use_auto_deps = use_auto_deps
    if use_auto_deps:
      self.autodeps_scope = auto_control_deps.AutomaticControlDependencies()
      # Currently write-only; presumably kept for debugging - TODO confirm.
      self._return_value_marked = False

  def _sanitize(self, name):
    """See https://www.tensorflow.org/api_docs/python/tf/Graph#name_scope."""
    # TensorFlow doesn't like leading underscores at the top level.
    if name and name.startswith('_'):
      name = 'fn' + name
    return name

  def __enter__(self):
    # Note: the nested scopes are entered in a fixed order and exited in the
    # same (not reversed) order; keep __enter__ and __exit__ in sync.
    if self.options.user_requested:
      self.autograph_ctx.__enter__()
    if self.use_name_scope:
      self.name_scope.__enter__()
    if self.use_auto_deps:
      self.autodeps_scope.__enter__()
    return self

  def __exit__(self, exc_type, exc_val, exc_tb):
    if self.options.user_requested:
      self.autograph_ctx.__exit__(exc_type, exc_val, exc_tb)
    if self.use_name_scope:
      self.name_scope.__exit__(exc_type, exc_val, exc_tb)
    if self.use_auto_deps:
      self.autodeps_scope.__exit__(exc_type, exc_val, exc_tb)

  def mark_return_value(self, value):
    """Marks a value as returned from the function guarded by the scope."""
    if self.use_auto_deps:
      self._return_value_marked = True
      if value is None:
        # We don't create dummy returns, to preserve Python semantics. The user
        # is responsible for adding a return value to the top-level function.
        return None

      def _mark_return_if_tensor(t):
        if tensor_util.is_tensor(t):
          return self.autodeps_scope.mark_as_return(t)
        return t

      # Handles arbitrarily nested structures of tensors in the return value.
      value = nest.map_structure(_mark_return_if_tensor, value)
    return value
def with_function_scope(thunk, scope_name, options):
  """Inline version of the FunctionScope context manager."""
  scope = FunctionScope('lambda_', scope_name, options)
  with scope:
    return thunk(scope)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/core/function_wrappers.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for function_wrappers module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.core import function_wrappers
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class FunctionWrappersTest(test.TestCase):
  """Tests for function_wrappers.FunctionScope."""

  def test_name_scope(self):
    if context.executing_eagerly():
      self.skipTest('Tensor names are disabled in eager')
    # NAME_SCOPES should prefix op names with the function's name.
    with function_wrappers.FunctionScope(
        'test_name', None,
        converter.ConversionOptions(
            optional_features=converter.Feature.NAME_SCOPES)):
      t = constant_op.constant(1)
    self.assertIn('test_name', t.name)

  # NOTE(review): "cotrol" in the method name is a typo; renaming it would
  # change the reported test id, so it is left as is.
  def test_auto_cotrol_deps(self):
    v = variables.Variable(1)
    # AUTO_CONTROL_DEPS should sequence the side-effecting assign before the
    # marked return value, like @tf.function does.
    with function_wrappers.FunctionScope(
        '_', None,
        converter.ConversionOptions(
            optional_features=converter.Feature.AUTO_CONTROL_DEPS)) as scope:
      v.assign(2)
      op = scope.mark_return_value(constant_op.constant(1))
    self.evaluate(op)
    self.assertEqual(self.evaluate(v.read_value()), 2)

  def test_all_disabled(self):
    # With no optional features enabled, the scope is a plain no-op wrapper.
    with function_wrappers.FunctionScope(None, None,
                                         converter.STANDARD_OPTIONS):
      t = constant_op.constant(1)
    self.assertEqual(self.evaluate(t), 1)
# Standard TF test entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/core/function_wrappers_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for naming module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.core import naming
from tensorflow.python.platform import test
class NamerTest(test.TestCase):
  """Tests for naming.Namer."""

  def test_function_name_tracks_names(self):
    subject = naming.Namer({})
    self.assertEqual('tf__foo', subject.function_name('foo'))
    self.assertEqual('tf__bar', subject.function_name('bar'))
    # Every generated name is recorded.
    self.assertItemsEqual(('tf__bar', 'tf__foo'), subject.generated_names)

  def test_function_name_consistent(self):
    subject = naming.Namer({})
    # Asking twice for the same root yields the same name.
    self.assertEqual('tf__foo', subject.function_name('foo'))
    self.assertEqual('tf__foo', subject.function_name('foo'))

  def test_function_name_unsanitized_fqn(self):
    subject = naming.Namer({})
    self.assertEqual('tf__foo_bar', subject.function_name('foo.bar'))
    self.assertEqual('tf__foo_bar_baz',
                     subject.function_name(('foo.bar', 'baz')))

  def test_class_name_basic(self):
    subject = naming.Namer({})
    self.assertEqual('TfFooBar', subject.class_name(('foo', 'Bar')))

  def test_class_name_unsanitized_fqn(self):
    subject = naming.Namer({})
    self.assertEqual('TfFooBarBaz', subject.class_name(('foo.bar', 'Baz')))

  def test_function_name_avoids_global_conflicts(self):
    subject = naming.Namer({'tf__foo': 1})
    self.assertEqual('tf__foo_1', subject.function_name('foo'))

  def test_new_symbol_tracks_names(self):
    subject = naming.Namer({})
    self.assertEqual('temp', subject.new_symbol('temp', set()))
    self.assertItemsEqual(('temp',), subject.generated_names)

  def test_new_symbol_avoids_duplicates(self):
    subject = naming.Namer({})
    self.assertEqual('temp', subject.new_symbol('temp', set()))
    self.assertEqual('temp_1', subject.new_symbol('temp', set()))
    self.assertItemsEqual(('temp', 'temp_1'), subject.generated_names)

  def test_new_symbol_avoids_conflicts(self):
    subject = naming.Namer({'temp': 1})
    # temp is reserved in the global namespace
    self.assertEqual('temp_1', subject.new_symbol('temp', set()))
    # temp_2 is reserved in the local namespace
    self.assertEqual('temp_3', subject.new_symbol('temp', set(('temp_2',))))
    self.assertItemsEqual(('temp_1', 'temp_3'), subject.generated_names)
# Standard TF test entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/core/naming_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converter construction support.
This module contains a base class for all converters, as well as supporting
structures. These structures are referred to as contexts.
The class hierarchy is as follows:
<your converter>
[extends] converter.Base
[extends] transformer.Base
      [extends] gast.NodeTransformer
    [uses] transformer.SourceInfo
  [uses] converter.EntityContext
    [uses] converter.ProgramContext
      [uses] transformer.SourceInfo
converter.Base is a specialization of transformer.Base for AutoGraph. It's a
very lightweight subclass that adds a `ctx` attribute holding the corresponding
EntityContext object (see below). Note that converters are not reusable, and
`visit` will raise an error if called more than once.
converter.EntityContext contains mutable state associated with an entity that
the converter processes.
converter.ProgramContext contains mutable state across related entities. For
example, when converting several functions that call one another, the
ProgramContext should be shared across these entities.
Below is the overall flow at conversion:
program_ctx = ProgramContext(<entities to convert>, <global settings>, ...)
while <program_ctx has more entities to convert>:
entity, source_info = <get next entity from program_ctx>
entity_ctx = EntityContext(program_ctx, source_info)
for <each ConverterClass>:
converter = ConverterClass(entity_ctx)
# May update entity_ctx and program_ctx
entity = converter.visit(entity)
<add entity's dependencies to program_ctx>
Note that pyct contains a small number of transformers used for static analysis.
These implement transformer.Base, rather than converter.Base, to avoid a
dependency on AutoGraph.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import enum
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import activity
from tensorflow.python.autograph.pyct.static_analysis import liveness
from tensorflow.python.autograph.pyct.static_analysis import reaching_definitions
from tensorflow.python.util.tf_export import tf_export
# TODO(mdan): These contexts can be refactored into first class objects.
# For example, we could define Program and Entity abstractions that hold on
# to the actual entity and have conversion methods.
# TODO(mdan): Add a test specific to this converter.
@tf_export('autograph.experimental.Feature')
class Feature(enum.Enum):
  """This enumeration represents optional conversion options.

  These conversion options are experimental. They are subject to change without
  notice and offer no guarantees.

  _Example Usage_

  ```python
  optionals= tf.autograph.experimental.Feature.EQUALITY_OPERATORS
  @tf.function(experimental_autograph_options=optionals)
  def f(i):
    if i == 0:  # EQUALITY_OPERATORS allows the use of == here.
      tf.print('i is zero')
  ```

  Attributes:
    ALL: Enable all features.
    AUTO_CONTROL_DEPS: Insert of control dependencies in the generated code.
    ASSERT_STATEMENTS: Convert Tensor-dependent assert statements to tf.Assert.
    BUILTIN_FUNCTIONS: Convert builtin functions applied to Tensors to
      their TF counterparts.
    EQUALITY_OPERATORS: Whether to convert the comparison operators, like
      equality. This is soon to be deprecated as support is being added to the
      Tensor class.
    LISTS: Convert list idioms, like initializers, slices, append, etc.
    NAME_SCOPES: Insert name scopes that name ops according to context, like the
      function they were defined in.
  """

  # Values are the member names themselves, so str(feature) round-trips
  # through the generated code (see ConversionOptions.to_ast).
  ALL = 'ALL'

  AUTO_CONTROL_DEPS = 'AUTO_CONTROL_DEPS'
  ASSERT_STATEMENTS = 'ASSERT_STATEMENTS'
  BUILTIN_FUNCTIONS = 'BUILTIN_FUNCTIONS'
  EQUALITY_OPERATORS = 'EQUALITY_OPERATORS'
  LISTS = 'LISTS'
  NAME_SCOPES = 'NAME_SCOPES'

  @classmethod
  def all(cls):
    """Returns a tuple that enables all options."""
    return tuple(cls.__members__.values())

  @classmethod
  def all_but(cls, exclude):
    """Returns a tuple that enables all but the excluded options."""
    if not isinstance(exclude, (list, tuple, set)):
      exclude = (exclude,)
    # ALL is excluded as well, since the remaining set is no longer "all".
    return tuple(set(cls.all()) - set(exclude) - {cls.ALL})
STANDARD_OPTIONS = None  # Forward definition; assigned below, after ConversionOptions is defined.
class ConversionOptions(object):
  """Immutable container for global conversion flags.

  Attributes:
    recursive: bool, whether to recursively convert any user functions or
      classes that the converted function may use.
    user_requested: bool, whether the conversion was explicitly requested by
      the user, as opposed to being performed as a result of other logic. This
      value always auto-resets to False in child conversions.
    optional_features: Union[Feature, Set[Feature]], controls the use of
      optional features in the conversion process. See Feature for available
      options.
  """

  def __init__(self,
               recursive=False,
               user_requested=False,
               internal_convert_user_code=True,
               optional_features=Feature.ALL):
    self.recursive = recursive
    self.user_requested = user_requested
    # TODO(mdan): Rename to conversion_recursion_depth?
    self.internal_convert_user_code = internal_convert_user_code

    # Normalize to a frozenset so the options are hashable and immutable.
    if optional_features is None:
      optional_features = ()
    elif isinstance(optional_features, Feature):
      optional_features = (optional_features,)
    optional_features = frozenset(optional_features)
    self.optional_features = optional_features

  def as_tuple(self):
    """Returns all flags as a tuple; the basis for __hash__ and __eq__."""
    return (self.recursive, self.user_requested,
            self.internal_convert_user_code, self.optional_features)

  def __hash__(self):
    return hash(self.as_tuple())

  def __eq__(self, other):
    assert isinstance(other, ConversionOptions)
    return self.as_tuple() == other.as_tuple()

  def __str__(self):
    # Bug fix: the format template was previously returned verbatim, without
    # ever being filled in ('ConversionOptions[{}]').
    return 'ConversionOptions[{}]'.format(', '.join(
        str(v) for v in self.as_tuple()))

  def uses(self, feature):
    """Returns True if `feature` (or ALL) is among the optional features."""
    return (Feature.ALL in self.optional_features or
            feature in self.optional_features)

  def call_options(self):
    """Returns the corresponding options to be used for recursive conversion."""
    # user_requested intentionally resets to False for child conversions.
    return ConversionOptions(
        recursive=self.recursive,
        user_requested=False,
        internal_convert_user_code=self.recursive,
        optional_features=self.optional_features)

  def to_ast(self):
    """Returns a representation of this object as an AST node.

    The AST node encodes a constructor that would create an object with the
    same contents.

    Returns:
      ast.Node
    """
    if self == STANDARD_OPTIONS:
      # The generated code aliases the standard options as ag__.STD.
      return parser.parse_expression('ag__.STD')

    template = """
      ag__.ConversionOptions(
          recursive=recursive_val,
          user_requested=user_requested_val,
          optional_features=optional_features_val,
          internal_convert_user_code=internal_convert_user_code_val)
    """

    def list_of_features(values):
      return parser.parse_expression('({})'.format(', '.join(
          'ag__.{}'.format(str(v)) for v in values)))

    expr_ast = templates.replace(
        template,
        recursive_val=parser.parse_expression(str(self.recursive)),
        user_requested_val=parser.parse_expression(str(self.user_requested)),
        internal_convert_user_code_val=parser.parse_expression(
            str(self.internal_convert_user_code)),
        optional_features_val=list_of_features(self.optional_features))
    return expr_ast[0].value
# The options used for internal (non-user-requested) conversions; see the
# forward definition above and ConversionOptions.to_ast, which special-cases
# this value as ag__.STD.
STANDARD_OPTIONS = ConversionOptions(
    recursive=True,
    user_requested=False,
    internal_convert_user_code=True,
    optional_features=None)
class ProgramContext(
    collections.namedtuple('ProgramContext', ('options', 'autograph_module'))):
  """ProgramContext keeps track of converting function hierarchies.

  This object is mutable, and is updated during conversion. Not thread safe.

  Attributes:
    options: ConversionOptions
    autograph_module: Module, a reference to the autograph module. This needs to
      be specified by the caller to avoid circular dependencies.
  """
  # Note: a redundant `pass` was removed; the docstring is a valid class body.
class EntityContext(transformer.Context):
  """Tracks the conversion of a single entity.

  This object is mutable, and is updated during conversion. Not thread safe.

  Attributes:
    namer: Namer
    info: transformer.EntityInfo
    program: ProgramContext
    target_name: Text, name assigned to the converted entity, if any
  """

  def __init__(self, namer, entity_info, program_ctx, target_name=None):
    super(EntityContext, self).__init__(entity_info)
    self.namer = namer
    self.program = program_ctx
    self.target_name = target_name
class Base(transformer.Base):
  """All converters should inherit from this class.

  Attributes:
    ctx: EntityContext
  """

  def __init__(self, ctx):
    super(Base, self).__init__(ctx)
    self._used = False  # Converters are single-shot; guards against reuse.
    self._ast_depth = 0  # Depth of reentrant visit() calls on the same tree.

  def get_definition_directive(self, node, directive, arg, default):
    """Returns the unique directive argument for a symbol.

    See lang/directives.py for details on directives.

    Example:
       # Given a directive in the code:
       ag.foo_directive(bar, baz=1)

       # One can write for an AST node Name(id='bar'):
       get_definition_directive(node, ag.foo_directive, 'baz')

    Args:
      node: ast.AST, the node representing the symbol for which the directive
        argument is needed.
      directive: Callable[..., Any], the directive to search.
      arg: str, the directive argument to return.
      default: Any

    Returns:
      The value of `arg` for the directive, taken from the definitions that
      reach `node`, or `default` when no reaching definition carries it.

    Raises:
      ValueError: if conflicting annotations have been found
    """
    defs = anno.getanno(node, anno.Static.ORIG_DEFINITIONS, ())
    if not defs:
      return default

    arg_values_found = []
    for def_ in defs:
      if (directive in def_.directives and arg in def_.directives[directive]):
        arg_values_found.append(def_.directives[directive][arg])

    if not arg_values_found:
      return default
    if len(arg_values_found) == 1:
      return arg_values_found[0]

    # If multiple annotations reach the symbol, they must all match. If they do,
    # return any of them.
    first_value = arg_values_found[0]
    for other_value in arg_values_found[1:]:
      if not ast_util.matches(first_value, other_value):
        qn = anno.getanno(node, anno.Basic.QN)
        raise ValueError('%s has ambiguous annotations for %s(%s): %s, %s' %
                         (qn, directive.__name__, arg,
                          compiler.ast_to_source(other_value).strip(),
                          compiler.ast_to_source(first_value).strip()))
    return first_value

  def visit(self, node):
    # The reuse check only applies at the top level; nested visits within the
    # same traversal are expected and tracked via _ast_depth.
    if not self._ast_depth:
      if self._used:
        raise ValueError('converter objects cannot be reused')
      self._used = True

    self._ast_depth += 1
    try:
      return super(Base, self).visit(node)
    finally:
      self._ast_depth -= 1
class AnnotatedDef(reaching_definitions.Definition):
  """A reaching definition that also carries AutoGraph directive annotations.

  The `directives` dict maps a directive callable to the keyword arguments it
  was invoked with at the definition site.
  """

  def __init__(self):
    super(AnnotatedDef, self).__init__()
    self.directives = {}
class AgAnno(enum.Enum):
  """Annotation labels specific to AutoGraph. See anno.py."""

  DIRECTIVES = 'User directives associated with the annotated statement.'

  def __repr__(self):
    # Render just the label name (e.g. "DIRECTIVES") rather than the default
    # verbose "<AgAnno.DIRECTIVES: '...'>" form.
    return self.name
def standard_analysis(node, context, is_initial=False):
  """Performs a complete static analysis of the given code.

  Args:
    node: ast.AST
    context: converter.EntityContext
    is_initial: bool, whether this is the initial analysis done on the input
      source code

  Returns:
    ast.AST, same as node, with the static analysis annotations added
  """
  # TODO(mdan): Clear static analysis here.
  # TODO(mdan): Consider not running all analyses every time.
  # TODO(mdan): Don't return a node because it's modified by reference.
  graphs = cfg.build(node)
  node = qual_names.resolve(node)
  node = activity.resolve(node, context, None)
  node = reaching_definitions.resolve(node, context, graphs, AnnotatedDef)
  node = liveness.resolve(node, context, graphs)
  if is_initial:
    # Snapshot the reaching definitions under ORIG_DEFINITIONS so later
    # passes can still see the annotations computed on the unconverted code.
    anno.dup(
        node,
        {
            anno.Static.DEFINITIONS: anno.Static.ORIG_DEFINITIONS,
        },
    )
  return node
def apply_(node, context, converter_module):
  """Applies a converter to an AST.

  Args:
    node: ast.AST
    context: converter.EntityContext
    converter_module: converter.Base

  Returns:
    ast.AST, the result of applying converter to node
  """
  # Run the standard static analyses first so the converter sees up-to-date
  # annotations, then let the converter transform the tree.
  analyzed = standard_analysis(node, context)
  return converter_module.transform(analyzed, context)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/core/converter.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Thread-local context managers for AutoGraph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import enum
# Thread-local storage; holds each thread's stack of ControlStatusCtx objects.
stacks = threading.local()
def _control_ctx():
  """Returns the current thread's stack of control status contexts."""
  try:
    return stacks.control_status
  except AttributeError:
    # First access on this thread: seed the stack with the default context.
    stacks.control_status = [_default_control_status_ctx()]
  return stacks.control_status
def control_status_ctx():
  """Returns the innermost (currently active) control status context."""
  return _control_ctx()[-1]
class Status(enum.Enum):
  """Tracks whether the user explicitly enabled or disabled AutoGraph."""

  UNSPECIFIED = 0
  ENABLED = 1
  DISABLED = 2
class ControlStatusCtx(object):
  """A context that tracks whether autograph is enabled by the user."""

  def __init__(self, status, options=None):
    self.status = status
    self.options = options

  def __enter__(self):
    # Make this context the active one for the current thread.
    _control_ctx().append(self)
    return self

  def __exit__(self, unused_type, unused_value, unused_traceback):
    # Contexts must be exited strictly in LIFO order.
    assert _control_ctx()[-1] is self
    _control_ctx().pop()

  def __repr__(self):
    return '{}[status={}, options={}]'.format(
        self.__class__.__name__, self.status, self.options)
def _default_control_status_ctx():
  """Returns the context assumed when the user has specified nothing."""
  return ControlStatusCtx(status=Status.UNSPECIFIED)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/core/ag_ctx.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for tests in this module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import imp
import sys
import six
from tensorflow.python.autograph import operators
from tensorflow.python.autograph import utils
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.core import function_wrappers
from tensorflow.python.autograph.core import naming
from tensorflow.python.autograph.lang import special_functions
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.autograph.pyct import origin_info
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import pretty_printer
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.platform import test
class TestCase(test.TestCase):
  """Base class for unit tests in this module. Contains relevant utilities."""

  @contextlib.contextmanager
  def assertPrints(self, expected_result):
    # Redirects sys.stdout for the duration of the with block and asserts
    # that everything printed equals expected_result.
    try:
      out_capturer = six.StringIO()
      sys.stdout = out_capturer
      yield
      self.assertEqual(out_capturer.getvalue(), expected_result)
    finally:
      sys.stdout = sys.__stdout__

  @contextlib.contextmanager
  def compiled(self, node, namespace, symbols=()):
    """Compiles `node` and yields the resulting module with fakes attached.

    The module is given fake `tf` and `ag__` attributes so converted code can
    run without a real TensorFlow/AutoGraph runtime.
    """
    source = None

    # Records the (args, kwargs) of every converted_call made by the
    # compiled code, for inspection by tests.
    self.dynamic_calls = []

    # See api.converted_call
    def converted_call(f, unused_opts, args, kwargs, unused_function_ctx):
      """Mock version of api.converted_call."""
      self.dynamic_calls.append((args, kwargs))
      if kwargs is None:
        kwargs = {}
      return f(*args, **kwargs)

    try:
      result, source, source_map = compiler.ast_to_object(
          node, include_source_map=True)
      # TODO(mdan): Move the unparsing from converter into pyct and reuse here.

      # TODO(mdan): Move this into self.prepare()
      result.tf = self.make_fake_mod('fake_tf', *symbols)
      fake_ag = self.make_fake_mod('fake_ag', converted_call,
                                   converter.ConversionOptions)
      fake_ag.__dict__.update(operators.__dict__)
      fake_ag.__dict__.update(special_functions.__dict__)
      fake_ag.ConversionOptions = converter.ConversionOptions
      fake_ag.Feature = converter.Feature
      fake_ag.utils = utils
      fake_ag.FunctionScope = function_wrappers.FunctionScope
      result.ag__ = fake_ag
      result.ag_source_map__ = source_map
      for k, v in namespace.items():
        result.__dict__[k] = v
      yield result
    except Exception:  # pylint:disable=broad-except
      # Dump whatever we have (AST or generated source) to aid debugging.
      if source is None:
        print('Offending AST:\n%s' % pretty_printer.fmt(node, color=False))
      else:
        print('Offending compiled code:\n%s' % source)
      raise

  @contextlib.contextmanager
  def converted(self, entity, converter_module, namespace, tf_symbols=()):
    # Converts `entity` through the given converter(s) and yields the
    # compiled module containing the result.
    node, ctx = self.prepare(entity, namespace)

    if not isinstance(converter_module, (list, tuple)):
      converter_module = (converter_module,)
    for i, m in enumerate(converter_module):
      # Only the first converter in the chain runs the initial analysis.
      node = converter.standard_analysis(node, ctx, is_initial=not i)
      node = m.transform(node, ctx)

    with self.compiled(node, namespace, tf_symbols) as result:
      yield result

  def make_fake_mod(self, name, *symbols):
    """Creates a new module object exposing the given symbols by name."""
    fake_mod = imp.new_module(name)
    for s in symbols:
      if hasattr(s, '__name__'):
        setattr(fake_mod, s.__name__, s)
      elif hasattr(s, 'name'):
        # This is a bit of a hack, but works for things like tf.int32
        setattr(fake_mod, s.name, s)
      else:
        raise ValueError('can not attach %s - what should be its name?' % s)
    return fake_mod

  def attach_namespace(self, module, **ns):
    # Copies the keyword arguments onto the module as attributes.
    for k, v in ns.items():
      setattr(module, k, v)

  def prepare(self, test_fn, namespace, recursive=True):
    """Parses `test_fn` and runs the initial static analysis on it.

    Returns:
      Tuple of (ast.AST, converter.EntityContext) ready for conversion.
    """
    namespace['ConversionOptions'] = converter.ConversionOptions

    future_features = ('print_function', 'division')
    node, source = parser.parse_entity(test_fn, future_features=future_features)
    namer = naming.Namer(namespace)
    program_ctx = converter.ProgramContext(
        options=converter.ConversionOptions(recursive=recursive),
        autograph_module=None)
    entity_info = transformer.EntityInfo(
        source_code=source,
        source_file='<fragment>',
        future_features=future_features,
        namespace=namespace)
    ctx = converter.EntityContext(
        namer, entity_info, program_ctx, 'test_fn')
    origin_info.resolve_entity(node, source, test_fn)
    node = converter.standard_analysis(node, ctx, is_initial=True)
    return node, ctx
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/core/converter_testing.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for converter module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.core import converter_testing
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.platform import test
class TestConverter(converter.Base):
  """Minimal concrete converter used to exercise converter.Base."""
  pass
class ConversionOptionsTest(converter_testing.TestCase):

  def test_to_ast(self):
    # Round-trips ConversionOptions through its AST form: serialize with
    # to_ast, splice into a function template, recompile and execute.
    opts = converter.ConversionOptions()
    opts_ast = opts.to_ast()

    template = '''
    def test_fn():
      return opts_ast
    '''
    opts_packed = templates.replace(template, opts_ast=opts_ast)

    reparsed, _, _ = compiler.ast_to_object(opts_packed)
    reparsed.__dict__['ag__'] = self.make_fake_mod(
        'fake_ag', converter.ConversionOptions, converter.Feature)

    reparsed_opts = reparsed.test_fn()

    self.assertEqual(opts.recursive, reparsed_opts.recursive)
    # NOTE(review): unlike the surrounding assertions, this one compares
    # against the literal False rather than reparsed_opts.user_requested —
    # presumably checking the default value, but worth confirming.
    self.assertEqual(opts.user_requested, False)
    self.assertEqual(
        opts.internal_convert_user_code,
        reparsed_opts.internal_convert_user_code)
    self.assertEqual(opts.optional_features, reparsed_opts.optional_features)
class ConverterBaseTest(converter_testing.TestCase):

  def test_get_definition_directive_basic(self):
    # Single reaching definition: its annotated value is returned directly.
    directive_key = object

    def test_fn():
      a = 1
      return a

    ns = {}
    node, ctx = self.prepare(test_fn, ns)
    symbol_a = node.body[1].value
    defs, = anno.getanno(symbol_a, anno.Static.ORIG_DEFINITIONS)
    defs.directives[directive_key] = {
        'test_arg': parser.parse_expression('foo'),
        'other_arg': parser.parse_expression('bar'),
    }
    c = TestConverter(ctx)
    value = c.get_definition_directive(symbol_a, directive_key, 'test_arg',
                                       None)
    self.assertEqual(value.id, 'foo')

  def test_get_definition_directive_default(self):
    # No directive annotations present: the supplied default is returned.
    directive_key = object

    def test_fn():
      a = 1
      return a

    ns = {}
    node, ctx = self.prepare(test_fn, ns)
    symbol_a = node.body[1].value
    c = TestConverter(ctx)
    value = c.get_definition_directive(symbol_a, directive_key, 'test_arg',
                                       parser.parse_expression('default'))
    self.assertEqual(value.id, 'default')

  def test_get_definition_directive_multiple_consistent(self):
    # Multiple reaching definitions agreeing on 'test_arg': the shared value
    # is returned even though 'other_arg' differs.
    directive_key = object

    def test_fn():
      a = 1
      if a:
        a = 2
      return a

    ns = {}
    node, ctx = self.prepare(test_fn, ns)
    symbol_a = node.body[2].value
    defs = anno.getanno(symbol_a, anno.Static.ORIG_DEFINITIONS)
    defs[0].directives[directive_key] = {
        'test_arg': parser.parse_expression('foo'),
        'other_arg': parser.parse_expression('bar'),
    }
    defs[1].directives[directive_key] = {
        'test_arg': parser.parse_expression('foo'),
        'other_arg': parser.parse_expression('baz'),
    }
    c = TestConverter(ctx)
    value = c.get_definition_directive(symbol_a, directive_key, 'test_arg',
                                       None)
    self.assertEqual(value.id, 'foo')

  def test_get_definition_directive_multiple_inconsistent(self):
    # Multiple reaching definitions disagreeing on 'test_arg': ValueError.
    directive_key = object

    def test_fn():
      a = 1
      if a:
        a = 2
      return a

    ns = {}
    node, ctx = self.prepare(test_fn, ns)
    symbol_a = node.body[2].value
    defs = anno.getanno(symbol_a, anno.Static.ORIG_DEFINITIONS)
    defs[0].directives[directive_key] = {
        'test_arg': parser.parse_expression('foo'),
    }
    defs[1].directives[directive_key] = {
        'test_arg': parser.parse_expression('bar'),
    }
    c = TestConverter(ctx)
    with self.assertRaises(ValueError):
      c.get_definition_directive(symbol_a, directive_key, 'test_arg', None)
if __name__ == '__main__':
  # Standard TensorFlow test entry point.
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/core/converter_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities used to capture Python idioms."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class Undefined(object):
  """Sentinel standing in for a Python symbol that has no value yet.

  AutoGraph's functional loops require every loop variable to exist before
  the loop starts. Plain Python tolerates code like:

    while n > 0:
      n = n - 1
      s = n
    return s  # Runtime error iff the loop body never ran (n == 0)

  To stay close to those Python semantics when staging, loop variables that
  may be unset are initialized to an Undefined instance:

    s = Undefined('s')
    init_state = (s,)
    s = while_loop(cond, body, init_state)
    return s  # s is still an Undefined instance if the loop never ran

  Attributes:
    symbol_name: Text, identifier for the undefined symbol
  """

  def __init__(self, symbol_name):
    # TODO(aqj) Possibly remove this after Symbols are fully integrated.
    self.symbol_name = symbol_name
def is_undefined(value):
  """Checks whether Autograph has determined that a given value is undefined.

  This only works in places where Autograph reifies undefined symbols. Note that
  if this function is passed a truly undefined symbol the call-site will raise
  NameError.

  Args:
    value: value to test for undefinedness

  Returns:
    Boolean, whether the input value is undefined.
  """
  return isinstance(value, Undefined)
# TODO(mdan): Refactor as a RetVal object, aggregating the value and do_return.
class UndefinedReturnValue(object):
  """Sentinel for a function's default return value (None in Python)."""
def retval(value):
  """Returns the actual value that a return statement should produce."""
  # The sentinel maps to Python's implicit return value, None.
  return None if isinstance(value, UndefinedReturnValue) else value
def is_undefined_return(value):
  """Checks whether `value` is the default return value.

  Returns:
    Boolean, True iff `value` is the UndefinedReturnValue sentinel.
  """
  return isinstance(value, UndefinedReturnValue)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/operators/special_values.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Logical boolean operators: not, and, or."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_math_ops
def not_(a):
  """Functional form of "not". Dispatches on the operand's type."""
  return _tf_not(a) if tensor_util.is_tensor(a) else _py_not(a)
def _tf_not(a):
  """Implementation of the "not_" operator for TensorFlow."""
  return gen_math_ops.logical_not(a)
def _py_not(a):
  """Default Python implementation of the "not_" operator."""
  return not a
def and_(a, b):
  """Functional form of "and". Uses lazy evaluation semantics.

  Args:
    a: Callable producing the first operand; always invoked.
    b: Callable producing the second operand; only invoked when needed.
  """
  first = a()
  if tensor_util.is_tensor(first):
    return _tf_lazy_and(first, b)
  return _py_lazy_and(first, b)
def _tf_lazy_and(cond, b):
  """Lazy-eval equivalent of "and" for Tensors."""
  # TODO(mdan): Enforce cond is scalar here?
  # b is only evaluated inside the true branch, mirroring Python's
  # short-circuit semantics.
  return control_flow_ops.cond(cond, b, lambda: cond)
def _py_lazy_and(cond, b):
"""Lazy-eval equivalent of "and" in Python."""
return cond and b()
def or_(a, b):
  """Functional form of "or". Uses lazy evaluation semantics.

  Args:
    a: Callable producing the first operand; always invoked.
    b: Callable producing the second operand; only invoked when needed.
  """
  first = a()
  if tensor_util.is_tensor(first):
    return _tf_lazy_or(first, b)
  return _py_lazy_or(first, b)
def _tf_lazy_or(cond, b):
  """Lazy-eval equivalent of "or" for Tensors."""
  # TODO(mdan): Enforce cond is scalar here?
  # b is only evaluated inside the false branch, mirroring Python's
  # short-circuit semantics.
  return control_flow_ops.cond(cond, lambda: cond, b)
def _py_lazy_or(cond, b):
"""Lazy-eval equivalent of "or" in Python."""
return cond or b()
def eq(a, b):
  """Functional form of "equal". Tensor operands dispatch to TF equality."""
  any_tensor = tensor_util.is_tensor(a) or tensor_util.is_tensor(b)
  return _tf_equal(a, b) if any_tensor else _py_equal(a, b)
def _tf_equal(a, b):
  """Overload of "equal" for Tensors."""
  return gen_math_ops.equal(a, b)
def _py_equal(a, b):
  """Overload of "equal" that falls back to Python's default implementation."""
  return a == b
def not_eq(a, b):
  """Functional form of "not-equal".

  Composed as the negation of eq so both operators share dispatch logic.
  """
  return not_(eq(a, b))
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/operators/logical.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for python_lang_utils module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.operators import special_values
from tensorflow.python.platform import test
class SpecialValuesTest(test.TestCase):

  def test_undefined(self):
    undefined_symbol = special_values.Undefined('name')
    self.assertEqual(undefined_symbol.symbol_name, 'name')

    undefined_symbol2 = special_values.Undefined('name')
    # Undefined does not define __eq__, so two instances with the same name
    # still compare unequal (identity semantics).
    self.assertNotEqual(undefined_symbol, undefined_symbol2)

    self.assertTrue(special_values.is_undefined(undefined_symbol))
    self.assertTrue(special_values.is_undefined(undefined_symbol2))
if __name__ == '__main__':
  # Standard TensorFlow test entry point.
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/operators/special_values_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Structures that allow uniform control over the dispatch process."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
# TODO(mdan): This is where macro override controls fit.
class DispatchContext(
    collections.namedtuple('DispatchContext', ['options'])):
  """Carries extra parameters into operator-specific implementations.

  Attributes:
    options: Optional dict of extra arguments that may be required by specific
      implementations.
  """

  def option(self, name):
    # Raises KeyError when an implementation requests an option that the
    # caller did not supply.
    return self.options[name]
# Shared default context for call sites that have no extra options to pass.
NO_CTX = DispatchContext(options={})
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/operators/dispatch_context.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Control flow statements: loops, conditionals, etc.
Note: most of these operators accept pairs of get_state/set_state functions, to
capture mutations that the corresponding code blocks might make. These
mutations only need to be captured when staging the control flow, and they just
work when reverting to Python behavior.
__Examples__
```
while cond:
self.x += i
```
When the functionalized version is executed as a Python loop, it just works:
```
def loop_body():
self.x += i # works as expected for Python loops
```
But it won't work for TF loops:
```
def loop_body():
self.x += i # self.x has the wrong value!
```
get_state/set_state allow piping the mutations through the loop variables as
well, in effect changing the loop body:
```
def loop_body(self_x):
self.x = self_x # self.x now has the proper value
self.x += i # the original block
self_x = self.x # write self.x back into the loop vars
return self_x
self_x = tf.while_loop(...)
self.x = self_x # the result is not properly captured
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python.autograph.operators import py_builtins
from tensorflow.python.autograph.operators import special_values
from tensorflow.python.autograph.utils import ag_logging
from tensorflow.python.autograph.utils import misc
from tensorflow.python.autograph.utils import tensors
from tensorflow.python.data.experimental.ops import scan_ops
from tensorflow.python.data.experimental.ops import take_while_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.util import nest
# Module-level tunables for loops executed/unrolled in plain Python.
LIMIT_PYTHON_ITERATIONS = True
PYTHON_MAX_ITERATIONS = 100000000  # Fails in about one minute for empty loops.
WARN_INEFFICIENT_UNROLL = True
INEFFICIENT_UNROLL_MIN_ITERATIONS = 3000
INEFFICIENT_UNROLL_MIN_OPS = 1
def _disallow_undefs_into_loop(*values):
  """Ensures that all values in the state are defined when entering a loop."""
  undefined = tuple(v for v in values if special_values.is_undefined(v))
  if undefined:
    raise ValueError(
        'TensorFlow requires that the following symbols must be defined'
        ' before the loop: {}'.format(tuple(s.symbol_name for s in undefined)))

  if any(special_values.is_undefined_return(v) for v in values):
    # Assumption: the loop will only capture the variable which tracks the
    # return value if the loop contained a return statement.
    # TODO(mdan): This should be checked at the place where return occurs.
    raise ValueError(
        'return statements are not supported within a TensorFlow loop.')
def _shape_greater_than_or_equal(shape1, shape2):
"""Check whether the shape2 is equal or more specific than shape1."""
# The following logic was mirrored from control_flow_ops.py's
# _ShapeLessThanOrEqual function.
if shape1.dims is None:
return True
if shape1.ndims != shape2.ndims:
return False
for dim1, dim2 in zip(shape1.dims, shape2.dims):
if dim1.value is not None and dim1.value != dim2.value:
return False
return True
def _verify_tf_loop_vars(init_loop_vars,
                         first_iter_vars,
                         basic_symbol_names,
                         composite_symbol_names,
                         include_shapes=True):
  """Verifies loop variables for consistency."""

  # The whole point of _verify_tf_loop_vars is to give more useful error message
  # than tf-level exception by including variable names. If it's not available,
  # there is no point at performing this verification here. As of 2019-07-31,
  # operators:control_flow_test does not pass the names.
  if basic_symbol_names is None:
    return

  output_symbol_names = basic_symbol_names + composite_symbol_names

  assert len(init_loop_vars) == len(first_iter_vars) == len(output_symbol_names)

  for init_loop_var, first_iter_var, name in zip(init_loop_vars,
                                                 first_iter_vars,
                                                 output_symbol_names):

    try:
      nest.assert_same_structure(
          init_loop_var, first_iter_var, expand_composites=True)
    except (ValueError, TypeError) as e:
      # Re-raise with the variable's name for a more actionable message.
      raise TypeError('"{}" does not have the same nested structure after one'
                      ' iteration.\n\n{}'.format(name, e))

    def _check_same_type(name, init_loop_var, first_iter_var):
      """Ensures init_loop_var and first_iter_var are consistent."""
      # Python literals are converted to tensors so their dtypes can be
      # compared against the post-iteration tensor values.
      if isinstance(init_loop_var, (bool, int, float, str)):
        init_loop_var = ops.convert_to_tensor_v2(init_loop_var)

      if isinstance(first_iter_var, (bool, int, float, str)):
        first_iter_var = ops.convert_to_tensor_v2(first_iter_var)

      if (not tensor_util.is_tensor(init_loop_var) or
          not tensor_util.is_tensor(first_iter_var)):
        return

      # TODO(mdan): Properly account for CompositeTensors.
      if (not hasattr(init_loop_var, 'dtype') or
          not hasattr(first_iter_var, 'dtype')):
        return
      if (not hasattr(init_loop_var, 'shape') or
          not hasattr(first_iter_var, 'shape')):
        return

      if init_loop_var.dtype != first_iter_var.dtype:
        raise TypeError(
            '"{}" has dtype {} before the loop, but dtype {} after one'
            ' iteration. TensorFlow control flow requires it stays the'
            ' same.'.format(
                name,
                init_loop_var.dtype.name,
                first_iter_var.dtype.name,
            ))

      if include_shapes:
        init_shape = init_loop_var.shape
        first_iter_shape = first_iter_var.shape
        # TODO(b/135183013): Update needed once we support shape_invariants.
        if not _shape_greater_than_or_equal(init_shape, first_iter_shape):
          raise ValueError(
              '"{}" has shape {} before the loop, but shape {} after one'
              ' iteration. TensorFlow control flow requires it stays the'
              ' same or be more specific.'.format(name, init_shape,
                                                  first_iter_shape))

    # Apply the per-leaf check across the (possibly nested) structure.
    nest.map_structure(
        functools.partial(_check_same_type, name), init_loop_var,
        first_iter_var)
def _verify_tf_cond_vars(body_outputs, orelse_outputs, basic_symbol_names,
                         composite_symbol_names):
  """Verifies variables manipulated by a conditional for consistency."""
  # The whole point of _verify_tf_cond_vars is to give more useful error message
  # than tf-level exception by including variable names. If it's not available,
  # there is no point at performing this verification here. As of 2019-07-31,
  # conditional expression does not pass the names.
  if basic_symbol_names is None:
    return

  output_symbol_names = basic_symbol_names + composite_symbol_names

  basic_body_outputs, composite_body_outputs = body_outputs
  basic_orelse_outputs, composite_orelse_outputs = orelse_outputs
  assert isinstance(composite_body_outputs, tuple)
  assert isinstance(composite_orelse_outputs, tuple)

  # TODO(kkimlabs): Make this more consistent.
  # The basic outputs should always be a tuple.
  if not isinstance(basic_body_outputs, tuple):
    basic_body_outputs = (basic_body_outputs,)
  if not isinstance(basic_orelse_outputs, tuple):
    basic_orelse_outputs = (basic_orelse_outputs,)

  body_outputs = basic_body_outputs + composite_body_outputs
  orelse_outputs = basic_orelse_outputs + composite_orelse_outputs

  for body_output, orelse_output, name in zip(body_outputs, orelse_outputs,
                                              output_symbol_names):
    try:
      nest.assert_same_structure(
          body_output, orelse_output, expand_composites=True)
    except (ValueError, TypeError) as e:
      # Re-raise with the variable's name for a more actionable message.
      raise TypeError(
          '"{}" does not have the same nested structure in the TRUE and FALSE'
          ' branches.\n\n{}'.format(name, str(e)))

    def _check_same_type(name, body_output_var, orelse_output_var):
      """Verifies that body_output_var and orelse_output_var have same dtype."""
      # Python literals are converted to tensors so their dtypes can be
      # compared across the two branches.
      if isinstance(body_output_var, (bool, int, float, str)):
        body_output_var = ops.convert_to_tensor_v2(body_output_var)

      if isinstance(orelse_output_var, (bool, int, float, str)):
        orelse_output_var = ops.convert_to_tensor_v2(orelse_output_var)

      if (not tensor_util.is_tensor(body_output_var) or
          not tensor_util.is_tensor(orelse_output_var)):
        return

      # TODO(mdan): Properly account for CompositeTensors.
      if (not hasattr(body_output_var, 'dtype') or
          not hasattr(orelse_output_var, 'dtype')):
        return

      if body_output_var.dtype != orelse_output_var.dtype:
        raise TypeError(
            '"{}" has dtype {} in the TRUE branch, but dtype={} in the FALSE'
            ' branch. TensorFlow control flow requires that they are the'
            ' same.'.format(name, body_output_var.dtype.name,
                            orelse_output_var.dtype.name))

    # Apply the per-leaf check across the (possibly nested) structure.
    nest.map_structure(
        functools.partial(_check_same_type, name), body_output, orelse_output)
def for_stmt(iter_,
             extra_test,
             body,
             get_state,
             set_state,
             init_vars,
             basic_symbol_names=None,
             composite_symbol_names=None):
  """Functional form of a for statement.

  The loop operates on a state, which includes all symbols that are
  variant across loop iterations, excluding the iterate as well as the
  variables local to the loop.

  For example, given the loop below that calculates the geometric and
  arithmetic means or some numbers:

    geo_mean = 1
    arith_mean = 0
    for i in range(n):
      a = numbers[i]
      geo_mean *= a
      arith_mean += a

  The state is represented by the variables geo_mean and arith_mean. The
  argument for initial_state may contain the tuple (1, 0), the body will
  include the arguments geo_mean and arith_mean and will return a tuple
  representing the new values for geo_mean and respectively arith_mean.

  Args:
    iter_: The entity being iterated over.
    extra_test: Callable with the state as arguments, and boolean return type.
      An additional loop condition.
    body: Callable with the iterate and the state as arguments, and state as
      return type. The actual loop body.
    get_state: Additional callable which can capture additional state (such as
      the values of composite symbols). This is only useful when staging the
      loop.
    set_state: Additional callable which save values captured by get_state back
      into the Python environment. This is only useful when staging the loop.
    init_vars: Tuple containing the initial state.
    basic_symbol_names: Tuple containing basic loop var names.
    composite_symbol_names: Tuple containing composite loop var names.

  Returns:
    Tuple containing the final state.
  """
  # Dispatch on the type of the iterated entity: TF entities are staged into
  # graph ops, everything else falls through to a plain Python loop.
  if tensor_util.is_tensor(iter_):
    if tensors.is_range_tensor(iter_):
      return _tf_range_for_stmt(iter_, extra_test, body, get_state, set_state,
                                init_vars, basic_symbol_names,
                                composite_symbol_names)
    else:
      return _known_len_tf_for_stmt(iter_, extra_test, body, get_state,
                                    set_state, init_vars, basic_symbol_names,
                                    composite_symbol_names)

  if isinstance(iter_, dataset_ops.DatasetV2):
    return _tf_dataset_for_stmt(iter_, extra_test, body, get_state, set_state,
                                init_vars, basic_symbol_names,
                                composite_symbol_names)

  if isinstance(iter_, iterator_ops.IteratorV2):
    return _tf_iterator_for_stmt(iter_, extra_test, body, get_state, set_state,
                                 init_vars, basic_symbol_names,
                                 composite_symbol_names)

  # Note: This experimental interface is subject to change.
  custom_handler = getattr(iter_, '_autograph_for_loop', None)
  if custom_handler is not None:
    # TODO(mdan): TensorFlow-specific verification - handlers should perform it.
    _disallow_undefs_into_loop(*init_vars)
    # TODO(mdan): Enable get_state/set_state separately.
    return custom_handler(extra_test, body, init_vars)

  return _py_for_stmt(iter_, extra_test, body, get_state, set_state, init_vars)
def _py_for_stmt(iter_, extra_test, body, get_state, set_state, init_vars):
"""Overload of for_stmt that executes a Python for loop."""
del get_state, set_state
state = init_vars
for target in iter_:
if extra_test is not None and not extra_test(*state):
break
state = body(target, *state)
return state
def _known_len_tf_for_stmt(iter_, extra_test, body, get_state, set_state,
                           init_vars, basic_symbol_names,
                           composite_symbol_names):
  """Overload of for_stmt that iterates over TF entities that admit a length.

  The iterable is unstacked into a TensorArray and the for loop is lowered to
  a tf.while_loop whose first (internal) loop variable is the iteration index.

  Args:
    iter_: Tensor-like entity whose length `py_builtins.len_` can compute.
    extra_test: Optional callable; an additional loop condition over the state.
    body: Callable with the iterate and the state as arguments; returns the
      new state.
    get_state: Callable capturing additional (composite) state.
    set_state: Callable restoring values captured by get_state.
    init_vars: Tuple containing the initial state.
    basic_symbol_names: Tuple of basic loop var names, or None.
    composite_symbol_names: Tuple of composite loop var names, or None.

  Returns:
    Tuple containing the final state (the internal index is stripped).
  """
  _disallow_undefs_into_loop(*init_vars)
  n = py_builtins.len_(iter_)
  # TODO(b/117628877): Revisit performance once XLA has the necessary support.
  # Note: using a TensorArray creates an extra copy, but can calculate
  # gradients more efficiently than StridedSlice.
  ta = tensor_array_ops.TensorArray(iter_.dtype, size=n)
  iter_ = ta.unstack(iter_)
  def while_body(iterate_index, *loop_vars):
    """Main loop body."""
    iterate = iter_.read(iterate_index)
    new_vars = body(iterate, *loop_vars)
    _verify_tf_loop_vars(loop_vars, new_vars, basic_symbol_names,
                         composite_symbol_names)
    # The index always advances; user state is appended after it.
    loop_vars = (iterate_index + 1,)
    if new_vars:
      loop_vars += new_vars
    return loop_vars
  def while_cond(iterate_index, *loop_vars):
    # extra_test is evaluated lazily so it never runs past the end of iter_.
    if extra_test is not None:
      return control_flow_ops.cond(
          iterate_index < n, lambda: extra_test(*loop_vars), lambda: False)
    return iterate_index < n
  opts = {}
  # TODO(b/134181679): We do not always set maximum_iterations since that
  # is significantly slower on GPU.
  if control_flow_util.GraphOrParentsInXlaContext(ops.get_default_graph()):
    opts['maximum_iterations'] = n
  # Symbol names are passed as None because the index prepended here would
  # misalign them; verification already happened in while_body above.
  results = _tf_while_stmt(
      while_cond,
      while_body,
      get_state,
      set_state,
      (0,) + init_vars,
      None,
      None,
      opts=opts,
  )
  # Note: the iteration index is not returned by the while loop, however
  # if a symbol with the same name exists outside the loop, it will be captured
  # by the loop variables and ultimately updated correctly.
  if isinstance(results, (tuple, list)):
    assert len(results) >= 1  # Has at least the iterate.
    if len(results) > 1:
      results = results[1:]
  else:
    results = ()
  return results
def _tf_range_for_stmt(iter_, extra_test, body, get_state, set_state, init_vars,
                       basic_symbol_names, composite_symbol_names):
  """Overload of for_stmt that iterates over a TF range (and elides it).

  The range tensor is never materialized; the loop iterates directly on the
  start/limit/delta inputs of the range op, carrying the iterate as an extra
  (internal) loop variable.

  Args:
    iter_: Tensor produced by a range op; its op inputs are used directly.
    extra_test: Optional callable; an additional loop condition over the state.
    body: Callable with the iterate and the state as arguments; returns the
      new state.
    get_state: Callable capturing additional (composite) state.
    set_state: Callable restoring values captured by get_state.
    init_vars: Tuple containing the initial state.
    basic_symbol_names: Tuple of basic loop var names, or None.
    composite_symbol_names: Tuple of composite loop var names, or None.

  Returns:
    Tuple containing the final state (the internal iterate is stripped).
  """
  _disallow_undefs_into_loop(*init_vars)
  start, limit, delta = iter_.op.inputs

  def while_body(iterate, *loop_vars):
    new_vars = body(iterate, *loop_vars)
    loop_vars = (iterate + delta,)
    if new_vars:
      loop_vars += new_vars
    return loop_vars

  def while_cond(iterate, *loop_vars):
    """Cond function for `tf.while_loop`."""
    def build_main_test():
      """Main iteration condition."""
      # Note(b/138857806): LogicalAnd is slow on GPU so we avoid adding it if
      # `delta` is a compile time constant.
      delta_const = tensor_util.constant_value(delta)
      if delta_const is not None:
        # Support single element arrays. Note: np.asscalar was deprecated and
        # removed from NumPy; ndarray.item() is the documented equivalent.
        delta_const = delta_const.item()
        if delta_const >= 0:
          return iterate < limit
        else:
          return iterate > limit
      else:
        return math_ops.logical_or(
            math_ops.logical_and(delta >= 0, iterate < limit),
            math_ops.logical_and(delta < 0, iterate > limit))
    main_test = build_main_test()
    if extra_test is not None:
      return control_flow_ops.cond(
          main_test, lambda: extra_test(*loop_vars), lambda: False)
    return main_test

  # The first loopvar corresponds to the iterate variable which is internal.
  if isinstance(basic_symbol_names, tuple):
    basic_symbol_names = (None,) + basic_symbol_names
  opts = {}
  # TODO(b/134181679): We do not always set maximum_iterations since that
  # is significantly slower on GPU.
  if control_flow_util.GraphOrParentsInXlaContext(ops.get_default_graph()):
    # This specific dtype is required by while_loop.
    opts['maximum_iterations'] = math_ops.cast(
        misc.get_range_len(start, limit, delta), dtypes.int32)
  results = _tf_while_stmt(
      while_cond,
      while_body,
      get_state,
      set_state,
      (start,) + init_vars,
      basic_symbol_names,
      composite_symbol_names,
      opts=opts,
  )
  # Note: the iteration index is not returned by the while loop, however
  # if a symbol with the same name exists outside the loop, it will be captured
  # by the loop variables and ultimately updated correctly.
  if isinstance(results, (tuple, list)):
    assert len(results) >= 1  # Has at least the iterate.
    if len(results) > 1:
      results = results[1:]
  else:
    results = ()
  return results
def _tf_iterator_for_stmt(itr, extra_test, body, get_state, set_state,
                          init_vars, basic_symbol_names,
                          composite_symbol_names):
  """Overload of for_stmt that iterates over TF Iterators. See for_loop.

  The for loop is lowered to a tf.while_loop whose first (internal) loop
  variable is a `has_next` boolean; the user body only runs, via tf.cond,
  when the iterator produced a value.
  """
  _disallow_undefs_into_loop(*init_vars)
  def while_body_actual(opt_iterate, *loop_vars):
    """Actual main loop body."""
    new_vars = body(opt_iterate.get_value(), *loop_vars)
    _verify_tf_loop_vars(loop_vars, new_vars, basic_symbol_names,
                         composite_symbol_names)
    # TODO(mdan): Fix this inconsistency in the converter.
    if new_vars is None:
      new_vars = ()
    return new_vars
  def while_body(has_next, loop_vars):
    """Main loop body."""
    opt_iterate = iterator_ops.get_next_as_optional(itr)
    has_next = opt_iterate.has_value()
    if not init_vars:
      # cond_v2 requires at least one state tensor in V1.
      dummy_state = (constant_op.constant(()),)
    else:
      dummy_state = ()
    # TODO(mdan): If tf.while_loop supported Optional, this could be avoided.
    # Both cond branches must return the same structure, hence the dummy is
    # prepended to both and stripped again below.
    new_vars = control_flow_ops.cond(
        has_next,
        lambda: dummy_state + while_body_actual(opt_iterate, *loop_vars),
        lambda: dummy_state + loop_vars,
    )
    if dummy_state:
      new_vars = new_vars[1:]
    return has_next, new_vars
  def while_cond(has_next, loop_vars):
    # extra_test is evaluated lazily, only while the iterator has values.
    if extra_test is not None:
      return control_flow_ops.cond(
          has_next, lambda: extra_test(*loop_vars), lambda: False)
    return has_next
  # The first loopvar corresponds to the iterate variable which is internal.
  # Symbol names are passed as None because the extra has_next var would
  # misalign them; verification already happened in while_body_actual above.
  _, final_vars = _tf_while_stmt(
      while_cond,
      while_body,
      get_state,
      set_state,
      (True, init_vars),
      None,
      None,
      opts=None,
  )
  return final_vars
def _tf_dataset_for_stmt(ds, extra_test, body, get_state, set_state, init_vars,
                         basic_symbol_names, composite_symbol_names):
  """Overload of for_stmt that iterates over TF Datasets."""
  _disallow_undefs_into_loop(*init_vars)
  if extra_test is None:
    # No early stopping needed; a plain dataset reduction suffices.
    return _dataset_for_stmt_no_extra_test(ds, body, get_state, set_state,
                                           init_vars, basic_symbol_names,
                                           composite_symbol_names)
  # Early stopping requires the scan/take_while-based implementation.
  assert init_vars, 'Lowering should always add state.'
  return _dataset_for_stmt_with_extra_test(ds, extra_test, body, get_state,
                                           set_state, init_vars,
                                           basic_symbol_names,
                                           composite_symbol_names)
def _dataset_for_stmt_with_extra_test(ds, extra_test, body, get_state,
                                      set_state, init_vars, basic_symbol_names,
                                      composite_symbol_names):
  """Overload of _dataset_for_stmt with early stopping. See for_stmt.

  Early stopping is implemented by threading the extra_test result through a
  Dataset.scan, truncating the scanned dataset with take_while, and then
  reducing what remains to obtain the final state.
  """
  # TODO(mdan): Simplify this - following it is extremely difficult.
  def scan_body(aug_vars, iterate):
    """The main loop body wrapper. Only calculates the stop condition."""
    loop_vars, state = aug_vars
    def true_fn():
      # Runs only while extra_test holds: applies body and captures any
      # composite-state mutations it made.
      set_state(state)
      outputs = body(iterate, *loop_vars)
      _verify_tf_loop_vars(
          loop_vars + state,
          outputs + state,
          basic_symbol_names,
          composite_symbol_names,
          include_shapes=False)
      return outputs, get_state()
    extra_cond = extra_test(*loop_vars)
    new_vars, new_state = control_flow_ops.cond(
        extra_cond, true_fn, lambda: (loop_vars, state))
    scan_outputs = new_vars, new_state, extra_cond
    # Note: new_aug_vars is the actual state of scan; scan_outputs is its output
    # (hence the redundancy).
    # get_state will pull any mutations that body may have made.
    new_aug_vars = new_vars, new_state
    return new_aug_vars, scan_outputs
  def take_while_predicate(unused_loop_vars, unused_state, extra_cond):
    # Keep elements only while extra_test held when they were scanned.
    return extra_cond
  def reduce_body(unused_aug_vars, scan_outputs):
    # scan already applied body; the reduction just forwards the last outputs.
    output_aug_vars, output_state, extra_cond = scan_outputs
    del extra_cond
    return output_aug_vars, output_state
  init_state = get_state()
  aug_vars = init_vars, init_state
  ds = ds.apply(scan_ops.scan(aug_vars, scan_body))
  ds = ds.apply(take_while_ops.take_while(take_while_predicate))
  final_aug_vars = ds.reduce(aug_vars, reduce_body)
  final_vars, final_state = final_aug_vars
  set_state(final_state)
  return final_vars
def _dataset_for_stmt_no_extra_test(ds, body, get_state, set_state, init_vars,
                                    basic_symbol_names, composite_symbol_names):
  """Overload of _dataset_for_stmt without early stopping. See for_stmt.

  The loop is lowered to a single Dataset.reduce over an augmented
  (loop_vars, state) structure. Because Dataset.reduce does not accept empty
  state tensors, dummy constants are substituted for empty loop vars / empty
  composite state, and stripped again on the way out.
  """
  init_state = get_state()
  assert isinstance(init_vars, tuple)
  assert isinstance(init_state, tuple)
  # Workaround for Dataset.reduce not allowing empty state tensors - create
  # a dummy state variable that remains unused.
  # TODO(mdan): reduce should allow and match empty structures.
  no_vars = not init_vars
  no_state = not init_state
  if no_vars:
    init_vars = (constant_op.constant(0),)
    if isinstance(basic_symbol_names, tuple):
      basic_symbol_names = (None,) + basic_symbol_names
  if no_state:
    init_state = (constant_op.constant(0),)

  def reduce_body(aug_vars, iterate):
    """The main loop body wrapper."""
    loop_vars, state = aug_vars
    if not no_state:
      set_state(state)
    if no_vars:
      # The dummy loop var is threaded through unchanged.
      body(iterate)
      new_vars = loop_vars
    else:
      new_vars = body(iterate, *loop_vars)
    if no_state:
      # The dummy state is threaded through unchanged.
      new_state = state
    else:
      new_state = get_state()
    _verify_tf_loop_vars(
        loop_vars + state,
        new_vars + new_state,
        basic_symbol_names,
        composite_symbol_names,
        include_shapes=False)
    return new_vars, new_state

  # Use init_state (which contains the dummy when the real state is empty)
  # rather than re-reading get_state(): a fresh read would return the empty
  # tuple in the no_state case, defeating the workaround above.
  aug_vars = init_vars, init_state
  final_vars, final_state = ds.reduce(aug_vars, reduce_body)
  # Only push real state back; the dummy must not leak into the environment.
  if not no_state:
    set_state(final_state)
  if no_vars:
    return ()
  return final_vars
def while_stmt(
    test,
    body,
    get_state,
    set_state,
    init_vars,
    basic_symbol_names=None,
    composite_symbol_names=None,
    opts=None,
):
  """Functional form of a while statement.

  The loop operates on a so-called state, which includes all symbols that are
  variant across loop iterations. In what follows we refer to state as either
  a tuple of entities that represent an actual state, or a list of arguments
  of the corresponding types.

  Args:
    test: Callable with the state as arguments, and boolean return type. The
      loop condition.
    body: Callable with the state as arguments, and state as return type. The
      actual loop body.
    get_state: Additional callable which can capture additional state (such as
      the values of composite symbols). This is only useful when staging the
      loop.
    set_state: Additional callable which save values captured by get_state back
      into the Python environment. This is only useful when staging the loop.
    init_vars: Tuple containing the initial state.
    basic_symbol_names: Tuple containing basic loop var names.
    composite_symbol_names: Tuple containing composite loop var names.
    opts: Optional dict of extra loop parameters.

  Returns:
    Tuple containing the final state.
  """
  # Evaluate the initial test once in order to do the dispatch. The evaluation
  # is isolated to minimize unwanted side effects.
  # TODO(mdan): Do a full iteration - some state types might lower to Tensor.
  # Any ops `test` creates go into this throwaway graph and are discarded.
  with func_graph.FuncGraph('tmp').as_default():
    init_test = test(*init_vars)
  # TensorFlow: Multiple evaluations are acceptable in this case, so we're fine
  # with the re-evaluation of `test` that `_tf_while_stmt` will make.
  if tensors.is_dense_tensor(init_test):
    return _tf_while_stmt(test, body, get_state, set_state, init_vars,
                          basic_symbol_names, composite_symbol_names, opts)
  # Normal Python: We already consumed one evaluation of `test`; consistently,
  # unroll one iteration before dispatching to a normal loop.
  # TODO(mdan): Push the "init_test" value via opts into _py_while_stmt?
  if not init_test:
    return init_vars
  init_vars = body(*init_vars)
  return _py_while_stmt(test, body, get_state, set_state, init_vars, opts)
# TODO(kkimlabs): Some callers set basic_symbol_names=None and
# composite_symbol_names=None and call _verify_tf_loop_vars(...) itself. We can
# remove these arguments once all callers do that.
def _tf_while_stmt(test, body, get_state, set_state, init_vars,
                   basic_symbol_names, composite_symbol_names, opts):
  """Overload of while_stmt that stages a TF while_stmt.

  The composite state returned by get_state is appended to the basic loop
  variables so tf.while_loop can carry it across iterations; set_state pushes
  it back into the Python environment before test/body run.

  Args:
    test: Callable with the state as arguments; the loop condition.
    body: Callable with the state as arguments; the loop body.
    get_state: Callable returning a tuple with composite symbol values.
    set_state: Callable restoring values captured by get_state.
    init_vars: Tuple containing the initial state.
    basic_symbol_names: Tuple of basic loop var names, or None.
    composite_symbol_names: Tuple of composite loop var names, or None.
    opts: Optional dict of extra tf.while_loop parameters.

  Returns:
    Tuple containing the final state.
  """
  _disallow_undefs_into_loop(*init_vars)
  if opts is None:
    opts = {}
  # TODO(mdan): Simplify this.
  # Basic loop vars occupy the head of the augmented tuple; composite state
  # occupies the tail.
  loop_vars_slice = slice(len(init_vars))
  state_slice = slice(len(init_vars), None)
  def aug_test(*aug_loop_vars):
    state = aug_loop_vars[state_slice]
    set_state(state)
    return test(*aug_loop_vars[loop_vars_slice])
  def aug_body(*aug_loop_vars):
    state = aug_loop_vars[state_slice]
    set_state(state)
    loop_vars = body(*aug_loop_vars[loop_vars_slice])
    # Re-reading get_state here captures any mutations body made to the
    # composite symbols.
    new_state = loop_vars + get_state()
    _verify_tf_loop_vars(aug_loop_vars, new_state, basic_symbol_names,
                         composite_symbol_names)
    return new_state
  # Non-v2 while_loop unpacks the results when there is only one return value.
  # This enforces consistency across versions.
  opts['return_same_structure'] = True
  aug_init_vars = init_vars + get_state()
  final_aug_vars = control_flow_ops.while_loop(aug_test, aug_body,
                                               aug_init_vars, **opts)
  final_state = final_aug_vars[state_slice]
  set_state(final_state)
  return final_aug_vars[loop_vars_slice]
class _PythonLoopChecker(object):
  """Verifies Python loops for TF-specific limits.

  Tracks the iteration count of an unrolled (Python) loop and warns when the
  loop appears to create TensorFlow ops on each iteration, which usually
  indicates the user meant to write a TF loop instead.
  """

  def __init__(self):
    # Number of completed iterations.
    self.iterations = 0
    # Whether to watch for op creation inside the loop at all.
    self.check_inefficient_unroll = WARN_INEFFICIENT_UNROLL
    # Triggered when we decided to test the op counts.
    self.check_op_count_after_iteration = False
    # Snapshot of the graph's ops, taken by before_iteration once op-count
    # checking is triggered. Initialized here so the attribute always exists.
    self.ops_before_iteration = None

  def _get_ops(self):
    return ops.get_default_graph().get_operations()

  def _check_unroll_limits(self):
    if LIMIT_PYTHON_ITERATIONS and self.iterations > PYTHON_MAX_ITERATIONS:
      raise ValueError('iteration limit exceeded')

  def _stop_checking_inefficient_unroll(self):
    self.check_inefficient_unroll = False
    self.ops_before_iteration = None

  def _verify_inefficient_unroll(self):
    """Checks for possibly-inefficient creation of ops in a Python loop."""
    assert self.ops_before_iteration is not None
    ops_after_iteration = self._get_ops()
    new_ops = tuple(
        op for op in ops_after_iteration if op not in self.ops_before_iteration)
    if len(new_ops) < INEFFICIENT_UNROLL_MIN_OPS:
      return False
    # TODO(mdan): Add location information.
    ag_logging.warn(
        'TensorFlow ops are being created in a Python loop with large number'
        ' of iterations. This can lead to slow startup. Did you mean to use a'
        ' TensorFlow loop? For example, `while True:` is a Python loop, and'
        ' `while tf.constant(True):` is a TensorFlow loop. The following'
        ' ops were created after iteration %s: %s', self.iterations, new_ops)
    return True

  def before_iteration(self):
    """Called before each iteration in a Python loop."""
    if (self.check_inefficient_unroll and
        self.iterations > INEFFICIENT_UNROLL_MIN_ITERATIONS):
      self.ops_before_iteration = self._get_ops()
      self.check_op_count_after_iteration = True

  def after_iteration(self):
    """Called after each iteration in a Python loop."""
    self.iterations += 1
    self._check_unroll_limits()
    if self.check_inefficient_unroll and self.check_op_count_after_iteration:
      did_warn = self._verify_inefficient_unroll()
      if did_warn:
        self._stop_checking_inefficient_unroll()  # Only warn once.
      elif self.iterations > INEFFICIENT_UNROLL_MIN_ITERATIONS + 3:
        # Once deciding to check the op counts, only do it for a few iterations.
        self._stop_checking_inefficient_unroll()
def _py_while_stmt(test, body, get_state, set_state, init_vars, opts):
  """Overload of while_stmt that executes a Python while loop."""
  del opts, get_state, set_state  # Unused in the eager/Python path.
  # The checker guards against accidental op creation / runaway unrolling;
  # it is only active in debug builds.
  checker = _PythonLoopChecker() if __debug__ else None
  loop_vars = init_vars
  while test(*loop_vars):
    if checker is not None:
      checker.before_iteration()
    loop_vars = body(*loop_vars)
    if checker is not None:
      checker.after_iteration()
  return loop_vars
def if_stmt(cond,
            body,
            orelse,
            get_state,
            set_state,
            basic_symbol_names=None,
            composite_symbol_names=None):
  """Functional form of an if statement.

  Args:
    cond: Boolean.
    body: Callable with no arguments, and outputs of the positive (if) branch as
      return type.
    orelse: Callable with no arguments, and outputs of the negative (else)
      branch as return type.
    get_state: Function that returns a tuple containing the values of all
      composite symbols modified within the conditional. This allows access to
      state that branches may mutate through side effects. This function is not
      needed and should not be called when dispatching to code matching Python's
      default semantics. This is useful for checkpointing to avoid unintended
      side-effects when staging requires evaluating all code-paths.
    set_state: Function to set the values of all composite symbols modified
      within the conditional. This is the complement to get_state, used to
      restore checkpointed values. The single argument is a tuple containing
      values for each composite symbol that may be modified in a branch of the
      conditional. This is usually the result of a call to get_state.
    basic_symbol_names: Tuple containing basic loop var names.
    composite_symbol_names: Tuple containing composite loop var names.

  Returns:
    Tuple containing the statement outputs.
  """
  # Note: tf.cond doesn't support SparseTensor.
  if tensors.is_dense_tensor(cond):
    return tf_if_stmt(cond, body, orelse, get_state, set_state,
                      basic_symbol_names, composite_symbol_names)
  else:
    return _py_if_stmt(cond, body, orelse)
def tf_if_stmt(cond, body, orelse, get_state, set_state, basic_symbol_names,
               composite_symbol_names):
  """Overload of if_stmt that stages a TF cond.

  Both branches are wrapped to reject undefined symbols and to isolate their
  side effects on composite state; tf.cond traces both, and the final state is
  restored afterwards.
  """
  body = _wrap_disallow_undefs_from_cond(body, branch_name='if')
  orelse = _wrap_disallow_undefs_from_cond(orelse, branch_name='else')
  body = _isolate_state(body, get_state, set_state)
  orelse = _isolate_state(orelse, get_state, set_state)
  # `state` currently includes the values of any composite symbols (e.g. `a.b`)
  # composites modified by the loop. `final_vars` includes the values of basic
  # symbols (e.g. `a`) which cannot be passed by reference and must be returned.
  # See _isolate_state.
  # TODO(mdan): We should minimize calls to get/set_state.
  body_branch = 0
  orelse_branch = 1
  # Shared mutable cell so each branch can see the other's outputs once both
  # have been traced by tf.cond.
  result = [None, None]
  def error_checking_body():
    result[body_branch] = body()
    if result[orelse_branch] is not None:
      # The other branch was traced first; verify both produce consistent vars.
      _verify_tf_cond_vars(result[body_branch], result[orelse_branch],
                           basic_symbol_names, composite_symbol_names)
    return result[body_branch]
  def error_checking_orelse():
    result[orelse_branch] = orelse()
    if result[body_branch] is not None:
      _verify_tf_cond_vars(result[body_branch], result[orelse_branch],
                           basic_symbol_names, composite_symbol_names)
    return result[orelse_branch]
  final_vars, final_state = control_flow_ops.cond(cond, error_checking_body,
                                                  error_checking_orelse)
  set_state(final_state)
  return final_vars
def _isolate_state(func, get_state, set_state):
"""Wraps func to (best-effort) isolate state mutations that func may do.
The simplest example of state mutation is mutation of variables (via e.g.
attributes), or modification of globals.
This allows us to more safely execute this function without worrying about
side effects when the function wasn't normally expected to execute. For
example, staging requires that the function is executed ahead of time, and
we need to ensure its effects are not observed during normal execution.
Args:
func: () -> Any
get_state: () -> Any, returns the current state
set_state: (Any) -> None, resets the state to the specified values.
Typically the result of an earlier call to `get_state`.
Returns:
Tuple[Any, Any], where the first element is the return value of `func`,
and the second is the final state values.
"""
def wrapper():
init_state = get_state()
new_vars = func()
# TODO(mdan): These should be copies, lest set_state might affect them.
new_state = get_state()
set_state(init_state)
return new_vars, new_state
return wrapper
def _wrap_disallow_undefs_from_cond(func, branch_name):
  """Wraps conditional branch to disallow returning undefined symbols."""

  def wrapper():
    """Calls function and raises an error if undefined symbols are returned."""
    branch_outputs = func()
    if isinstance(branch_outputs, tuple):
      outputs_tuple = branch_outputs
    else:
      outputs_tuple = (branch_outputs,)
    undefined = [o for o in outputs_tuple if special_values.is_undefined(o)]
    if undefined:
      raise ValueError(
          'The following symbols must also be initialized in the {} branch: {}.'
          ' Alternatively, you may initialize them before the if'
          ' statement.'.format(branch_name,
                               tuple(s.symbol_name for s in undefined)))
    for output in outputs_tuple:
      if special_values.is_undefined_return(output):
        raise ValueError(
            'A value must also be returned from the {} branch. If a value is '
            'returned from one branch of a conditional a value must be '
            'returned from all branches.'.format(branch_name))
    return branch_outputs

  return wrapper
def _py_if_stmt(cond, body, orelse):
"""Overload of if_stmt that executes a Python if statement."""
return body() if cond else orelse()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/operators/control_flow.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This module implements operators that AutoGraph overloads.
Note that "operator" is used loosely here, and includes control structures like
conditionals and loops, implemented in functional form, using for example
closures for the body.
"""
# Naming conventions:
# * operator names match the name usually used for the respective Python
# idiom; examples: for_stmt, list_append
# * operator arguments match either of:
# - the corresponding Python AST attribute (e.g. the condition of an if
# statement is called test) if the operator represents an AST construct
# - the names used in the Python docs, if the operator is a function (e.g.
# list_ and x for append, see
# https://docs.python.org/3.7/tutorial/datastructures.html)
#
# All operators may accept a final argument named "opts", of a type that
# subclasses namedtuple and contains any arguments that are only required
# for some specializations of the operator.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.operators.control_flow import for_stmt
from tensorflow.python.autograph.operators.control_flow import if_stmt
from tensorflow.python.autograph.operators.control_flow import while_stmt
from tensorflow.python.autograph.operators.data_structures import list_append
from tensorflow.python.autograph.operators.data_structures import list_pop
from tensorflow.python.autograph.operators.data_structures import list_stack
from tensorflow.python.autograph.operators.data_structures import ListPopOpts
from tensorflow.python.autograph.operators.data_structures import ListStackOpts
from tensorflow.python.autograph.operators.data_structures import new_list
from tensorflow.python.autograph.operators.exceptions import assert_stmt
from tensorflow.python.autograph.operators.logical import and_
from tensorflow.python.autograph.operators.logical import eq
from tensorflow.python.autograph.operators.logical import not_
from tensorflow.python.autograph.operators.logical import not_eq
from tensorflow.python.autograph.operators.logical import or_
from tensorflow.python.autograph.operators.py_builtins import float_
from tensorflow.python.autograph.operators.py_builtins import int_
from tensorflow.python.autograph.operators.py_builtins import len_
from tensorflow.python.autograph.operators.py_builtins import print_
from tensorflow.python.autograph.operators.py_builtins import range_
from tensorflow.python.autograph.operators.slices import get_item
from tensorflow.python.autograph.operators.slices import GetItemOpts
from tensorflow.python.autograph.operators.slices import set_item
from tensorflow.python.autograph.operators.special_values import is_undefined
from tensorflow.python.autograph.operators.special_values import is_undefined_return
from tensorflow.python.autograph.operators.special_values import retval
from tensorflow.python.autograph.operators.special_values import Undefined
from tensorflow.python.autograph.operators.special_values import UndefinedReturnValue
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/operators/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for logical module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.operators import logical
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class LogicalOperatorsTest(test.TestCase):
  """Tests the AutoGraph `and_`/`or_`/`not_` operator overloads."""

  def assertNotCalled(self):
    # Used as a lazy operand to verify short-circuiting: reaching this callable
    # means the operator evaluated an operand it should have skipped.
    self.fail('this should not be called')

  def _tf_true(self):
    # Lazy operand producing a boolean Tensor, to exercise the TF code path.
    return constant_op.constant(True)

  def _tf_false(self):
    return constant_op.constant(False)

  def test_and_python(self):
    # With plain Python operands, and_ mirrors the `and` keyword: truthiness,
    # returning the deciding operand, and short-circuiting.
    self.assertTrue(logical.and_(lambda: True, lambda: True))
    self.assertTrue(logical.and_(lambda: [1], lambda: True))
    self.assertListEqual(logical.and_(lambda: True, lambda: [1]), [1])
    self.assertFalse(logical.and_(lambda: False, lambda: True))
    self.assertFalse(logical.and_(lambda: False, self.assertNotCalled))

  @test_util.run_deprecated_v1
  def test_and_tf(self):
    with self.cached_session() as sess:
      # A Tensor first operand dispatches to tf.logical_and regardless of the
      # second operand's type.
      t = logical.and_(self._tf_true, self._tf_true)
      self.assertEqual(self.evaluate(t), True)
      t = logical.and_(self._tf_true, lambda: True)
      self.assertEqual(self.evaluate(t), True)
      t = logical.and_(self._tf_false, lambda: True)
      self.assertEqual(self.evaluate(t), False)
    # TODO(mdan): Add a test for ops with side effects.

  def test_or_python(self):
    # Mirrors the `or` keyword, including short-circuiting on a truthy first
    # operand.
    self.assertFalse(logical.or_(lambda: False, lambda: False))
    self.assertFalse(logical.or_(lambda: [], lambda: False))
    self.assertListEqual(logical.or_(lambda: False, lambda: [1]), [1])
    self.assertTrue(logical.or_(lambda: False, lambda: True))
    self.assertTrue(logical.or_(lambda: True, self.assertNotCalled))

  @test_util.run_deprecated_v1
  def test_or_tf(self):
    with self.cached_session() as sess:
      t = logical.or_(self._tf_false, self._tf_true)
      self.assertEqual(self.evaluate(t), True)
      t = logical.or_(self._tf_false, lambda: True)
      self.assertEqual(self.evaluate(t), True)
      t = logical.or_(self._tf_true, lambda: True)
      self.assertEqual(self.evaluate(t), True)
    # TODO(mdan): Add a test for ops with side effects.

  def test_not_python(self):
    # Mirrors the `not` keyword, including truthiness of containers.
    self.assertFalse(logical.not_(True))
    self.assertFalse(logical.not_([1]))
    self.assertTrue(logical.not_([]))

  def test_not_tf(self):
    with self.cached_session() as sess:
      t = logical.not_(self._tf_false())
      self.assertEqual(self.evaluate(t), True)
# Standard TF test runner entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/operators/logical_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for special symbol handling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.operators import special_values
from tensorflow.python.autograph.operators import symbols
from tensorflow.python.platform import test
# Short local aliases for the classes under test.
Undefined = special_values.Undefined
AttributeAccessSymbol = symbols.AttributeAccessSymbol
SubscriptSymbol = symbols.SubscriptSymbol
ValueSymbol = symbols.ValueSymbol
class SymbolsTest(test.TestCase):
def test_value_symbol_returns_value(self):
a = 42
a_symbol = ValueSymbol('a', a)
self.assertEqual(a_symbol.maybe_compute_value(), a)
self.assertEqual(a_symbol.name, 'a')
def test_attribute_access_missing_attribute(self):
class Foo(object):
pass
a = Foo()
a_symbol = ValueSymbol('a', a)
a_b_symbol = AttributeAccessSymbol(a_symbol, 'b')
self.assertEqual(a_symbol.maybe_compute_value(), a)
self.assertIsInstance(a_b_symbol.maybe_compute_value(), Undefined)
self.assertEqual(a_b_symbol.maybe_compute_value().symbol_name, 'a.b')
def test_attribute_access_undefined_target(self):
a = Undefined('a')
a_symbol = ValueSymbol('a', a)
a_b_symbol = AttributeAccessSymbol(a_symbol, 'b')
self.assertEqual(a_symbol.maybe_compute_value(), a)
self.assertIsInstance(a_b_symbol.maybe_compute_value(), Undefined)
self.assertEqual(a_b_symbol.maybe_compute_value().symbol_name, 'a.b')
def test_attribute_access_basic(self):
class Foo(object):
def __init__(self):
self.b = 'this is an attribute'
a = Foo()
a_symbol = ValueSymbol('a', a)
a_b_symbol = AttributeAccessSymbol(a_symbol, 'b')
self.assertEqual(a_symbol.maybe_compute_value(), a)
self.assertEqual(a_b_symbol.maybe_compute_value(), a.b)
def test_item_access_undefined_index(self):
class Foo(object):
def __getitem__(self, key):
return 'this is an item'
a = Foo()
b = Undefined('b')
a_symbol = ValueSymbol('a', a)
b_symbol = ValueSymbol('b', b)
a_b_symbol = SubscriptSymbol(a_symbol, b_symbol)
self.assertEqual(a_symbol.maybe_compute_value(), a)
self.assertEqual(b_symbol.maybe_compute_value(), b)
self.assertIsInstance(a_b_symbol.maybe_compute_value(), Undefined)
self.assertEqual(a_b_symbol.maybe_compute_value().symbol_name, 'a[b]')
def test_item_access_no_getitem(self):
class Foo(object):
pass
a = Foo()
b = 42
a_symbol = ValueSymbol('a', a)
b_symbol = ValueSymbol('b', b)
a_b_symbol = SubscriptSymbol(a_symbol, b_symbol)
self.assertEqual(a_symbol.maybe_compute_value(), a)
self.assertEqual(b_symbol.maybe_compute_value(), b)
self.assertIsInstance(a_b_symbol.maybe_compute_value(), Undefined)
self.assertEqual(a_b_symbol.maybe_compute_value().symbol_name, 'a[b]')
def test_item_access_undefined_root(self):
a = Undefined('a')
b = 42
a_symbol = ValueSymbol('a', a)
b_symbol = ValueSymbol('b', b)
a_b_symbol = SubscriptSymbol(a_symbol, b_symbol)
self.assertEqual(a_symbol.maybe_compute_value(), a)
self.assertEqual(b_symbol.maybe_compute_value(), b)
self.assertIsInstance(a_b_symbol.maybe_compute_value(), Undefined)
self.assertEqual(a_b_symbol.maybe_compute_value().symbol_name, 'a[b]')
def test_item_access_basic(self):
class Foo(object):
def __getitem__(self, key):
return 'this is an item'
a = Foo()
b = 42
a_symbol = ValueSymbol('a', a)
b_symbol = ValueSymbol('b', b)
a_b_symbol = SubscriptSymbol(a_symbol, b_symbol)
self.assertEqual(a_symbol.maybe_compute_value(), a)
self.assertEqual(b_symbol.maybe_compute_value(), b)
self.assertEqual(a_b_symbol.maybe_compute_value(), a[b])
def test_item_access_after_attribute_access(self):
class Foo(object):
def __getitem__(self, key):
return 'this is an item'
class Bar(object):
def __init__(self):
self.b = Foo()
a = Bar()
c = 42
a_symbol = ValueSymbol('a', a)
c_symbol = ValueSymbol('c', c)
a_b_symbol = AttributeAccessSymbol(a_symbol, 'b')
a_b_c_symbol = SubscriptSymbol(a_b_symbol, c_symbol)
self.assertEqual(a_symbol.maybe_compute_value(), a)
self.assertEqual(c_symbol.maybe_compute_value(), c)
self.assertEqual(a_b_symbol.maybe_compute_value(), a.b)
self.assertEqual(a_b_c_symbol.maybe_compute_value(), a.b[c])
def test_attribute_access_after_item_access(self):
  """An attribute access composed over a subscript computes `a[b].c`."""
  class Bar(object):
    def __init__(self):
      self.c = object()
  item = Bar()
  class Foo(object):
    def __getitem__(self, key):
      return item
  root = Foo()
  index = 42
  root_sym = ValueSymbol('a', root)
  index_sym = ValueSymbol('b', index)
  subscript_sym = SubscriptSymbol(root_sym, index_sym)
  composite_sym = AttributeAccessSymbol(subscript_sym, 'c')
  self.assertEqual(root_sym.maybe_compute_value(), root)
  self.assertEqual(index_sym.maybe_compute_value(), index)
  self.assertEqual(subscript_sym.maybe_compute_value(), root[index])
  self.assertEqual(composite_sym.maybe_compute_value(), root[index].c)
def test_item_access_after_item_access(self):
  """Chained subscripts compute through both levels: `a[b][c]`."""
  class Bar(object):
    def __getitem__(self, key):
      return 'this is an item'
  item = Bar()
  class Foo(object):
    def __getitem__(self, key):
      return item
  a = Foo()
  b = 42
  c = 43
  a_symbol = ValueSymbol('a', a)
  b_symbol = ValueSymbol('b', b)
  # Bug fix: this symbol was previously constructed as ValueSymbol('b', c),
  # a copy-paste typo that gave the composite the misleading name 'a[b][b]'.
  c_symbol = ValueSymbol('c', c)
  a_b_symbol = SubscriptSymbol(a_symbol, b_symbol)
  a_b_c_symbol = SubscriptSymbol(a_b_symbol, c_symbol)
  self.assertEqual(a_symbol.maybe_compute_value(), a)
  self.assertEqual(b_symbol.maybe_compute_value(), b)
  self.assertEqual(a_b_symbol.maybe_compute_value(), a[b])
  self.assertEqual(a_b_c_symbol.maybe_compute_value(), a[b][c])
  # The composite symbol name now reflects both subscripts.
  self.assertEqual(a_b_c_symbol.name, 'a[b][c]')
def test_attribute_access_after_attribute_access(self):
  """Chained attribute accesses compute through both levels: `a.b.c`."""
  class Bar(object):
    def __init__(self):
      self.c = object()
  class Foo(object):
    def __init__(self):
      self.b = Bar()
  root = Foo()
  root_sym = ValueSymbol('a', root)
  attr_sym = AttributeAccessSymbol(root_sym, 'b')
  nested_attr_sym = AttributeAccessSymbol(attr_sym, 'c')
  self.assertEqual(root_sym.maybe_compute_value(), root)
  self.assertEqual(attr_sym.maybe_compute_value(), root.b)
  self.assertEqual(nested_attr_sym.maybe_compute_value(), root.b.c)
# Standard TF test runner entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/operators/symbols_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Abstract representation of composite symbols that can used in staging code.
This provides a way to checkpoint the values of symbols that may be undefined
entering staged control flow. This checkpointing is necessary to prevent some
unintended side-effects. For example checkpointing prevents side-effects in one
branch of a conditional from leaking into another.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.operators import special_values
# Short module-level aliases for the special-value helpers used below.
is_undefined = special_values.is_undefined
Undefined = special_values.Undefined
class Symbol(object):
  """Base class for simple and composite Python symbols.

  Concrete subclasses implement `maybe_compute_value(self)`, which returns
  either the symbol's current value or `Undefined` when no value exists.
  """

  def __init__(self, name):
    # Human-readable, fully qualified name of the symbol (e.g. 'a.b[c]').
    self.name = name
class ValueSymbol(Symbol):
  """A simple Python symbol bound to a concrete value.

  Covers variables and literals. Because undefined symbols are reified,
  `Undefined` itself is an acceptable value.
  """

  def __init__(self, name, value):
    super(ValueSymbol, self).__init__(name)
    self.value = value

  def maybe_compute_value(self):
    # The value is fixed at construction time; no computation is needed.
    return self.value
class AttributeAccessSymbol(Symbol):
  """Representation of Python attribute access e.g. `a.b`."""

  def __init__(self, parent_symbol, attr_name):
    super(AttributeAccessSymbol, self).__init__(
        parent_symbol.name + '.' + attr_name)
    self.attr_name = attr_name
    self.parent_symbol = parent_symbol

  def maybe_compute_value(self):
    """Compute the value corresponding to the attribute access or `Undefined`.

    This will be `Undefined` if no such value exists either because there is no
    such attribute or if the base is itself undefined.

    Returns:
      value corresponding to the attribute access or `Undefined`
    """
    parent_value = self.parent_symbol.maybe_compute_value()
    if is_undefined(parent_value):
      return Undefined(self.name)
    # Bug fix: the original read the attribute twice -- once with getattr for
    # the existence check and again with parent_value.__getattribute__(). The
    # direct __getattribute__ call bypasses a __getattr__ fallback, so an
    # attribute supplied by __getattr__ would pass the check and then raise
    # AttributeError. Reading once with getattr is consistent and also avoids
    # evaluating properties twice.
    attr_value = getattr(parent_value, self.attr_name, None)
    if attr_value is None:
      # NOTE(review): as in the original, an attribute whose value is
      # literally None is reported as Undefined; preserved for compatibility.
      return Undefined(self.name)
    return attr_value
class SubscriptSymbol(Symbol):
  """Representation of Python subscript access e.g. `a[b]`."""

  def __init__(self, parent_symbol, index_symbol):
    super(SubscriptSymbol, self).__init__(
        parent_symbol.name + '[' + index_symbol.name + ']')
    self.index_symbol = index_symbol
    self.parent_symbol = parent_symbol

  def maybe_compute_value(self):
    """Returns the value of the subscript expression, or `Undefined`.

    The result is `Undefined` when either the base or the index is undefined,
    or when the base has no element for the given index.

    Returns:
      value corresponding to the subscript access or `Undefined`
    """
    base = self.parent_symbol.maybe_compute_value()
    key = self.index_symbol.maybe_compute_value()
    if is_undefined(base) or is_undefined(key):
      return Undefined(self.name)
    try:
      return base[key]
    except (IndexError, KeyError, TypeError):
      # Reify the missing element; it may be defined later without regret.
      return Undefined(self.name)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/operators/symbols.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operators corresponding to Python builtin functions.
List of built-in functions: https://docs.python.org/3/library/functions.html
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import six
from tensorflow.python.autograph.utils import py_func
from tensorflow.python.autograph.utils import tensors
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_parsing_ops
from tensorflow.python.ops import gen_string_ops
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import math_ops
# Sentinel distinguishing "argument not supplied" from any legitimate value
# (including None) in the builtin overloads below.
UNSPECIFIED = object()
def overload_of(f):
  """Returns the autograph overload of builtin `f`, or `f` itself if none."""
  if f in SUPPORTED_BUILTINS:
    # NOTE(review): BUILTIN_FUINCTIONS_MAP (misspelling intentional here; it
    # matches the definition at the bottom of this module) maps builtin names
    # to overloads, and every member of SUPPORTED_BUILTINS has an entry.
    return BUILTIN_FUINCTIONS_MAP[f.__name__]
  return f
def _find_originating_frame(caller_fn_scope, innermost=True):
  """Locates the frame in which `caller_fn_scope` was defined.

  Args:
    caller_fn_scope: The function scope object to look for on the stack; a
      frame matches when its locals bind `caller_fn_scope.name` to this exact
      object (identity check).
    innermost: If True, the closest (most recent) matching frame is returned;
      otherwise the walk continues and the outermost match wins.

  Returns:
    The matching frame object. Raises AssertionError when no frame matches,
    which the conversion process is expected to make impossible.
  """
  ctx_frame = inspect.currentframe()
  result = None
  while ctx_frame is not None:
    # Note it should not be normally possible to get false positives this way
    # because the function scope object is not accessible to user code (barring
    # call stack introspection).
    if ctx_frame.f_locals.get(caller_fn_scope.name, None) is caller_fn_scope:
      result = ctx_frame
      if innermost:
        break
    # Walk outward toward the caller of the caller.
    ctx_frame = ctx_frame.f_back
  assert result is not None, (
      'the conversion process should ensure the caller_fn_scope is always'
      ' found somewhere on the call stack')
  return result
def eval_in_original_context(f, args, caller_fn_scope):
  """Executes the eval function in the context of a specified function."""
  # Control flow rewriting wraps user code in nested functions, so eval must
  # resolve names in the block where it was originally written -- i.e. the
  # innermost frame that carries the caller's function scope.
  caller_frame = _find_originating_frame(caller_fn_scope, innermost=True)
  # Explicit globals/locals arguments, when supplied, take precedence over the
  # ones recovered from the originating frame.
  globals_arg = args[1] if len(args) >= 2 else caller_frame.f_globals
  locals_arg = args[2] if len(args) >= 3 else caller_frame.f_locals
  return f(args[0], globals_arg, locals_arg)
def super_in_original_context(f, args, caller_fn_scope):
  """Executes the super function in the context of a specified function.

  See https://docs.python.org/3/library/functions.html#super for the exact
  details

  Args:
    f: Callable, typically the super builtin
    args: List[Any], the original call arguments
    caller_fn_scope: Optional[function_wrappers.FunctionScope], the function
      scope of the converted function in which this call was originally made

  Returns:
    The result of calling `f` as if it was called in the frame indicated by
      `caller_fn_scope`.
  """
  # Python 2 doesn't support implicit argument super variants.
  if six.PY2:
    return f(*args)

  # Only the no-arg call is desugared.
  if args:
    return f(*args)

  # Inner functions seem to include their closure in f_locals, so we need
  # to find the outermost frame.
  ctx_frame = _find_originating_frame(caller_fn_scope, innermost=False)

  # When super(..) is called without arguments, it looks for __class__ cell
  # variable and the first argument passed in the enclosing function according
  # to the spec https://www.python.org/dev/peps/pep-3135/ .
  #
  # We couldn't verify if `inspect.currentframe().f_code.co_varnames[0]` is
  # guaranteed to be the first argument from an official doc or PEP, however,
  # it's fairly stable and well established:
  # - An unofficial community doc mentions it.
  #   https://python-reference.readthedocs.io/en/latest/docs/code/varnames.html
  # - CPython has tests checking that order, which was merged in 2008, and
  #   unchanged since then.
  #   https://github.com/python/cpython/blame/2f224a077a83ac9de8a12bb7dcc516642b8176d8/Lib/lib2to3/tests/data/py2_test_grammar.py#L157
  #   https://github.com/python/cpython/blame/2f224a077a83ac9de8a12bb7dcc516642b8176d8/Lib/lib2to3/tests/data/py3_test_grammar.py#L192
  #
  # Note: the name can be more reliably obtained by inspecting the calling
  # function's argspec.
  #
  # Even though methods can be declared using *args (def method(*args)),
  # that pattern is disallowed by super() -- it raises super() no arguments.
  # Method definitions using **kwargs are not allowed at all.
  # In other words, we can always assume that self is on the first positional
  # argument (for correct code).
  #
  # TODO(mdan): Consider additional checks in case the input code is incorrect.
  # For example, the error might be cryptic compared to what super() regularly
  # raises.

  # NOTE(review): raises KeyError if the located frame has no __class__ cell
  # (e.g. a bare function rather than a method) -- confirm this is intended to
  # surface as-is rather than being converted to super()'s usual RuntimeError.
  type_arg = ctx_frame.f_locals['__class__']
  self_arg_name = ctx_frame.f_code.co_varnames[0]
  self_arg = ctx_frame.f_locals[self_arg_name]
  return f(type_arg, self_arg)
def abs_(x):
  """Overload of the abs builtin; dispatches on tensor vs. Python values."""
  return _tf_abs(x) if tensor_util.is_tensor(x) else _py_abs(x)
def _tf_abs(x):
  """Tensor dispatch target of abs_; defers to math_ops.abs."""
  return math_ops.abs(x)
def _py_abs(x):
return abs(x)
def float_(x=0):
  """Overload of the float builtin; dispatches on tensor vs. Python values."""
  return _tf_float(x) if tensor_util.is_tensor(x) else _py_float(x)
def _tf_float(x):
  """Tensor dispatch target of float_; parses strings, casts other dtypes."""
  # TODO(mdan): We shouldn't assume float32.
  if x.dtype == dtypes.string:
    return gen_parsing_ops.string_to_number(x, out_type=dtypes.float32)
  return math_ops.cast(x, dtype=dtypes.float32)
def _py_float(x):
return float(x)
def int_(x=0, base=UNSPECIFIED):
  """Overload of the int builtin; dispatches on tensor vs. Python values."""
  return _tf_int(x, base) if tensor_util.is_tensor(x) else _py_int(x, base)
def _tf_int(x, base):
  """Tensor dispatch target of int_; only base 10 (or unspecified) supported."""
  if base not in (10, UNSPECIFIED):
    raise NotImplementedError('base {} not supported for int'.format(base))
  # TODO(mdan): We shouldn't assume int32.
  if x.dtype == dtypes.string:
    return gen_parsing_ops.string_to_number(x, out_type=dtypes.int32)
  return math_ops.cast(x, dtype=dtypes.int32)
def _py_int(x, base):
  """Plain-Python dispatch target of int_; forwards `base` only when given."""
  return int(x) if base is UNSPECIFIED else int(x, base)
def len_(s):
  """Overload of the len builtin for tensor arrays, lists, tensors, Python."""
  # Order matters: tensor arrays and tensor lists must be recognized before
  # the generic tensor check.
  if tensors.is_tensor_array(s):
    return _tf_tensor_array_len(s)
  if tensors.is_tensor_list(s):
    return _tf_tensor_list_len(s)
  if tensor_util.is_tensor(s):
    return _tf_tensor_len(s)
  return _py_len(s)
def _tf_tensor_array_len(s):
  """TensorArray dispatch target of len_; returns the array's size op."""
  return s.size()
def _tf_tensor_list_len(s):
  """Tensor-list dispatch target of len_; returns the list length op."""
  return list_ops.tensor_list_length(s)
def _tf_tensor_len(s):
  """Overload of len_ for Tensor arguments."""
  # Statically shaped tensors: length is known ahead of time.
  if s.shape.ndims and s.shape.dims[0].value is not None:
    return s.shape.dims[0].value

  # Static shape of unknown dimensions: use dynamic shape but statically
  # check that it's a scalar.
  shape = array_ops.shape(s)

  assert shape.shape, 'shape tensor of zero size? {}'.format(shape)

  if shape.shape[0] == 0:
    # Rank-0 input: len() is undefined, mirror Python's TypeError semantics.
    raise ValueError(
        'len requires a non-scalar tensor, got one of shape {}'.format(shape))

  if shape.shape.dims[0].value is not None:
    # Rank is known statically; the dynamic first dimension is the length.
    return array_ops.shape(s)[0]

  # Fully dynamic shape: use ops.
  rank = array_ops.rank(s)

  def raise_zero_rank_error():
    # Raised at graph execution time via an Assert op, since the rank is only
    # known dynamically here.
    msg = gen_string_ops.string_join(
        ['len requires non-zero rank, got ',
         gen_string_ops.as_string(rank)])
    with ops.control_dependencies([control_flow_ops.Assert(False, [msg])]):
      return constant_op.constant(0, dtype=dtypes.int32)

  return control_flow_ops.cond(rank > 0, lambda: array_ops.shape(s)[0],
                               raise_zero_rank_error)
def _py_len(s):
return len(s)
def print_(*objects, **kwargs):
  """Overload of the print builtin."""
  # Note: Python 2.6 doesn't support explicit keywords after starargs.
  unknown = tuple(set(kwargs) - {'sep', 'end', 'file', 'flush'})
  if unknown:
    raise ValueError('invalid keyword arguments: {}'.format(unknown))

  # TODO(mdan): Use next.flatten(objects) instead?
  if any(tensor_util.is_tensor(o) for o in objects):
    # TODO(mdan): use tf.print instead.
    return _tf_py_func_print(objects, kwargs)
  # Plain Python values print eagerly; no op is returned.
  _py_print(*objects, **kwargs)
def _py_print(*objects, **kwargs):
  """Plain-Python dispatch target of print_; writes via the print builtin."""
  print(*objects, **kwargs)
def _tf_py_func_print(objects, kwargs):
  """Overload of print_ as a py_func implementation."""
  # Drop keyword arguments the caller left at the UNSPECIFIED sentinel so the
  # builtin's own defaults apply.
  override_kwargs = {k: v for k, v in kwargs.items() if v is not UNSPECIFIED}
  if 'flush' not in override_kwargs:
    # Defaulting to flushing the console in graph mode, which helps reduce
    # garbled output in IPython.
    override_kwargs['flush'] = True

  def print_wrapper(*vals):
    # Runs eagerly inside the py_func; convert tensors to numpy first.
    vals = tuple(v.numpy() if tensor_util.is_tensor(v) else v for v in vals)
    if six.PY3:
      # TensorFlow doesn't seem to generate Unicode when passing strings to
      # py_func. This causes the print to add a "b'" wrapper to the output,
      # which is probably never what you want.
      vals = tuple(
          v.decode('utf-8') if isinstance(v, bytes) else v for v in vals)
    six.print_(*vals, **override_kwargs)

  return py_func.wrap_py_func(
      print_wrapper, None, objects, use_dummy_return=True)
def range_(start_or_stop, stop=UNSPECIFIED, step=UNSPECIFIED):
  """Overload of the range builtin; any tensor argument selects the TF path."""
  args = (start_or_stop, stop, step)
  if any(tensor_util.is_tensor(a) for a in args):
    return _tf_range(*args)
  return _py_range(*args)
def _tf_range(start_or_stop, stop, step):
  """Overload of range_ that generates a TF range tensor."""
  # Note: for static inputs (e.g. constants), tf.range errors out at graph
  # construction time, instead of returning an empty tensor. Preventing the
  # graph construction error aligns the semantics with Python.

  # TODO(mdan): We should optimize this when a full tensor is not required.
  if step is not UNSPECIFIED:
    # TODO(mdan): Add argument coercion similar to other cases.
    return math_ops.range(start_or_stop, stop, step)
  if stop is not UNSPECIFIED:
    # Clamp so that stop >= start, matching Python's empty-range behavior.
    stop = math_ops.maximum(start_or_stop, stop)
    return math_ops.range(start_or_stop, stop)
  # Single-argument form: clamp the stop value to be non-negative.
  start_or_stop = math_ops.maximum(start_or_stop, 0)
  return math_ops.range(start_or_stop)
def _py_range(start_or_stop, stop, step):
  """Plain-Python dispatch target of range_; forwards only supplied args."""
  if step is not UNSPECIFIED:
    return range(start_or_stop, stop, step)
  if stop is not UNSPECIFIED:
    return range(start_or_stop, stop)
  # Single-argument form: start_or_stop is the stop value.
  return range(start_or_stop)
def enumerate_(s, start=0):
  """Overload of the enumerate builtin; datasets use Dataset.enumerate."""
  handler = (_tf_dataset_enumerate
             if isinstance(s, dataset_ops.DatasetV2) else _py_enumerate)
  return handler(s, start)
def _tf_dataset_enumerate(s, start=0):
  """Dataset dispatch target of enumerate_; defers to Dataset.enumerate."""
  return s.enumerate(start)
def _py_enumerate(s, start=0):
return enumerate(s, start)
def zip_(*iterables):
  """Overload of the zip builtin; all-dataset inputs use Dataset.zip."""
  only_datasets = all(
      isinstance(x, dataset_ops.DatasetV2) for x in iterables)
  if only_datasets:
    return _tf_dataset_zip(*iterables)
  return _py_zip(*iterables)
def _tf_dataset_zip(*iterables):
  """Dataset dispatch target of zip_; defers to Dataset.zip on a tuple."""
  return dataset_ops.DatasetV2.zip(tuple(iterables))
def _py_zip(*iterables):
return zip(*iterables)
# Builtins for which autograph supplies an overload; membership is tested by
# `overload_of` above.
SUPPORTED_BUILTINS = (abs, float, int, len, print, range, enumerate, zip)

if six.PY2:
  # xrange exists only on Python 2; it shares the range_ overload below.
  SUPPORTED_BUILTINS += (xrange,)

# Maps builtin names to their overloads.
# NOTE(review): "FUINCTIONS" is a long-standing misspelling; `overload_of`
# references the same name, so any rename must update both sites together.
BUILTIN_FUINCTIONS_MAP = {
    'abs': abs_,
    'float': float_,
    'int': int_,
    'len': len_,
    'print': print_,
    'range': range_,
    # TODO(mdan): This might make more sense as tf.data.range.
    'xrange': range_,
    'enumerate': enumerate_,
    'zip': zip_,
}
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/operators/py_builtins.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for control_flow module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import sys
import six
from tensorflow.python.autograph.operators import control_flow
from tensorflow.python.autograph.utils import ag_logging
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class ForLoopTest(test.TestCase):
  """Tests control_flow.for_stmt over tensors, datasets, iterators, Python.

  Most tests accumulate loop indices as decimal digits (s * 10 + i), so the
  expected value encodes the exact visitation order.
  """

  def test_tensor(self):
    with ops.Graph().as_default():
      s = control_flow.for_stmt(
          constant_op.constant([1, 2, 3, 4]),
          extra_test=lambda s: True,
          body=lambda i, s: (s * 10 + i,),
          get_state=lambda: (),
          set_state=lambda _: None,
          init_vars=(0,))
      # Digits 1..4 visited in order.
      self.assertEqual(self.evaluate(s), (1234,))

  def test_range_tensor(self):
    with ops.Graph().as_default():
      s = control_flow.for_stmt(
          math_ops.range(5),
          extra_test=lambda s: True,
          body=lambda i, s: (s * 10 + i,),
          get_state=lambda: (),
          set_state=lambda _: None,
          init_vars=(0,))
      # 0,1,2,3,4 -> leading zero drops out of the accumulator.
      self.assertEqual(self.evaluate(s), (1234,))

  def test_range_tensor_random_delta(self):
    with ops.Graph().as_default():
      # Delta is a dynamic tensor that always evaluates to 1.
      random_one = random_ops.random_uniform((), 1, 2, dtype=dtypes.int32)
      s = control_flow.for_stmt(
          math_ops.range(0, 5, random_one),
          extra_test=lambda s: True,
          body=lambda i, s: (s * 10 + i,),
          get_state=lambda: (),
          set_state=lambda _: None,
          init_vars=(0,))
      self.assertEqual(self.evaluate(s), (1234,))

  def test_range_tensor_explicit_limit_delta(self):
    with ops.Graph().as_default():
      s = control_flow.for_stmt(
          math_ops.range(-17, -3, 5),
          extra_test=lambda s: True,
          body=lambda i, s: (s * 100 + i,),
          get_state=lambda: (),
          set_state=lambda _: None,
          init_vars=(0,))
      # Visits -17, -12, -7 accumulated base-100.
      self.assertEqual(self.evaluate(s), (-171207,))

  def test_range_tensor_random_negative_delta(self):
    with ops.Graph().as_default():
      # Delta is a dynamic tensor that always evaluates to -5.
      random_neg_five = random_ops.random_uniform((),
                                                  -5,
                                                  -4,
                                                  dtype=dtypes.int32)
      s = control_flow.for_stmt(
          math_ops.range(17, 3, random_neg_five),
          extra_test=lambda s: True,
          body=lambda i, s: (s * 100 + i,),
          get_state=lambda: (),
          set_state=lambda _: None,
          init_vars=(0,))
      # Visits 17, 12, 7 accumulated base-100.
      self.assertEqual(self.evaluate(s), (171207,))

  def test_range_tensor_negative_delta(self):
    with ops.Graph().as_default():
      s = control_flow.for_stmt(
          math_ops.range(17, 3, -5),
          extra_test=lambda s: True,
          body=lambda i, s: (s * 100 + i,),
          get_state=lambda: (),
          set_state=lambda _: None,
          init_vars=(0,))
      self.assertEqual(self.evaluate(s), (171207,))

  def test_tensor_with_extra_test_only_python_state(self):
    # State lives on a Python object; for_stmt tracks it through
    # get_state/set_state rather than loop vars.
    class MutableObject(object):
      field_1 = constant_op.constant(0, dtype=dtypes.int32)
      field_2 = constant_op.constant(1, dtype=dtypes.int32)
    state = MutableObject()

    def get_state():
      return (state.field_1, state.field_2)

    def set_state(new_state):
      state.field_1, state.field_2 = new_state

    def body(i):
      state.field_1 += i
      state.field_2 *= i
      return ()

    control_flow.for_stmt(
        iter_=constant_op.constant([1, 2, 3, 4]),
        body=body,
        extra_test=lambda: state.field_1 < 6,
        get_state=get_state,
        set_state=set_state,
        init_vars=())
    # extra_test stops the loop after 1+2+3 reaches 6.
    self.assertEqual(self.evaluate(state.field_1), 6)
    self.assertEqual(self.evaluate(state.field_2), 6)

  def test_python(self):
    # Plain Python iterable: loop is executed eagerly, no graph involved.
    s = control_flow.for_stmt(
        range(5),
        extra_test=lambda s: True,
        body=lambda i, s: (s * 10 + i,),
        get_state=None,
        set_state=None,
        init_vars=(0,))
    self.assertEqual(s, (1234,))

  def test_tf_dataset(self):
    with ops.Graph().as_default():
      s = control_flow.for_stmt(
          dataset_ops.Dataset.range(5),
          extra_test=None,
          body=lambda i, s: (s * 10 + i,),
          get_state=lambda: (),
          set_state=lambda _: None,
          init_vars=(constant_op.constant(0, dtype=dtypes.int64),))
      self.assertEqual(self.evaluate(s), (1234,))

  def test_dataset_with_extra_test(self):
    s = control_flow.for_stmt(
        dataset_ops.Dataset.range(5),
        extra_test=lambda s: s < 3,
        body=lambda i, s: (s + i,),
        get_state=lambda: (),
        set_state=lambda _: None,
        init_vars=(constant_op.constant(0, dtype=dtypes.int64),))
    # Stops once the running sum reaches 3 (0+1+2).
    self.assertEqual(self.evaluate(s), (3,))

  def test_dataset_with_extra_test_and_state(self):
    state = [constant_op.constant(0, dtype=dtypes.int64)]

    def get_state():
      return (state[0],)

    def set_state(new_state):
      state[0], = new_state

    def body(i, s):
      state[0] += i
      return (s + i,)

    s = control_flow.for_stmt(
        dataset_ops.Dataset.range(5),
        extra_test=lambda s: s < 3,
        body=body,
        get_state=get_state,
        set_state=set_state,
        init_vars=(constant_op.constant(0, dtype=dtypes.int64),))
    self.assertEqual(self.evaluate(s), (3,))
    # Side state mirrors the loop accumulator.
    self.assertEqual(self.evaluate(state[0]), (3,))

  def test_dataset_with_extra_test_no_extra_iterations(self):

    def guarded_body(i, s):
      # The Assert fires if the body ever runs past the extra_test cutoff.
      with ops.control_dependencies((control_flow_ops.Assert(i < 3, (i,)),)):
        return s + i,

    s = control_flow.for_stmt(
        dataset_ops.Dataset.range(5),
        extra_test=lambda s: s < 3,
        body=guarded_body,
        get_state=lambda: (),
        set_state=lambda _: None,
        init_vars=(constant_op.constant(0, dtype=dtypes.int64),))
    self.assertEqual(self.evaluate(s), (3,))

  @test_util.run_v2_only
  def test_tf_dataset_no_loop_vars(self):
    v = variables.Variable(0, dtype=dtypes.int64)
    self.evaluate(v.initializer)

    def stateless_with_side_effects(i):
      v.assign(v.read_value() * 10 + i)

    # function is important here, because ops test for its presence.
    @def_function.function(autograph=False)
    def test_fn():
      control_flow.for_stmt(
          dataset_ops.Dataset.range(5),
          extra_test=None,
          body=stateless_with_side_effects,
          get_state=lambda: (),
          set_state=lambda _: None,
          init_vars=())

    test_fn()
    self.assertEqual(self.evaluate(v.read_value()), 1234)

  def test_tf_iterator(self):
    # graph-mode iterators are only supported inside tf.function.
    @def_function.function(autograph=False)
    def test_fn():
      itr = iter(dataset_ops.Dataset.range(5))
      return control_flow.for_stmt(
          itr,
          extra_test=None,
          body=lambda i, s: (s * 10 + i,),
          get_state=lambda: (),
          set_state=lambda _: None,
          init_vars=(constant_op.constant(0, dtype=dtypes.int64),))

    s, = test_fn()
    self.assertAllEqual(s, 1234)

  @test_util.run_v2_only
  def test_tf_iterator_no_loop_vars(self):
    v = variables.Variable(0, dtype=dtypes.int64)

    def stateless_with_side_effects(i):
      v.assign(v.read_value() * 10 + i)

    # graph-mode iterators are only supported inside tf.function.
    @def_function.function(autograph=False)
    def test_fn():
      control_flow.for_stmt(
          iter(dataset_ops.Dataset.range(5)),
          extra_test=None,
          body=stateless_with_side_effects,
          get_state=lambda: (),
          set_state=lambda _: None,
          init_vars=())

    test_fn()
    self.assertEqual(self.evaluate(v.read_value()), 1234)
class WhileLoopTest(test.TestCase):
  """Tests control_flow.while_stmt with tensor and Python conditions."""

  @test_util.run_deprecated_v1
  def test_tensor(self):
    n = constant_op.constant(5)
    results = control_flow.while_stmt(
        test=lambda i, s: i < n,
        body=lambda i, s: (i + 1, s + i),
        get_state=lambda: (),
        set_state=lambda _: None,
        init_vars=(0, 0))
    # Counts to 5 while summing 0..4.
    self.assertEqual((5, 10), self.evaluate(results))

  def test_tensor_with_tf_side_effects_in_cond(self):
    n = constant_op.constant(5, dtype=dtypes.int64)
    v = variables.Variable(0, dtype=dtypes.int64)

    def get_and_increment(v):
      # The loop condition itself mutates the variable.
      v.assign(v.read_value() + 1)
      return v.read_value()

    # function is important here, because ops test for its presence.
    @def_function.function(autograph=False)
    def test_fn():
      return control_flow.while_stmt(
          test=lambda i: get_and_increment(v) < n,
          body=lambda i: (i + 1,),
          get_state=lambda: (),
          set_state=lambda _: None,
          init_vars=(0,))

    results = test_fn()
    self.evaluate(v.initializer)
    # The condition runs once more than the body (the failing check).
    self.assertEqual(self.evaluate(results), (4,))
    self.assertEqual(self.evaluate(v), (5,))

  def test_tensor_with_python_state(self):
    n = constant_op.constant(5)

    # State lives on a Python object and is threaded via get_state/set_state.
    class MutableObject(object):
      field = constant_op.constant(0, dtype=dtypes.int32)
    state = MutableObject()

    def get_state():
      return (state.field,)

    def set_state(new_state):
      state.field, = new_state

    def body(i, s):
      state.field += i
      return (i + 1, s + i)

    s = control_flow.while_stmt(
        test=lambda i, s: i < n,
        body=body,
        get_state=get_state,
        set_state=set_state,
        init_vars=(0, 0))
    self.assertEqual(self.evaluate(s), (5, 10))
    self.assertEqual(self.evaluate(state.field), 10)

  @test_util.run_deprecated_v1
  def test_python_with_tensor_state(self):
    # Python condition with a tensor accumulator: loop unrolls eagerly.
    n = 5
    results = control_flow.while_stmt(
        test=lambda i, s: i < n,
        body=lambda i, s: (i + 1, s + i),
        get_state=lambda: (),
        set_state=lambda _: None,
        init_vars=(0, constant_op.constant(0)))
    result_i, result_s = results
    self.assertEqual(5, result_i)
    self.assertEqual(10, self.evaluate(result_s))

  def test_python(self):
    n = 5
    results = control_flow.while_stmt(
        test=lambda i, s: i < n,
        body=lambda i, s: (i + 1, s + i),
        get_state=None,
        set_state=None,
        init_vars=(0, 0))
    self.assertEqual((5, 10), results)

  def test_python_infinite_loop(self):
    # The iteration-limit guard is only active in debug builds.
    if __debug__:
      with test.mock.patch.object(control_flow, 'PYTHON_MAX_ITERATIONS', 100):
        with self.assertRaisesRegexp(ValueError, 'iteration limit'):
          control_flow.while_stmt(
              test=lambda _: True,
              body=lambda i: (i + 1,),
              get_state=None,
              set_state=None,
              init_vars=(0,))

  def test_python_long_loop_unroll_warning(self):
    if __debug__:
      with test.mock.patch.object(
          control_flow, 'INEFFICIENT_UNROLL_MIN_ITERATIONS', 10):
        with ops.Graph().as_default():
          out_capturer = six.StringIO()
          with test.mock.patch.object(sys, 'stdout', out_capturer):
            # Route autograph's warning log to the captured stdout.
            ag_logging.echo_log_to_stdout = True
            sys.stdout = out_capturer
            control_flow.while_stmt(
                test=lambda i, _: i < 100,
                body=lambda i, _: (i + 1, gen_math_ops.add(i, 1),),
                get_state=None,
                set_state=None,
                init_vars=(0, None))
          # The warning mentions the op being created inside the long loop.
          self.assertTrue(re.match(
              r'.*ops.*loop.*large.*iterations.*Add.*',
              out_capturer.getvalue()))
class IfStmtTest(test.TestCase):
  """Tests control_flow.if_stmt with tensor and Python conditions."""

  def single_return_if_stmt(self, cond):
    """Builds an if_stmt whose branches each return a single scalar."""
    def true_branch():
      return 1

    def false_branch():
      return -1

    return control_flow.if_stmt(
        cond=cond,
        body=true_branch,
        orelse=false_branch,
        get_state=lambda: (),
        set_state=lambda _: None)

  def multi_return_if_stmt(self, cond):
    """Builds an if_stmt whose branches each return a pair of scalars."""
    def true_branch():
      return (1, 2)

    def false_branch():
      return (-1, -2)

    return control_flow.if_stmt(
        cond=cond,
        body=true_branch,
        orelse=false_branch,
        get_state=lambda: (),
        set_state=lambda _: None)

  @test_util.run_deprecated_v1
  def test_tensor(self):
    with self.cached_session():
      for cond_value, expected in ((True, 1), (False, -1)):
        result = self.single_return_if_stmt(constant_op.constant(cond_value))
        self.assertEqual(expected, self.evaluate(result))

  def test_python(self):
    self.assertEqual(1, self.single_return_if_stmt(True))
    self.assertEqual(-1, self.single_return_if_stmt(False))

  @test_util.run_deprecated_v1
  def test_tensor_multiple_returns(self):
    with self.cached_session():
      for cond_value, expected in ((True, [1, 2]), (False, [-1, -2])):
        result = self.multi_return_if_stmt(constant_op.constant(cond_value))
        self.assertAllEqual(expected, self.evaluate(result))

  def test_python_multiple_returns(self):
    self.assertEqual((1, 2), self.multi_return_if_stmt(True))
    self.assertEqual((-1, -2), self.multi_return_if_stmt(False))
# Standard TF test runner entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/operators/control_flow_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operators specific to data structures: list append, subscripts, etc."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import tensor_array_ops
# TODO(mdan): Once control flow supports objects, repackage as a class.
def new_list(iterable=None):
  """The list constructor.

  Args:
    iterable: Optional elements to fill the list with.

  Returns:
    A list-like object. The exact return value depends on the initial
    elements.
  """
  elements = tuple(iterable) if iterable else ()

  if elements:
    # When the list contains elements, it is assumed to be a "Python" lvalue
    # list.
    return _py_list_new(elements)
  # Empty initializers default to a staged TF tensor list.
  return tf_tensor_list_new(elements)
def tf_tensor_array_new(elements, element_dtype=None, element_shape=None):
  """Overload of new_list that stages a Tensor list creation."""
  elements = tuple(ops.convert_to_tensor(el) for el in elements)

  # Infer the dtype from the elements, validating against any explicit one.
  all_dtypes = set(el.dtype for el in elements)
  if len(all_dtypes) == 1:
    inferred_dtype, = tuple(all_dtypes)
    if element_dtype is not None and element_dtype != inferred_dtype:
      raise ValueError(
          'incompatible dtype; specified: {}, inferred from {}: {}'.format(
              element_dtype, elements, inferred_dtype))
  elif len(all_dtypes) > 1:
    raise ValueError(
        'TensorArray requires all elements to have the same dtype:'
        ' {}'.format(elements))
  else:
    # No elements at all: the dtype cannot be inferred, so it must be given.
    if element_dtype is None:
      raise ValueError('dtype is required to create an empty TensorArray')

  # Same inference/validation for the element shape.
  all_shapes = set(tuple(el.shape.as_list()) for el in elements)
  if len(all_shapes) == 1:
    inferred_shape, = tuple(all_shapes)
    if element_shape is not None and element_shape != inferred_shape:
      raise ValueError(
          'incompatible shape; specified: {}, inferred from {}: {}'.format(
              element_shape, elements, inferred_shape))
  elif len(all_shapes) > 1:
    raise ValueError(
        'TensorArray requires all elements to have the same shape:'
        ' {}'.format(elements))
    # TODO(mdan): We may want to allow different shapes with infer_shape=False.
  else:
    inferred_shape = None

  # Fall back to the inferred values when no explicit ones were supplied.
  if element_dtype is None:
    element_dtype = inferred_dtype
  if element_shape is None:
    element_shape = inferred_shape

  l = tensor_array_ops.TensorArray(
      dtype=element_dtype,
      size=len(elements),
      dynamic_size=True,
      infer_shape=(element_shape is None),
      element_shape=element_shape)
  # Populate the array with the initial elements.
  for i, el in enumerate(elements):
    l = l.write(i, el)
  return l
def tf_tensor_list_new(elements, element_dtype=None, element_shape=None):
  """Overload of new_list that stages a Tensor list creation.

  Accepts either an already-staged Tensor (whose rows become the list
  elements) or an iterable of values convertible to Tensor. Returns a
  tf.variant Tensor list. Heterogeneous dtypes/shapes are permitted; the
  list then uses dtype variant and an unknown element shape (by convention,
  scalar -1), in which case no annotations may be specified.
  """
  if tensor_util.is_tensor(elements):
    if element_shape is not None:
      raise ValueError(
          'element shape may not be specified when creating list from tensor')
    # The list elements are the rows of the tensor; the element shape is the
    # tensor's shape with the leading (row count) dimension removed.
    element_shape = array_ops.shape(elements)[1:]
    l = list_ops.tensor_list_from_tensor(elements, element_shape=element_shape)
    return l
  elements = tuple(ops.convert_to_tensor(el) for el in elements)
  all_dtypes = set(el.dtype for el in elements)
  if len(all_dtypes) == 1:
    inferred_dtype = tuple(all_dtypes)[0]
    if element_dtype is not None and element_dtype != inferred_dtype:
      raise ValueError(
          'incompatible dtype; specified: {}, inferred from {}: {}'.format(
              element_dtype, elements, inferred_dtype))
  elif all_dtypes:
    # Heterogeneous lists are ok.
    if element_dtype is not None:
      raise ValueError(
          'specified dtype {} is inconsistent with that of elements {}'.format(
              element_dtype, elements))
    inferred_dtype = dtypes.variant
  else:
    # Empty list: dtype unknown, fall back to variant.
    inferred_dtype = dtypes.variant
  all_shapes = set(tuple(el.shape.as_list()) for el in elements)
  if len(all_shapes) == 1:
    inferred_shape = array_ops.shape(elements[0])
    # NOTE(review): inferred_shape is a Tensor here, so `!=` against a
    # user-supplied tuple/list does not compare values; in TF1 graphs it is
    # effectively always True, so even a matching element_shape annotation
    # raises. Confirm whether this strictness is intended.
    if element_shape is not None and element_shape != inferred_shape:
      raise ValueError(
          'incompatible shape; specified: {}, inferred from {}: {}'.format(
              element_shape, elements, inferred_shape))
  elif all_shapes:
    # Heterogeneous lists are ok.
    if element_shape is not None:
      raise ValueError(
          'specified shape {} is inconsistent with that of elements {}'.format(
              element_shape, elements))
    inferred_shape = constant_op.constant(-1)  # unknown shape, by convention
  else:
    inferred_shape = constant_op.constant(-1)  # unknown shape, by convention
  if element_dtype is None:
    element_dtype = inferred_dtype
  if element_shape is None:
    element_shape = inferred_shape
  element_shape = ops.convert_to_tensor(element_shape, dtype=dtypes.int32)
  l = list_ops.empty_tensor_list(
      element_shape=element_shape, element_dtype=element_dtype)
  # Stage one push per element to populate the staged list.
  for el in elements:
    l = list_ops.tensor_list_push_back(l, el)
  return l
def _py_list_new(elements):
"""Overload of new_list that creates a Python list."""
return list(elements)
def list_append(list_, x):
  """The list append function.

  Note: whether list_ is mutated is unspecified. TensorFlow entities are
  typically not mutated; plain Python lists are. Callers should therefore
  always use the returned value, which points at the original entity whenever
  the append happened in place.

  Args:
    list_: An entity that supports append semantics.
    x: The element to append.

  Returns:
    Same as list_, after the append was performed.

  Raises:
    ValueError: if list_ is not of a known list-like type.
  """
  if isinstance(list_, tensor_array_ops.TensorArray):
    return _tf_tensorarray_append(list_, x)
  if tensor_util.is_tensor(list_):
    # Only variant-dtype tensors represent staged Tensor lists.
    if list_.dtype != dtypes.variant:
      raise ValueError(
          'tensor lists are expected to be Tensors with dtype=tf.variant,'
          ' instead found %s' % list_)
    return _tf_tensor_list_append(list_, x)
  return _py_list_append(list_, x)
def _tf_tensor_list_append(list_, x):
  """Overload of list_append that stages a Tensor list write."""

  def _fresh_list_like_x():
    # A replacement empty list whose element shape/dtype match x.
    x_tensor = ops.convert_to_tensor(x)
    return list_ops.empty_tensor_list(
        element_shape=array_ops.shape(x_tensor),
        element_dtype=x_tensor.dtype)

  # An empty list may carry no usable element annotations; swap it for a
  # fresh empty list shaped like x before pushing.
  has_elements = list_ops.tensor_list_length(list_) > 0
  list_ = control_flow_ops.cond(has_elements, lambda: list_,
                                _fresh_list_like_x)
  return list_ops.tensor_list_push_back(list_, x)
def _tf_tensorarray_append(list_, x):
"""Overload of list_append that stages a TensorArray write."""
return list_.write(list_.size(), x)
def _py_list_append(list_, x):
  """Overload of list_append that executes a Python list append."""
  # Revert to the original call. The list is mutated in place; the same
  # object is returned to match the convention of the staged overloads.
  list_.append(x)
  return list_
class ListPopOpts(
    collections.namedtuple('ListPopOpts', ('element_dtype', 'element_shape'))):
  """Options for list_pop: dtype/shape annotations for the popped element."""
  pass
def list_pop(list_, i, opts):
  """The list pop function.

  Note: whether list_ is mutated is unspecified. TensorFlow entities are
  typically not mutated; plain Python lists are. When the pop happens in
  place, the returned list points at the original entity.

  Args:
    list_: An entity that supports pop semantics.
    i: Optional index to pop from. May be None.
    opts: A ListPopOpts.

  Returns:
    Tuple (x, out_list_):
      out_list_: same as list_, after the removal was performed.
      x: the removed element value.

  Raises:
    ValueError: if list_ is not of a known list-like type or the operation is
      not supported for that type.
  """
  assert isinstance(opts, ListPopOpts)

  if isinstance(list_, tensor_array_ops.TensorArray):
    raise ValueError('TensorArray does not support item removal')
  if tensor_util.is_tensor(list_):
    # Only variant-dtype tensors represent staged Tensor lists.
    if list_.dtype != dtypes.variant:
      raise ValueError(
          'tensor lists are expected to be Tensors with dtype=tf.variant,'
          ' instead found %s' % list_)
    return _tf_tensor_list_pop(list_, i, opts)
  return _py_list_pop(list_, i)
def _tf_tensor_list_pop(list_, i, opts):
  """Overload of list_pop that stages a Tensor list pop."""
  # Tensor lists expose only a pop-from-the-back primitive.
  if i is not None:
    raise NotImplementedError('tensor lists only support removing from the end')

  if opts.element_dtype is None:
    raise ValueError('cannot pop from a list without knowing its element '
                     'type; use set_element_type to annotate it')
  if opts.element_shape is None:
    raise ValueError('cannot pop from a list without knowing its element '
                     'shape; use set_element_type to annotate it')

  popped_list, popped_value = list_ops.tensor_list_pop_back(
      list_, element_dtype=opts.element_dtype)
  # Propagate the annotated static shape onto the popped element.
  popped_value.set_shape(opts.element_shape)
  return popped_list, popped_value
def _py_list_pop(list_, i):
"""Overload of list_pop that executes a Python list append."""
if i is None:
x = list_.pop()
else:
x = list_.pop(i)
return list_, x
# TODO(mdan): Look into reducing duplication between all these containers.
class ListStackOpts(
    collections.namedtuple('ListStackOpts',
                           ('element_dtype', 'original_call'))):
  """Options for list_stack: element dtype and the fallback (unconverted) call."""
  pass
def list_stack(list_, opts):
  """The list stack function.

  There is no direct Python correspondent; the closest idioms are tf.stack
  and np.stack. Unlike those, this accepts a staged Tensor list (rather than
  a Python list of tensors) or a TensorArray. For any other target the
  dispatcher falls back to opts.original_call.

  Args:
    list_: An entity that supports append semantics.
    opts: A ListStackOpts object.

  Returns:
    The output of the stack operation, typically a Tensor.
  """
  assert isinstance(opts, ListStackOpts)

  if isinstance(list_, tensor_array_ops.TensorArray):
    return _tf_tensorarray_stack(list_)
  if tensor_util.is_tensor(list_):
    if list_.dtype == dtypes.variant:
      return _tf_tensor_list_stack(list_, opts)
    # Stacking a primitive (non-list) tensor is a no-op.
    return list_
  return _py_list_stack(list_, opts)
def _tf_tensorarray_stack(list_):
"""Overload of list_stack that stages a TensorArray stack."""
return list_.stack()
def _tf_tensor_list_stack(list_, opts):
  """Overload of list_stack that stages a Tensor list stack."""
  element_dtype = opts.element_dtype
  # The stack op needs the element dtype, which a variant list cannot supply.
  if element_dtype is None:
    raise ValueError('cannot stack a list without knowing its element type;'
                     ' use set_element_type to annotate it')
  return list_ops.tensor_list_stack(list_, element_dtype=element_dtype)
def _py_list_stack(list_, opts):
"""Overload of list_stack that executes a Python list append."""
# Revert to the original call.
return opts.original_call(list_)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/operators/data_structures.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exception handling statements: assert, etc."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.util import tf_inspect
def assert_stmt(expression1, expression2):
  """Functional form of an assert statement.

  Follows Python assert semantics, though concrete implementations may
  deviate (see each overload for details). The assert statement should not be
  used for control flow, and the expressions should ideally be free of side
  effects.

  Args:
    expression1: Any
    expression2: Callable[[], Any], returns the expression to include in the
      error message when expression1 evaluates to False. When expression1 is
      True, the result of expression2 will not be evaluated, however,
      expression2 itself may be evaluated in some implementations.

  Returns:
    Any, implementation-dependent.

  Raises:
    ValueError: if any arguments are illegal.
  """
  if not callable(expression2):
    raise ValueError('{} must be a callable'.format(expression2))
  # The message factory must be a thunk: no positional or keyword arguments.
  args, _, keywords, _ = tf_inspect.getargspec(expression2)
  if args or keywords:
    raise ValueError('{} may not have any arguments'.format(expression2))

  if tensor_util.is_tensor(expression1):
    return _tf_assert_stmt(expression1, expression2)
  return _py_assert_stmt(expression1, expression2)
def _tf_assert_stmt(expression1, expression2):
  """Overload of assert_stmt that stages a TF Assert.

  This implementation deviates from Python semantics as follows:
  (1) the assertion is verified regardless of the state of __debug__
  (2) on assertion failure, the graph execution will fail with
      tensorflow.errors.ValueError, rather than AssertionError.

  Args:
    expression1: tensorflow.Tensor, must evaluate to a tf.bool scalar
    expression2: Callable[[], Union[tensorflow.Tensor, List[tensorflow.Tensor]]]

  Returns:
    tensorflow.Operation
  """
  # tf.Assert expects a list of data tensors; normalize a single value.
  error_data = expression2()
  if not isinstance(error_data, list):
    error_data = [error_data]
  return control_flow_ops.Assert(expression1, error_data)
def _py_assert_stmt(expression1, expression2):
  """Overload of assert_stmt that executes a Python assert statement."""
  # Uses a literal `assert` so that native semantics are preserved exactly:
  # the message callable is only evaluated when the assertion fails, and the
  # entire statement is stripped under `python -O`.
  assert expression1, expression2()
  return None
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/operators/exceptions.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for data_structures module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.operators import data_structures
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import test
class ListTest(test.TestCase):
  """Tests for the autograph list operators in data_structures."""

  def test_new_list_empty(self):
    l = data_structures.new_list()
    # Can't evaluate an empty list.
    # TODO(mdan): sess.run should allow tf.variant maybe?
    self.assertTrue(isinstance(l, ops.Tensor))

  def test_new_list_tensor(self):
    l = data_structures.new_list([3, 4, 5])
    self.assertAllEqual(l, [3, 4, 5])

  def test_tf_tensor_list_new(self):
    l = data_structures.tf_tensor_list_new([3, 4, 5])
    t = list_ops.tensor_list_stack(l, element_dtype=dtypes.int32)
    with self.cached_session() as sess:
      self.assertAllEqual(self.evaluate(t), [3, 4, 5])

  def test_tf_tensor_list_new_empty(self):
    l = data_structures.tf_tensor_list_new([],
                                           element_dtype=dtypes.int32,
                                           element_shape=())
    t = list_ops.tensor_list_stack(l, element_dtype=dtypes.int32)
    with self.cached_session() as sess:
      self.assertAllEqual(self.evaluate(t), [])

  def test_tf_tensor_list_new_from_tensor(self):
    l = data_structures.tf_tensor_list_new(constant_op.constant([3, 4, 5]))
    t = list_ops.tensor_list_stack(l, element_dtype=dtypes.int32)
    with self.cached_session() as sess:
      self.assertAllEqual(self.evaluate(t), [3, 4, 5])

  @test_util.run_deprecated_v1
  def test_tf_tensor_list_new_illegal_input(self):
    with self.assertRaises(ValueError):
      data_structures.tf_tensor_list_new([3, 4.0])
    # TODO(mdan): It might make more sense to type cast in this case.
    with self.assertRaises(ValueError):
      data_structures.tf_tensor_list_new([3, 4], element_dtype=dtypes.float32)
    # Tensor lists do support heterogeneous lists.
    self.assertIsNot(data_structures.tf_tensor_list_new([3, [4, 5]]), None)
    with self.assertRaises(ValueError):
      data_structures.tf_tensor_list_new([3, 4], element_shape=(2,))
    with self.assertRaises(ValueError):
      data_structures.tf_tensor_list_new(
          constant_op.constant([1, 2, 3]), element_shape=[1])

  def test_tf_tensor_array_new(self):
    l = data_structures.tf_tensor_array_new([3, 4, 5])
    t = l.stack()
    with self.cached_session() as sess:
      self.assertAllEqual(self.evaluate(t), [3, 4, 5])

  def test_tf_tensor_array_new_illegal_input(self):
    with self.assertRaises(ValueError):
      data_structures.tf_tensor_array_new([3, 4.0])
    with self.assertRaises(ValueError):
      data_structures.tf_tensor_array_new([3, 4], element_dtype=dtypes.float32)
    with self.assertRaises(ValueError):
      data_structures.tf_tensor_array_new([3, [4, 5]])
    with self.assertRaises(ValueError):
      data_structures.tf_tensor_array_new([3, 4], element_shape=(2,))
    with self.assertRaises(ValueError):
      data_structures.tf_tensor_array_new([], element_shape=(2,))
    # TAs can infer the shape.
    self.assertIsNot(
        data_structures.tf_tensor_array_new([], element_dtype=dtypes.float32),
        None)

  def test_append_tensor_list(self):
    l = data_structures.new_list()
    x = constant_op.constant([1, 2, 3])
    l = data_structures.list_append(l, x)
    t = list_ops.tensor_list_stack(l, element_dtype=x.dtype)
    with self.cached_session() as sess:
      self.assertAllEqual(self.evaluate(t), [[1, 2, 3]])

  @test_util.run_v1_only("b/117943489")
  def test_append_tensorarray(self):
    l = tensor_array_ops.TensorArray(dtypes.int32, size=0, dynamic_size=True)
    l1 = data_structures.list_append(l, 1)
    l2 = data_structures.list_append(l1, 2)
    with self.cached_session() as sess:
      self.assertAllEqual(self.evaluate(l1.stack()), [1])
      self.assertAllEqual(self.evaluate(l2.stack()), [1, 2])

  def test_append_python(self):
    l = []
    self.assertAllEqual(data_structures.list_append(l, 1), [1])
    self.assertAllEqual(data_structures.list_append(l, 2), [1, 2])

  def test_pop_tensor_list(self):
    initial_list = constant_op.constant([[1, 2], [3, 4]])
    elem_shape = constant_op.constant([2])
    l = list_ops.tensor_list_from_tensor(initial_list, element_shape=elem_shape)
    opts = data_structures.ListPopOpts(
        element_dtype=initial_list.dtype,
        element_shape=(2,))
    # Indexed pop is not supported for tensor lists.
    with self.assertRaises(NotImplementedError):
      data_structures.list_pop(l, 0, opts)
    with self.cached_session() as sess:
      l, x = data_structures.list_pop(l, None, opts)
      self.assertAllEqual(self.evaluate(x), [3, 4])
      t = list_ops.tensor_list_stack(l, element_dtype=initial_list.dtype)
      self.assertAllEqual(self.evaluate(t), [[1, 2]])

  def test_pop_python(self):
    l = [1, 2, 3]
    opts = data_structures.ListPopOpts(element_dtype=None, element_shape=())
    self.assertAllEqual(data_structures.list_pop(l, None, opts), ([1, 2], 3))
    self.assertAllEqual(data_structures.list_pop(l, None, opts), ([1], 2))

  def test_stack_tensor_list(self):
    initial_list = constant_op.constant([[1, 2], [3, 4]])
    elem_shape = constant_op.constant([2])
    l = list_ops.tensor_list_from_tensor(initial_list, element_shape=elem_shape)
    opts = data_structures.ListStackOpts(
        element_dtype=initial_list.dtype, original_call=None)
    with self.cached_session() as sess:
      t = data_structures.list_stack(l, opts)
      self.assertAllEqual(self.evaluate(t), self.evaluate(initial_list))

  @test_util.run_deprecated_v1
  def test_stack_tensor_list_empty(self):
    l = list_ops.empty_tensor_list(
        element_shape=None, element_dtype=dtypes.variant)
    opts = data_structures.ListStackOpts(
        element_dtype=dtypes.int32, original_call=None)
    # TODO(mdan): Allow stacking empty lists if the dtype and shape are known.
    with self.assertRaises(ValueError):
      data_structures.list_stack(l, opts)

  def test_stack_fallback(self):

    def dummy_function(l):
      # Lazy person's mock: just transform the argument in a way in which we
      # can check that this function was indeed called.
      return [x * 2 for x in l]

    opts = data_structures.ListStackOpts(
        element_dtype=None, original_call=dummy_function)

    self.assertAllEqual(data_structures.list_stack([1, 2], opts), [2, 4])
# Standard TensorFlow test runner entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/operators/data_structures_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slices module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.operators import slices
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import list_ops
from tensorflow.python.platform import test
class SlicesTest(test.TestCase):
  """Tests for the autograph get_item/set_item slice operators."""

  def test_set_item_tensor_list(self):
    initial_list = constant_op.constant([[1, 2], [3, 4]])
    elem_shape = constant_op.constant([2])
    l = list_ops.tensor_list_from_tensor(initial_list, element_shape=elem_shape)
    l = slices.set_item(l, 0, [5, 6])
    with self.cached_session() as sess:
      t = list_ops.tensor_list_stack(l, element_dtype=initial_list.dtype)
      self.assertAllEqual(self.evaluate(t), [[5, 6], [3, 4]])

  def test_get_item_tensor_list(self):
    initial_list = constant_op.constant([[1, 2], [3, 4]])
    elem_shape = constant_op.constant([2])
    l = list_ops.tensor_list_from_tensor(initial_list, element_shape=elem_shape)
    t = slices.get_item(
        l, 1, slices.GetItemOpts(element_dtype=initial_list.dtype))
    with self.cached_session() as sess:
      self.assertAllEqual(self.evaluate(t), [3, 4])

  def test_get_item_tensor_string(self):
    # Scalar strings index into their characters.
    initial_str = constant_op.constant('abcd')
    t = slices.get_item(initial_str, 1,
                        slices.GetItemOpts(element_dtype=initial_str.dtype))
    with self.cached_session() as sess:
      self.assertEqual(self.evaluate(t), b'b')

    # String vectors index into their elements.
    initial_list_str = constant_op.constant(['abcd', 'bcde'])
    t = slices.get_item(initial_list_str, 1,
                        slices.GetItemOpts(element_dtype=initial_str.dtype))
    with self.cached_session() as sess:
      self.assertEqual(self.evaluate(t), b'bcde')
# Standard TensorFlow test runner entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/operators/slices_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operators specific to slicing operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_string_ops
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import tensor_array_ops
# TODO(mdan): Support extended slices.
class GetItemOpts(collections.namedtuple('GetItemOpts', ('element_dtype',))):
  """Options for get_item: the dtype annotation of the element being read."""
  pass
def get_item(target, i, opts):
  """The slice read operator (i.e. __getitem__).

  Note: whether target is mutated is unspecified; mutable targets (like
  Python lists) generally will be.

  Args:
    target: An entity that supports getitem semantics.
    i: Index to read from.
    opts: A GetItemOpts object.

  Returns:
    The read element.

  Raises:
    ValueError: if target is not of a supported type.
  """
  assert isinstance(opts, GetItemOpts)

  if isinstance(target, tensor_array_ops.TensorArray):
    return _tf_tensorarray_get_item(target, i)
  if tensor_util.is_tensor(target):
    if target.dtype == dtypes.variant:
      # Variant tensors represent staged Tensor lists.
      return _tf_tensor_list_get_item(target, i, opts)
    if target.dtype == dtypes.string and target.shape.ndims == 0:
      # Scalar strings index into their characters.
      return _tf_tensor_string_get_item(target, i)
    return _tf_tensor_get_item(target, i)
  return _py_get_item(target, i)
def _tf_tensorarray_get_item(target, i):
"""Overload of get_item that stages a TensorArray read."""
return target.read(i)
def _tf_tensor_list_get_item(target, i, opts):
  """Overload of get_item that stages a Tensor list read."""
  # The read op needs the element dtype, which a variant list cannot supply.
  if opts.element_dtype is None:
    raise ValueError('cannot retrieve from a list without knowing its '
                     'element type; use set_element_type to annotate it')
  return list_ops.tensor_list_get_item(
      target, i, element_dtype=opts.element_dtype)
def _tf_tensor_get_item(target, i):
"""Overload of get_item that stages a Tensor (not Tensor list) read."""
return target[i]
def _tf_tensor_string_get_item(target, i):
  """Overload of get_item that stages a Tensor string read."""
  # A single character is a substring of length 1 at offset i.
  return gen_string_ops.substr(target, i, 1)
def _py_get_item(target, i):
"""Overload of get_item that executes a Python list modification."""
return target[i]
def set_item(target, i, x):
  """The slice write operator (i.e. __setitem__).

  Note: whether target is mutated is unspecified; mutable targets (like
  Python lists) generally will be.

  Args:
    target: An entity that supports setitem semantics.
    i: Index to modify.
    x: The new element value.

  Returns:
    Same as target, after the update was performed.

  Raises:
    ValueError: if target is not of a supported type.
  """
  if isinstance(target, tensor_array_ops.TensorArray):
    return _tf_tensorarray_set_item(target, i, x)
  if tensor_util.is_tensor(target):
    # Variant tensors represent staged Tensor lists.
    if target.dtype == dtypes.variant:
      return _tf_tensor_list_set_item(target, i, x)
    return _tf_tensor_set_item(target, i, x)
  return _py_set_item(target, i, x)
def _tf_tensorarray_set_item(target, i, x):
"""Overload of set_item that stages a TensorArray write."""
return target.write(i, x)
def _tf_tensor_list_set_item(target, i, x):
  """Overload of set_item that stages a Tensor list update."""
  updated_list = list_ops.tensor_list_set_item(target, i, x)
  return updated_list
def _tf_tensor_set_item(target, i, x):
  """Overload of set_item that stages a Tensor scatter update."""
  # A single-element scatter: ((i,),) selects row i, (x,) supplies the value.
  return gen_array_ops.tensor_scatter_update(target, ((i,),), (x,))
def _py_set_item(target, i, x):
  """Overload of set_item that executes a Python list modification."""
  # The target is mutated in place; the same object is returned to match the
  # convention of the staged overloads.
  target[i] = x
  return target
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/operators/slices.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for exceptions module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.operators import exceptions
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class ExceptionsTest(test.TestCase):
  """Tests for the autograph functional assert statement."""

  def test_assert_tf_untriggered(self):
    with self.cached_session() as sess:
      t = exceptions.assert_stmt(
          constant_op.constant(True), lambda: constant_op.constant('ignored'))
      self.evaluate(t)

  @test_util.run_deprecated_v1
  def test_assert_tf_triggered(self):
    with self.cached_session() as sess:
      t = exceptions.assert_stmt(
          constant_op.constant(False),
          lambda: constant_op.constant('test message'))
      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                   'test message'):
        self.evaluate(t)

  @test_util.run_deprecated_v1
  def test_assert_tf_multiple_printed_values(self):
    two_tensors = [
        constant_op.constant('test message'),
        constant_op.constant('another message')
    ]
    with self.cached_session() as sess:
      t = exceptions.assert_stmt(
          constant_op.constant(False), lambda: two_tensors)
      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                   'test message.*another message'):
        self.evaluate(t)

  def test_assert_python_untriggered(self):
    side_effect_trace = []

    def expression_with_side_effects():
      side_effect_trace.append(object())
      return 'test message'

    # A passing assertion must not evaluate the message expression.
    exceptions.assert_stmt(True, expression_with_side_effects)
    self.assertListEqual(side_effect_trace, [])

  def test_assert_python_triggered(self):
    if not __debug__:
      # Python assertions can only be tested in debug mode.
      return
    side_effect_trace = []
    tracer = object()

    def expression_with_side_effects():
      side_effect_trace.append(tracer)
      return 'test message'

    with self.assertRaisesRegexp(AssertionError, 'test message'):
      exceptions.assert_stmt(False, expression_with_side_effects)
    self.assertListEqual(side_effect_trace, [tracer])
# Standard TensorFlow test runner entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/operators/exceptions_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for py_builtins module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import six
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.core import function_wrappers
from tensorflow.python.autograph.operators import data_structures
from tensorflow.python.autograph.operators import py_builtins
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import test
class TestBase(object):
  """Minimal base class whose method the builtins tests resolve and call."""

  def plus_twenty(self, x):
    offset = 20
    return x + offset
class PyBuiltinsTest(test.TestCase):
def test_abs(self):
self.assertEqual(py_builtins.abs_(-1), 1)
with self.cached_session() as sess:
t = py_builtins.abs_(constant_op.constant(-1))
self.assertEqual(self.evaluate(t), 1)
t = py_builtins.abs_(constant_op.constant([-1, 2, -3]))
self.assertAllEqual(self.evaluate(t), [1, 2, 3])
def test_float(self):
self.assertEqual(py_builtins.float_(10), 10.0)
self.assertEqual(py_builtins.float_('10.0'), 10.0)
with self.cached_session() as sess:
t = py_builtins.float_(constant_op.constant(1, dtype=dtypes.int64))
self.assertEqual(self.evaluate(t), 1.0)
st = py_builtins.float_(constant_op.constant('1.0'))
self.assertEqual(self.evaluate(st), 1.0)
def test_int(self):
self.assertEqual(py_builtins.int_(10.0), 10)
self.assertEqual(py_builtins.int_('11', 2), 3)
with self.cached_session() as sess:
t = py_builtins.int_(constant_op.constant(1, dtype=dtypes.float64))
self.assertEqual(self.evaluate(t), 1)
st = py_builtins.int_(constant_op.constant('1'))
self.assertEqual(self.evaluate(st), 1)
st = py_builtins.int_(constant_op.constant('1'), 10)
self.assertEqual(self.evaluate(st), 1)
def test_int_unsupported_base(self):
t = constant_op.constant(1, dtype=dtypes.float64)
with self.assertRaises(NotImplementedError):
py_builtins.int_(t, 2)
def test_len(self):
self.assertEqual(py_builtins.len_([1, 2, 3]), 3)
with self.cached_session() as sess:
t = py_builtins.len_(constant_op.constant([[1], [2], [3]]))
self.assertEqual(t, 3)
ta = py_builtins.len_(tensor_array_ops.TensorArray(dtypes.int32, size=5))
self.assertEqual(self.evaluate(ta), 5)
tl = py_builtins.len_(data_structures.tf_tensor_list_new([3, 4, 5]))
self.assertEqual(self.evaluate(tl), 3)
def test_len_scalar(self):
with self.assertRaises(ValueError):
py_builtins.len_(constant_op.constant(1))
@test_util.run_deprecated_v1
def test_len_dynamic_shape(self):
with self.cached_session() as sess:
p = array_ops.placeholder(dtype=dtypes.int32, shape=None)
t = py_builtins.len_(p)
self.assertEqual(sess.run(t, {p: [1, 2, 3]}), 3)
with self.assertRaises(errors_impl.InvalidArgumentError):
t = py_builtins.len_(p)
sess.run(t, {p: 1})
@test_util.run_deprecated_v1
def test_print_tensors(self):
try:
out_capturer = six.StringIO()
sys.stdout = out_capturer
with self.cached_session() as sess:
sess.run(py_builtins.print_(constant_op.constant('test message'), 1))
self.assertEqual(out_capturer.getvalue(), 'test message 1\n')
finally:
sys.stdout = sys.__stdout__
@test_util.run_deprecated_v1
def test_print_complex(self):
try:
out_capturer = six.StringIO()
sys.stdout = out_capturer
with self.cached_session() as sess:
sess.run(
py_builtins.print_(constant_op.constant('test message'), [1, 2]))
self.assertEqual(out_capturer.getvalue(), 'test message [1, 2]\n')
finally:
sys.stdout = sys.__stdout__
def test_range(self):
self.assertListEqual(list(py_builtins.range_(3)), [0, 1, 2])
self.assertListEqual(list(py_builtins.range_(1, 3)), [1, 2])
self.assertListEqual(list(py_builtins.range_(2, 0, -1)), [2, 1])
def test_range_tensor(self):
with self.cached_session() as sess:
r = py_builtins.range_(constant_op.constant(3))
self.assertAllEqual(self.evaluate(r), [0, 1, 2])
r = py_builtins.range_(1, constant_op.constant(3))
self.assertAllEqual(self.evaluate(r), [1, 2])
r = py_builtins.range_(2, 0, constant_op.constant(-1))
self.assertAllEqual(self.evaluate(r), [2, 1])
def test_range_tensor_empty_range(self):
with self.session() as sess:
r = py_builtins.range_(constant_op.constant(-3))
self.assertAllEqual(self.evaluate(r), [])
r = py_builtins.range_(5, constant_op.constant(2))
self.assertAllEqual(self.evaluate(r), [])
def test_enumerate(self):
self.assertListEqual(
list(py_builtins.enumerate_([3, 2, 1])), [(0, 3), (1, 2), (2, 1)])
self.assertListEqual(
list(py_builtins.enumerate_([3, 2, 1], 5)), [(5, 3), (6, 2), (7, 1)])
self.assertListEqual(list(py_builtins.enumerate_([-8], -3)), [(-3, -8)])
def test_enumerate_dataset(self):
dataset = dataset_ops.DatasetV2.from_tensor_slices(['a', 'c'])
start = constant_op.constant(20, dtype=dtypes.int64)
dataset = py_builtins.enumerate_(dataset, start)
iterator = dataset_ops.make_one_shot_iterator(dataset)
with self.cached_session() as sess:
self.assertAllEqual(self.evaluate(iterator.get_next()), (20, b'a'))
self.assertAllEqual(self.evaluate(iterator.get_next()), (21, b'c'))
def test_zip(self):
self.assertListEqual(
list(py_builtins.zip_([3, 2, 1], [1, 2, 3])), [(3, 1), (2, 2), (1, 3)])
self.assertListEqual(
list(py_builtins.zip_([4, 5, 6], [-1, -2])), [(4, -1), (5, -2)])
def test_zip_dataset(self):
ds1 = dataset_ops.DatasetV2.from_tensor_slices([-11, -12, 4])
ds2 = dataset_ops.DatasetV2.from_tensor_slices([-21, -22, 5])
ds3 = py_builtins.zip_(ds1, ds2)
iterator = dataset_ops.make_one_shot_iterator(ds3)
with self.cached_session() as sess:
self.assertAllEqual(self.evaluate(iterator.get_next()), (-11, -21))
self.assertAllEqual(self.evaluate(iterator.get_next()), (-12, -22))
self.assertAllEqual(self.evaluate(iterator.get_next()), (4, 5))
  def _basic_function_scope(self):
    """Returns a FunctionScope like the ones converted code would create."""
    return function_wrappers.FunctionScope(
        'test_function_name',
        'test_scope', # Note: this must match the name in the `with` statement.
        converter.ConversionOptions())
  def test_eval_in_original_context(self):
    """eval_in_original_context sees locals of the calling function.

    NOTE(review): this test depends on the exact frame layout of test_fn;
    do not restructure its body.
    """
    def test_fn():
      l = 1 # pylint:disable=unused-variable
      with self._basic_function_scope() as test_scope:
        # Expects 1: the eval resolves `l` from test_fn's own frame.
        return py_builtins.eval_in_original_context(eval, ('l',), test_scope)
    self.assertEqual(test_fn(), 1)
  def test_eval_in_original_context_inner_function(self):
    """A scope-less inner function's locals win over the outer function's.

    NOTE(review): frame-sensitive; the nesting of inner_fn must not change.
    """
    def test_fn():
      l = 1 # pylint:disable=unused-variable
      with self._basic_function_scope() as test_scope:
        def inner_fn():
          # Note: a user function without a top-level function scope should
          # never be found in user code; it's only possible in generated code.
          l = 2 # pylint:disable=unused-variable
          return py_builtins.eval_in_original_context(eval, ('l',), test_scope)
        return inner_fn()
    # Expects 2: the binding from inner_fn (generated-code style) is used.
    self.assertEqual(test_fn(), 2)
  def test_super_in_original_context_unary_call(self):
    """super_in_original_context handles the one-argument super() form."""
    test_case_self = self
    class TestSubclass(TestBase):
      def plus_twenty(self, x):
        # The subclass override must be bypassed by super().
        test_case_self.fail('This should never be called.')
      def test_method(self):
        with test_case_self._basic_function_scope() as test_scope:
          test_base_unbound = py_builtins.super_in_original_context(
              super, (TestSubclass,), test_scope)
          # One-arg super() yields an unbound proxy; bind it explicitly.
          test_base = test_base_unbound.__get__(self, TestSubclass)
          return test_base.plus_twenty(1)
    tc = TestSubclass()
    self.assertEqual(tc.test_method(), 21)
  def test_super_in_original_context_binary_call(self):
    """super_in_original_context handles the two-argument super() form."""
    test_case_self = self
    class TestSubclass(TestBase):
      def plus_twenty(self, x):
        # The subclass override must be bypassed by super().
        test_case_self.fail('This should never be called.')
      def test_method(self):
        with test_case_self._basic_function_scope() as test_scope:
          # Two-arg super(cls, obj) is already bound; call it directly.
          test_base = py_builtins.super_in_original_context(
              super, (TestSubclass, self), test_scope)
          return test_base.plus_twenty(1)
    tc = TestSubclass()
    self.assertEqual(tc.test_method(), 21)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/operators/py_builtins_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for py_builtins_py3 module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.core import function_wrappers
from tensorflow.python.autograph.operators import py_builtins
from tensorflow.python.platform import test
class TestBaseClass(object):
  """Base fixture; subclasses override the method to probe super() dispatch."""

  def overridden_method(self, x):
    """Returns x plus twenty, marking that the base implementation ran."""
    return 20 + x
class PyBuiltinsTest(test.TestCase):
  """Python-3-only tests for py_builtins.super_in_original_context.

  NOTE(review): super_in_original_context appears to rely on the calling
  frame and __class__ cell (see the in-line notes below) — the exact code
  shapes here (inner defs, lambdas, extra locals) are the fixtures and must
  not be restructured.
  """
  def _basic_function_scope(self):
    """Returns a FunctionScope like the ones converted code would create."""
    return function_wrappers.FunctionScope(
        'test_function_name',
        'test_scope', # Note: this must match the name in the `with` statement.
        converter.ConversionOptions())
  def test_super_in_original_context_niladic_call(self):
    """Zero-argument super() resolves to the base class implementation."""
    test_case_self = self
    class TestSubclass(TestBaseClass):
      def overridden_method(self, x):
        test_case_self.fail('This should never be called.')
      def test_method(self):
        with test_case_self._basic_function_scope() as test_scope:
          b = py_builtins.super_in_original_context(super, (), test_scope)
          return b.overridden_method(1)
    tc = TestSubclass()
    self.assertEqual(tc.test_method(), 21)
  def test_super_in_original_context_caller_with_locals(self):
    """Extra locals in the caller frame must not confuse the lookup."""
    test_case_self = self
    class TestSubclass(TestBaseClass):
      def overridden_method(self, x):
        test_case_self.fail('This should never be called.')
      def test_method(self, x):
        y = 7
        with test_case_self._basic_function_scope() as test_scope:
          z = 7
          return py_builtins.super_in_original_context(
              super, (), test_scope).overridden_method(x + y - z)
    tc = TestSubclass()
    self.assertEqual(tc.test_method(1), 21)
  def test_super_in_original_context_inner_function(self):
    """super() works from an inner def, as generated code may create."""
    test_case_self = self
    class TestSubclass(TestBaseClass):
      def overridden_method(self, x):
        test_case_self.fail('This should never be called.')
      def test_method(self, x):
        with test_case_self._basic_function_scope() as test_scope:
          # Oddly, it's sufficient to use `self` in an inner function
          # to gain access to __class__ in this scope.
          # TODO(mdan): Is this true across implementations?
          # Note: normally, it's illegal to use super() in inner functions (it
          # throws an error), but the generated code may create them.
          def inner_fn():
            return py_builtins.super_in_original_context(
                super, (), test_scope).overridden_method(x)
          return inner_fn()
    tc = TestSubclass()
    self.assertEqual(tc.test_method(1), 21)
  def test_super_in_original_context_inner_lambda(self):
    """super() works from a lambda, as generated code may create."""
    test_case_self = self
    class TestSubclass(TestBaseClass):
      def overridden_method(self, x):
        test_case_self.fail('This should never be called.')
      def test_method(self, x):
        with test_case_self._basic_function_scope() as test_scope:
          # Oddly, it's sufficient to use `self` in an inner function
          # to gain access to __class__ in this scope.
          # TODO(mdan): Is this true across implementations?
          # Note: normally, it's illegal to use super() in inner functions (it
          # throws an error), but the generated code may create them.
          l = lambda: py_builtins.super_in_original_context( # pylint:disable=g-long-lambda
              super, (), test_scope).overridden_method(x)
          return l()
    tc = TestSubclass()
    self.assertEqual(tc.test_method(1), 21)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/operators/py_builtins_py3_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Lowers list comprehensions into for and if statements.
Example:
result = [x * x for x in xs]
becomes
result = []
for x in xs:
elt = x * x
result.append(elt)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.pyct import templates
# TODO(mdan): This should convert directly to operator calls.
class ListCompTransformer(converter.Base):
  """Lowers list comprehensions into standard control flow."""

  def visit_Assign(self, node):
    """Rewrites `target = [elt for ... in ... if ...]` as explicit loops."""
    if not isinstance(node.value, gast.ListComp):
      return self.generic_visit(node)
    if len(node.targets) > 1:
      raise NotImplementedError('multiple assignments')
    (target,) = node.targets
    comp = node.value
    init_template = """
      target = []
    """
    init_stmts = templates.replace(init_template, target=target)
    append_template = """
      target.append(elt)
    """
    lowered = templates.replace(append_template, target=target, elt=comp.elt)
    # Wrap the append statement from the innermost construct outward, so the
    # leftmost generator ends up outermost, matching comprehension semantics.
    for generator in reversed(comp.generators):
      for cond in reversed(generator.ifs):
        if_template = """
          if test:
            body
        """
        lowered = templates.replace(if_template, test=cond, body=lowered)
      for_template = """
        for target in iter_:
          body
      """
      lowered = templates.replace(
          for_template, iter_=generator.iter, target=generator.target,
          body=lowered)
    return init_stmts + lowered
def transform(node, ctx):
  """Entry point for the list-comprehension lowering pass."""
  transformer = ListCompTransformer(ctx)
  return transformer.visit(node)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/converters/list_comprehensions.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for logical_expressions module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.converters import logical_expressions
from tensorflow.python.autograph.core import converter_testing
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class LogicalExpressionTest(converter_testing.TestCase):
  """Tests for the logical_expressions converter.

  NOTE(review): each inner `test_fn` is the conversion fixture — its exact
  source is what the converter transforms — so the bodies must not be
  restructured.
  """
  @test_util.run_deprecated_v1
  def test_equals(self):
    def test_fn(a, b):
      return a == b
    with self.converted(test_fn, logical_expressions, {}) as result:
      with self.cached_session() as sess:
        self.assertTrue(sess.run(result.test_fn(constant_op.constant(1), 1)))
        self.assertFalse(sess.run(result.test_fn(constant_op.constant(1), 2)))
  @test_util.run_deprecated_v1
  def test_bool_ops(self):
    def test_fn(a, b, c):
      return (a or b) and (a or b or c) and not c
    with self.converted(test_fn, logical_expressions, {}) as result:
      with self.cached_session() as sess:
        self.assertTrue(
            sess.run(result.test_fn(constant_op.constant(True), False, False)))
        self.assertFalse(
            sess.run(result.test_fn(constant_op.constant(True), False, True)))
  @test_util.run_deprecated_v1
  def test_comparison(self):
    def test_fn(a, b, c, d):
      return a < b == c > d
    with self.converted(test_fn, logical_expressions, {}) as result:
      with self.cached_session() as sess:
        # Note: having just the first constant a tensor tests that the
        # operations execute in the correct order. If anything other than
        # a < b executed first, the result would be a Python scalar and not a
        # Tensor. This is valid as long as the dispatch is automatic based on
        # type.
        self.assertTrue(
            sess.run(result.test_fn(constant_op.constant(1), 2, 2, 1)))
        self.assertFalse(
            sess.run(result.test_fn(constant_op.constant(1), 2, 2, 3)))
  def test_default_ops(self):
    # Presumably non-tensor operands dispatch to the plain Python operator;
    # verify against the converter implementation.
    def test_fn(a, b):
      return a in b
    with self.converted(test_fn, logical_expressions, {}) as result:
      self.assertTrue(result.test_fn('a', ('a',)))
  def test_unary_ops(self):
    def test_fn(a):
      return ~a, -a, +a
    with self.converted(test_fn, logical_expressions, {}) as result:
      self.assertEqual(result.test_fn(1), (-2, -1, 1))
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/converters/logical_expressions_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for continue_statements module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.converters import continue_statements
from tensorflow.python.autograph.core import converter_testing
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
class ContinueCanonicalizationTest(converter_testing.TestCase):
  """Tests for the continue_statements converter.

  NOTE(review): each inner `test_fn` is a conversion fixture — its exact
  source feeds the converter — so the bodies must not be restructured.
  Equivalence is checked by running the original and converted functions on
  the same inputs.
  """
  def assertTransformedEquivalent(self, test_fn, *inputs):
    """Asserts test_fn and its converted form return equal values."""
    with self.converted(test_fn, continue_statements, {'ops': ops},
                        (constant_op.constant,)) as result:
      self.assertEqual(test_fn(*inputs), result.test_fn(*inputs))
  def test_basic(self):
    def test_fn(x):
      v = []
      while x > 0:
        x -= 1
        if x % 2 == 0:
          continue
        v.append(x)
      return v
    self.assertTransformedEquivalent(test_fn, 0)
    self.assertTransformedEquivalent(test_fn, 1)
    self.assertTransformedEquivalent(test_fn, 3)
    self.assertTransformedEquivalent(test_fn, 4)
  def test_multiple_continues(self):
    def test_fn(x):
      v = []
      while x > 0:
        x -= 1
        if x > 1:
          continue
        if x > 2:
          continue
        v.append(x)
      return v
    self.assertTransformedEquivalent(test_fn, 0)
    self.assertTransformedEquivalent(test_fn, 1)
    self.assertTransformedEquivalent(test_fn, 3)
    self.assertTransformedEquivalent(test_fn, 4)
  def test_multiple_continues_in_nested_scope(self):
    # Exercises continue inside try/except within a for loop.
    def test_fn(a):
      v = []
      for x in a:
        x -= 1
        if x > 100:
          continue
        try:
          raise ValueError('intentional')
        except ValueError:
          continue
        v.append(x)
      return v
    self.assertTransformedEquivalent(test_fn, [])
    self.assertTransformedEquivalent(test_fn, [1])
    self.assertTransformedEquivalent(test_fn, [2])
    self.assertTransformedEquivalent(test_fn, [1, 2, 3])
  def test_for_loop(self):
    def test_fn(a):
      v = []
      for x in a:
        x -= 1
        if x % 2 == 0:
          continue
        v.append(x)
      return v
    self.assertTransformedEquivalent(test_fn, [])
    self.assertTransformedEquivalent(test_fn, [1])
    self.assertTransformedEquivalent(test_fn, [2])
    self.assertTransformedEquivalent(test_fn, [1, 2, 3])
  def test_nested_with(self):
    # continue from inside a `with` block.
    def test_fn(x):
      v = []
      while x > 0:
        x -= 1
        with ops.name_scope(''):
          if x % 2 == 0:
            continue
        v.append(x)
      return v
    self.assertTransformedEquivalent(test_fn, 0)
    self.assertTransformedEquivalent(test_fn, 1)
    self.assertTransformedEquivalent(test_fn, 3)
    self.assertTransformedEquivalent(test_fn, 4)
  def test_nested_multiple_withs(self):
    # Statements after the continue, in a sibling `with`, must be skipped.
    def test_fn(x):
      v = []
      while x > 0:
        x -= 1
        with ops.name_scope(''):
          if x % 2 == 0:
            continue
        with ops.name_scope(''):
          v.append(x)
        v.append(x)
      return v
    self.assertTransformedEquivalent(test_fn, 0)
    self.assertTransformedEquivalent(test_fn, 1)
    self.assertTransformedEquivalent(test_fn, 3)
    self.assertTransformedEquivalent(test_fn, 4)
  def test_nested_multiple_withs_and_statements(self):
    def test_fn(x):
      v = []
      while x > 0:
        x -= 1
        with ops.name_scope(''):
          if x % 2 == 0:
            continue
          v.append(x)
        v.append(x)
        with ops.name_scope(''):
          v.append(x)
        v.append(x)
      return v
    self.assertTransformedEquivalent(test_fn, 0)
    self.assertTransformedEquivalent(test_fn, 1)
    self.assertTransformedEquivalent(test_fn, 3)
    self.assertTransformedEquivalent(test_fn, 4)
  def test_nested_multiple_withs_and_nested_withs(self):
    def test_fn(x):
      v = []
      while x > 0:
        x -= 1
        with ops.name_scope(''):
          if x % 2 == 0:
            continue
          with ops.name_scope(''):
            v.append(x)
          v.append(x)
        with ops.name_scope(''):
          v.append(x)
        v.append(x)
      return v
    self.assertTransformedEquivalent(test_fn, 0)
    self.assertTransformedEquivalent(test_fn, 1)
    self.assertTransformedEquivalent(test_fn, 3)
    self.assertTransformedEquivalent(test_fn, 4)
  def test_nested(self):
    # continue from a doubly-nested conditional.
    def test_fn(x):
      v = []
      u = []
      w = []
      while x > 0:
        x -= 1
        if x % 2 == 0:
          if x % 3 != 0:
            u.append(x)
          else:
            w.append(x)
            continue
        v.append(x)
      return v, u, w
    self.assertTransformedEquivalent(test_fn, 0)
    self.assertTransformedEquivalent(test_fn, 1)
    self.assertTransformedEquivalent(test_fn, 3)
    self.assertTransformedEquivalent(test_fn, 4)
  def test_multiple_guarded_continues_with_side_effects(self):
    # `track` records every evaluation, so the converted code must evaluate
    # the guards exactly as often as the original.
    def test_fn(x):
      def track(u, x):
        u.append(x)
        return x
      u = []
      v = []
      while x > 0:
        x -= 1
        if track(u, x) > 1:
          continue
        if track(u, x) > 2:
          continue
        v.append(x)
      return u, v
    self.assertTransformedEquivalent(test_fn, 3)
    self.assertTransformedEquivalent(test_fn, 2)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/converters/continue_statements_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converter for list operations.
This includes converting Python lists to TensorArray/TensorList.
"""
# TODO(mdan): Elaborate the logic here.
# TODO(mdan): Does it even make sense to attempt to try to use TAs?
# The current rule (always convert to TensorArray) is naive and insufficient.
# In general, a better mechanism could look like:
# * convert to TensorList by default
# * leave as Python list if the user explicitly forbids it
# * convert to TensorArray only when complete write once behavior can be
# guaranteed (e.g. list comprehensions)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.lang import directives
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct.static_analysis.annos import NodeAnno
# Tags for local state.
POP_USES = 'pop_uses'
class ListTransformer(converter.Base):
  """Converts lists and related operations to their TF counterpart."""
  def visit_List(self, node):
    """Wraps list literals in ag__.new_list."""
    node = self.generic_visit(node)
    template = """
      ag__.new_list(elements)
    """
    return templates.replace_as_expression(template, elements=node)
  def _replace_append_call(self, node):
    """Rewrites `target.append(x)` as `target = ag__.list_append(target, x)`."""
    assert len(node.args) == 1
    assert isinstance(node.func, gast.Attribute)
    template = """
      target = ag__.list_append(target, element)
    """
    return templates.replace(
        template,
        target=node.func.value,
        element=node.args[0])
  def _replace_pop_call(self, node):
    """Substitutes a fresh variable for a pop() call; defers the actual op."""
    # Expressions that use pop() are converted to a statement + expression.
    #
    # For example:
    #
    # print(target.pop())
    #
    # ... is converted to:
    #
    # target, target_pop = ag__.list_pop(target)
    # print(target_pop)
    #
    # Here, we just generate the variable name and swap it in,
    # and _generate_pop_operation will handle the rest.
    #
    # Multiple uses of pop() are allowed:
    #
    # print(target.pop(), target.pop())
    # print(target.pop().pop())
    #
    assert isinstance(node.func, gast.Attribute)
    scope = anno.getanno(node, NodeAnno.ARGS_SCOPE)
    target_node = node.func.value
    # Attempt to use a related name if one exists. Otherwise use something
    # generic.
    if anno.hasanno(target_node, anno.Basic.QN):
      target_name = anno.getanno(target_node, anno.Basic.QN).ssf()
    else:
      target_name = 'list_'
    pop_var_name = self.ctx.namer.new_symbol(target_name, scope.referenced)
    # Record the deferred pop; _postprocess_statement emits it later.
    pop_uses = self.get_local(POP_USES, [])
    pop_uses.append((node, pop_var_name))
    self.set_local(POP_USES, pop_uses)
    return templates.replace_as_expression('var_name', var_name=pop_var_name)
  def _replace_stack_call(self, node):
    """Rewrites `stack(l)` as ag__.list_stack with the element dtype."""
    assert len(node.args) == 1
    dtype = self.get_definition_directive(
        node.args[0],
        directives.set_element_type,
        'dtype',
        default=templates.replace_as_expression('None'))
    template = """
      ag__.list_stack(
          target,
          opts=ag__.ListStackOpts(
              element_dtype=dtype,
              original_call=orig_call))
    """
    return templates.replace_as_expression(
        template,
        dtype=dtype,
        target=node.args[0],
        orig_call=node.func)
  def visit_Call(self, node):
    """Dispatches append/pop/stack attribute calls to their rewriters."""
    node = self.generic_visit(node)
    # TODO(mdan): This is insufficient if target is a function argument.
    # In the case of function arguments, we need to add the list to the
    # function's return value, because it is being modified.
    # TODO(mdan): Checking just the name is brittle, can it be improved?
    if isinstance(node.func, gast.Attribute):
      func_name = node.func.attr
      if func_name == 'append' and (len(node.args) == 1):
        node = self._replace_append_call(node)
      elif func_name == 'pop' and (len(node.args) <= 1):
        node = self._replace_pop_call(node)
      elif (func_name == 'stack' and (len(node.args) == 1) and
            (not node.keywords or node.keywords[0].arg == 'strict')):
        # This avoids false positives with keyword args.
        # TODO(mdan): handle kwargs properly.
        node = self._replace_stack_call(node)
    return node
  def _generate_pop_operation(self, original_call_node, pop_var_name):
    """Builds the `target, var = ag__.list_pop(...)` statement."""
    assert isinstance(original_call_node.func, gast.Attribute)
    if original_call_node.args:
      pop_element = original_call_node.args[0]
    else:
      pop_element = parser.parse_expression('None')
    # The call will be something like "target.pop()", and the dtype is hooked to
    # target, hence the func.value.
    # TODO(mdan): For lists of lists, this won't work.
    # The reason why it won't work is because it's unclear how to annotate
    # the list as a "list of lists with a certain element type" when using
    # operations like `l.pop().pop()`.
    dtype = self.get_definition_directive(
        original_call_node.func.value,
        directives.set_element_type,
        'dtype',
        default=templates.replace_as_expression('None'))
    shape = self.get_definition_directive(
        original_call_node.func.value,
        directives.set_element_type,
        'shape',
        default=templates.replace_as_expression('None'))
    template = """
      target, pop_var_name = ag__.list_pop(
          target, element,
          opts=ag__.ListPopOpts(element_dtype=dtype, element_shape=shape))
    """
    return templates.replace(
        template,
        target=original_call_node.func.value,
        pop_var_name=pop_var_name,
        element=pop_element,
        dtype=dtype,
        shape=shape)
  def _postprocess_statement(self, node):
    """Inserts any separate pop() calls that node may use."""
    pop_uses = self.get_local(POP_USES, None)
    if pop_uses:
      replacements = []
      # Pop statements precede the statement that consumes their values.
      for original_call_node, pop_var_name in pop_uses:
        replacements.extend(
            self._generate_pop_operation(original_call_node, pop_var_name))
      replacements.append(node)
      node = replacements
    self.exit_local_scope()
    return node, None
  # TODO(mdan): Should we have a generic visit_block instead?
  # Right now it feels that a visit_block would add too much magic that's
  # hard to follow.
  def _visit_and_process_block(self, block):
    """Visits a statement block with a fresh local scope for pop tracking."""
    return self.visit_block(
        block,
        before_visit=self.enter_local_scope,
        after_visit=self._postprocess_statement)
  def visit_FunctionDef(self, node):
    node.args = self.generic_visit(node.args)
    node.decorator_list = self.visit_block(node.decorator_list)
    node.body = self._visit_and_process_block(node.body)
    return node
  def visit_For(self, node):
    node.target = self.visit(node.target)
    node.body = self._visit_and_process_block(node.body)
    node.orelse = self._visit_and_process_block(node.orelse)
    return node
  def visit_While(self, node):
    node.test = self.visit(node.test)
    node.body = self._visit_and_process_block(node.body)
    node.orelse = self._visit_and_process_block(node.orelse)
    return node
  def visit_If(self, node):
    node.test = self.visit(node.test)
    node.body = self._visit_and_process_block(node.body)
    node.orelse = self._visit_and_process_block(node.orelse)
    return node
  def visit_With(self, node):
    node.items = self.visit_block(node.items)
    node.body = self._visit_and_process_block(node.body)
    return node
def transform(node, ctx):
  """Entry point for the list-operations conversion pass."""
  transformer = ListTransformer(ctx)
  return transformer.visit(node)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/converters/lists.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for return_statements module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.converters import return_statements
from tensorflow.python.autograph.core import converter_testing
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
class SingleReturnTest(converter_testing.TestCase):
  """Tests for the return_statements converter.

  NOTE(review): each inner `test_fn` is a conversion fixture — its exact
  source feeds the converter — so the bodies must not be restructured.

  Fixes: `test_contitional_missing_else` was misspelled, and
  `text_conditional_in_context_manager` used a `text_` prefix, which meant
  unittest discovery never ran it at all.
  """
  def assertTransformedEquivalent(self, test_fn, *inputs):
    """Asserts test_fn and its converted form return equal values."""
    ns = {'ops': ops}
    with self.converted(test_fn, return_statements, ns) as result:
      self.assertEqual(test_fn(*inputs), result.test_fn(*inputs))
  def test_straightline(self):
    def test_fn(x):
      return x * x
    self.assertTransformedEquivalent(test_fn, 2)
  def test_superfluous_returns(self):
    def test_fn():
      retval = 1
      return retval
      retval = 2 # pylint:disable=unreachable
      return retval
    self.assertTransformedEquivalent(test_fn)
  def test_superfluous_returns_adjacent(self):
    def test_fn():
      return 1
      return 2 # pylint:disable=unreachable
    self.assertTransformedEquivalent(test_fn)
  def test_conditional(self):
    def test_fn(x):
      if x > 0:
        return x
      else:
        return x * x
    self.assertTransformedEquivalent(test_fn, 2)
    self.assertTransformedEquivalent(test_fn, -2)
  def test_conditional_missing_else(self):
    # Fixed name: was `test_contitional_missing_else`.
    def test_fn(x):
      if x > 0:
        return x
    self.assertTransformedEquivalent(test_fn, 2)
    self.assertTransformedEquivalent(test_fn, -2)
  def test_conditional_missing_else_then_default(self):
    def test_fn(x):
      if x > 0:
        return x
      return x * x
    self.assertTransformedEquivalent(test_fn, 2)
    self.assertTransformedEquivalent(test_fn, -2)
  def test_conditional_else_only_then_default(self):
    def test_fn(x):
      if x < 0:
        x *= x
      else:
        return x
      return x
    self.assertTransformedEquivalent(test_fn, 2)
    self.assertTransformedEquivalent(test_fn, -2)
  def test_conditional_nested(self):
    def test_fn(x):
      if x > 0:
        if x < 5:
          return x
        else:
          return x * x
      else:
        return x * x * x
    self.assertTransformedEquivalent(test_fn, 2)
    self.assertTransformedEquivalent(test_fn, -2)
    self.assertTransformedEquivalent(test_fn, 5)
  def test_context_manager(self):
    def test_fn(x):
      with ops.name_scope(''):
        return x * x
    self.assertTransformedEquivalent(test_fn, 2)
    self.assertTransformedEquivalent(test_fn, -2)
  def test_context_manager_in_conditional(self):
    def test_fn(x):
      if x > 0:
        with ops.name_scope(''):
          return x * x
      else:
        return x
    self.assertTransformedEquivalent(test_fn, 2)
    self.assertTransformedEquivalent(test_fn, -2)
  def test_conditional_in_context_manager(self):
    # Fixed name: was `text_conditional_in_context_manager`, which the test
    # runner never discovered (unittest only picks up `test*` methods).
    def test_fn(x):
      with ops.name_scope(''):
        if x > 0:
          return x * x
        else:
          return x
    self.assertTransformedEquivalent(test_fn, 2)
    self.assertTransformedEquivalent(test_fn, -2)
  def test_no_return(self):
    def test_fn(x):
      x *= x
    self.assertTransformedEquivalent(test_fn, 2)
  def test_nested_function(self):
    def test_fn(x):
      def inner_fn(y):
        if y > 0:
          return y * y
        else:
          return y
      return inner_fn(x)
    self.assertTransformedEquivalent(test_fn, 2)
    self.assertTransformedEquivalent(test_fn, -2)
  def test_nested_function_in_control_flow(self):
    def test_fn(x):
      if x:
        def inner_fn(y):
          return y
        inner_fn(x)
    self.assertTransformedEquivalent(test_fn, 2)
    self.assertTransformedEquivalent(test_fn, -2)
  def test_for_loop(self):
    def test_fn(n):
      for _ in range(n):
        return 1
    self.assertTransformedEquivalent(test_fn, 2)
    self.assertTransformedEquivalent(test_fn, 0)
  def test_while_loop(self):
    def test_fn(n):
      i = 0
      s = 0
      while i < n:
        i += 1
        s += i
        if s > 4:
          return s
      return -1
    self.assertTransformedEquivalent(test_fn, 0)
    self.assertTransformedEquivalent(test_fn, 2)
    self.assertTransformedEquivalent(test_fn, 4)
  def test_null_return(self):
    def test_fn(n):
      if n > 4:
        return
      return
    self.assertTransformedEquivalent(test_fn, 4)
    self.assertTransformedEquivalent(test_fn, 5)
  def test_nested_multiple_withs(self):
    def test_fn(x):
      v = []
      while x > 0:
        x -= 1
        with ops.name_scope(''):
          if x % 2 == 0:
            return v
        with ops.name_scope(''):
          v.append(x)
        v.append(x)
      return v
    self.assertTransformedEquivalent(test_fn, 0)
    self.assertTransformedEquivalent(test_fn, 1)
    self.assertTransformedEquivalent(test_fn, 3)
    self.assertTransformedEquivalent(test_fn, 4)
  def test_multiple_returns_in_nested_scope(self):
    def test_fn(a):
      v = []
      for x in a:
        x -= 1
        if x > 100:
          return v
        try:
          raise ValueError('intentional')
        except ValueError: # pylint:disable=bare-except
          return v
        v.append(x)
      return v
    self.assertTransformedEquivalent(test_fn, [])
    self.assertTransformedEquivalent(test_fn, [1])
    self.assertTransformedEquivalent(test_fn, [2])
    self.assertTransformedEquivalent(test_fn, [1, 2, 3])
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/converters/return_statements_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lists module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.converters import lists
from tensorflow.python.autograph.core import converter_testing
from tensorflow.python.autograph.lang import directives
from tensorflow.python.autograph.lang import special_functions
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import list_ops
from tensorflow.python.platform import test
tf = None # Will be replaced by a mock.
class ListTest(converter_testing.TestCase):
  """Tests for the `lists` converter (list literals, append, pop, stack)."""

  def test_empty_list(self):

    def test_fn():
      return []

    with self.converted(test_fn, lists, {}) as result:
      tl = result.test_fn()
      # Empty tensor lists cannot be evaluated or stacked.
      self.assertTrue(isinstance(tl, ops.Tensor))
      self.assertEqual(tl.dtype, dtypes.variant)

  def test_initialized_list(self):

    def test_fn():
      return [1, 2, 3]

    with self.converted(test_fn, lists, {}) as result:
      self.assertAllEqual(result.test_fn(), [1, 2, 3])

  def test_list_append(self):

    def test_fn():
      l = special_functions.tensor_list([1])
      l.append(2)
      l.append(3)
      return l

    ns = {'special_functions': special_functions}
    with self.converted(test_fn, lists, ns) as result:
      with self.cached_session() as sess:
        tl = result.test_fn()
        r = list_ops.tensor_list_stack(tl, dtypes.int32)
        self.assertAllEqual(self.evaluate(r), [1, 2, 3])

  def test_list_pop(self):

    def test_fn():
      l = special_functions.tensor_list([1, 2, 3])
      s = l.pop()
      return s, l

    ns = {'special_functions': special_functions}
    node, ctx = self.prepare(test_fn, ns)
    # `pop` needs to know the element type; supply it via a directive
    # attached to the definition of `l`.
    def_, = anno.getanno(node.body[0].targets[0],
                         anno.Static.ORIG_DEFINITIONS)
    def_.directives[directives.set_element_type] = {
        'dtype': parser.parse_expression('tf.int32'),
        'shape': parser.parse_expression('()'),
    }
    node = lists.transform(node, ctx)

    with self.compiled(node, ns, (dtypes.int32,)) as result:
      with self.cached_session() as sess:
        ts, tl = result.test_fn()
        r = list_ops.tensor_list_stack(tl, dtypes.int32)
        self.assertAllEqual(self.evaluate(r), [1, 2])
        self.assertAllEqual(self.evaluate(ts), 3)

  def test_double_list_pop(self):

    def test_fn(l):
      s = l.pop().pop()
      return s

    with self.converted(test_fn, lists, {}) as result:
      test_input = [1, 2, [1, 2, 3]]
      # TODO(mdan): Pass a list of lists of tensor when we fully support that.
      # For now, we just pass a regular Python list of lists just to verify that
      # the two pop calls are sequenced properly.
      self.assertAllEqual(result.test_fn(test_input), 3)

  def test_list_stack(self):

    def test_fn():
      l = [1, 2, 3]
      return tf.stack(l)

    node, ctx = self.prepare(test_fn, {})
    # As with pop, stacking requires an element type directive.
    def_, = anno.getanno(node.body[0].targets[0],
                         anno.Static.ORIG_DEFINITIONS)
    def_.directives[directives.set_element_type] = {
        'dtype': parser.parse_expression('tf.int32')
    }
    node = lists.transform(node, ctx)

    with self.compiled(node, {}, (array_ops.stack, dtypes.int32)) as result:
      with self.cached_session() as sess:
        self.assertAllEqual(self.evaluate(result.test_fn()), [1, 2, 3])

  # TODO(mdan): Add a test with tf.stack with axis kwarg.
# Standard test entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/converters/lists_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Canonicalizes functions with multiple returns to use just one."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct.static_analysis.annos import NodeAnno
# Annotation keys marking nodes (or branches of an `if`) that are guaranteed
# to execute a `return` on every path through them.
BODY_DEFINITELY_RETURNS = 'BODY_DEFINITELY_RETURNS'
ORELSE_DEFINITELY_RETURNS = 'ORELSE_DEFINITELY_RETURNS'
STMT_DEFINITELY_RETURNS = 'STMT_DEFINITELY_RETURNS'
class _RewriteBlock(object):
  """Per-block state: whether the current block definitely returns."""

  def __init__(self):
    self.definitely_returns = False
class ConditionalReturnRewriter(converter.Base):
  """Rewrites a pattern where it's unobvious that all paths return a value.

  This rewrite allows avoiding intermediate None return values.

  The following pattern:

      if cond:
        <block 1>
        return
      else:
        <block 2>
      <block 3>

  is converted to:

      if cond:
        <block 1>
        return
      else:
        <block 2>
        <block 3>

  and vice-versa (if the else returns, subsequent statements are moved under the
  if branch).
  """

  def visit_Return(self, node):
    # A `return` statement trivially makes the enclosing block return.
    self.state[_RewriteBlock].definitely_returns = True
    return node

  def _postprocess_statement(self, node):
    """Per-statement hook; returns (node, successors_to_move_or_None)."""
    # If the node definitely returns (e.g. it's a with statement with a
    # return statement in it), then the current block also definitely returns.
    if anno.getanno(node, STMT_DEFINITELY_RETURNS, default=False):
      self.state[_RewriteBlock].definitely_returns = True

    # The special case: collapse a typical conditional return pattern into
    # a single conditional with possibly returns on both branches. This
    # reduces the use of None return values, which don't work with TF
    # conditionals. The statements following the `if` are moved into the
    # non-returning branch (the block returned here).
    if (isinstance(node, gast.If)
        and anno.getanno(node, BODY_DEFINITELY_RETURNS, default=False)):
      return node, node.orelse
    elif (isinstance(node, gast.If)
          and anno.getanno(node, ORELSE_DEFINITELY_RETURNS, default=False)):
      return node, node.body
    return node, None

  def _visit_statement_block(self, node, nodes):
    """Visits a block of statements; returns (new_nodes, definitely_returns)."""
    self.state[_RewriteBlock].enter()
    new_nodes = self.visit_block(nodes, after_visit=self._postprocess_statement)
    block_definitely_returns = self.state[_RewriteBlock].definitely_returns
    self.state[_RewriteBlock].exit()
    return new_nodes, block_definitely_returns

  def visit_While(self, node):
    # Loops are never treated as definitely returning (they may not run).
    node.test = self.visit(node.test)
    node.body, _ = self._visit_statement_block(node, node.body)
    node.orelse, _ = self._visit_statement_block(node, node.orelse)
    return node

  def visit_For(self, node):
    node.iter = self.visit(node.iter)
    node.target = self.visit(node.target)
    node.body, _ = self._visit_statement_block(node, node.body)
    node.orelse, _ = self._visit_statement_block(node, node.orelse)
    return node

  def visit_With(self, node):
    node.items = self.visit_block(node.items)
    node.body, definitely_returns = self._visit_statement_block(node, node.body)
    # Propagate: a `with` whose body always returns, always returns.
    if definitely_returns:
      anno.setanno(node, STMT_DEFINITELY_RETURNS, True)
    return node

  def visit_Try(self, node):
    # We could decide whether a 'try' DEFINITELY_RETURNS based on its
    # components. It is not clear whether we want to do anything with this
    # given a 'try' is likely to throw an exception in some circumstances.
    node.body, _ = self._visit_statement_block(node, node.body)
    node.orelse, _ = self._visit_statement_block(node, node.orelse)
    node.finalbody, _ = self._visit_statement_block(node, node.finalbody)
    node.handlers = self.visit_block(node.handlers)
    return node

  def visit_ExceptHandler(self, node):
    # To determine whether `try` DEFINITELY_RETURNS we need to revisit this.
    node.body, _ = self._visit_statement_block(node, node.body)
    return node

  def visit_If(self, node):
    node.test = self.visit(node.test)

    node.body, body_definitely_returns = self._visit_statement_block(
        node, node.body)
    if body_definitely_returns:
      anno.setanno(node, BODY_DEFINITELY_RETURNS, True)

    node.orelse, orelse_definitely_returns = self._visit_statement_block(
        node, node.orelse)
    if orelse_definitely_returns:
      anno.setanno(node, ORELSE_DEFINITELY_RETURNS, True)

    # An `if` definitely returns only when both branches do.
    if body_definitely_returns and orelse_definitely_returns:
      self.state[_RewriteBlock].definitely_returns = True
    return node

  def visit_FunctionDef(self, node):
    node.args = self.visit(node.args)
    node.body, _ = self._visit_statement_block(node, node.body)
    return node
class _Block(object):
  """Per-block state used by ReturnStatementsTransformer."""

  def __init__(self):
    # True only for the block that is a function body.
    self.is_function = False
    # True once a `return` was seen anywhere inside this block.
    self.return_used = False
    # Guard scheduling: `create_guard_next` is requested when a `return` is
    # lowered; it becomes `create_guard_now` for the following statement,
    # which then gets wrapped in an `if not do_return:` guard.
    self.create_guard_next = False
    self.create_guard_now = False

  def __repr__(self):
    return 'used: {}'.format(self.return_used)
class _Function(object):
  """Per-function state: names of the lowered return control/value variables."""

  def __init__(self):
    # Name of the boolean tracking whether a `return` was executed.
    self.do_return_var_name = None
    # Name of the variable holding the function's return value.
    self.retval_var_name = None

  def __repr__(self):
    return 'return control: {}, return value: {}'.format(
        self.do_return_var_name, self.retval_var_name)
class ReturnStatementsTransformer(converter.Base):
  """Lowers return statements into variables and conditionals.

  Specifically, the following pattern:

      <block 1>
      return val
      <block 2>

  is converted to:

      do_return = False
      retval = None

      <block 1>
      do_return = True
      retval = val

      if not do_return:
        <block 2>

      return retval

  The conversion adjusts loops as well:

      <block 1>
      while cond:
        <block 2>
        return retval

  is converted to:

      <block 1>
      while not do_return and cond:
        <block 2>
        do_return = True
        retval = val
  """

  def __init__(self, ctx, default_to_null_return):
    super(ReturnStatementsTransformer, self).__init__(ctx)
    # When True, functions that use `return` are given an undefined default
    # return value and unwrapping logic (see visit_FunctionDef).
    self.default_to_null_return = default_to_null_return

  def visit_Return(self, node):
    # Mark every enclosing block up to the function body as using `return`,
    # and request a guard for the statement that follows in each of them.
    for block in reversed(self.state[_Block].stack):
      block.return_used = True
      block.create_guard_next = True
      if block.is_function:
        break

    # A bare `return` is equivalent to `return None`.
    retval = node.value if node.value else parser.parse_expression('None')

    # Lower `return val` into assignments to the control/value variables.
    template = """
      do_return_var_name = True
      retval_var_name = retval
    """
    node = templates.replace(
        template,
        do_return_var_name=self.state[_Function].do_return_var_name,
        retval_var_name=self.state[_Function].retval_var_name,
        retval=retval)

    return node

  def _postprocess_statement(self, node):
    """Wraps statements that follow a lowered `return` in a guard."""
    if not self.state[_Block].return_used:
      return node, None

    state = self.state[_Block]
    if state.create_guard_now:
      # Skip this statement (and its successors, which are moved into the
      # guard's body) once a return was taken.
      template = """
        if ag__.not_(do_return_var_name):
          original_node
      """
      cond, = templates.replace(
          template,
          do_return_var_name=self.state[_Function].do_return_var_name,
          original_node=node)
      node, block = cond, cond.body
    else:
      node, block = node, None

    # The guard requested by a `return` is created for the very next
    # statement, then the request is cleared.
    state.create_guard_now = state.create_guard_next
    state.create_guard_next = False

    return node, block

  def _visit_statement_block(self, node, nodes):
    self.state[_Block].enter()
    nodes = self.visit_block(nodes, after_visit=self._postprocess_statement)
    self.state[_Block].exit()
    return nodes

  def visit_While(self, node):
    node.test = self.visit(node.test)
    # Add the check for return to the loop condition.
    node.body = self._visit_statement_block(node, node.body)
    if self.state[_Block].return_used:
      node.test = templates.replace_as_expression(
          'ag__.and_(lambda: ag__.not_(control_var), lambda: test)',
          test=node.test,
          control_var=self.state[_Function].do_return_var_name)

    node.orelse = self._visit_statement_block(node, node.orelse)
    return node

  def visit_For(self, node):
    node.iter = self.visit(node.iter)
    node.target = self.visit(node.target)
    # Add the check for return to the loop condition. `for` loops have no
    # explicit condition, so the check is attached to the 'extra_test'
    # annotation consumed by the loop lowering.
    node.body = self._visit_statement_block(node, node.body)
    if self.state[_Block].return_used:
      extra_test = anno.getanno(node, 'extra_test', default=None)
      if extra_test is not None:
        extra_test = templates.replace_as_expression(
            'ag__.and_(lambda: ag__.not_(control_var), lambda: extra_test)',
            extra_test=extra_test,
            control_var=self.state[_Function].do_return_var_name)
      else:
        extra_test = templates.replace_as_expression(
            'ag__.not_(control_var)',
            control_var=self.state[_Function].do_return_var_name)
      anno.setanno(node, 'extra_test', extra_test)

    node.orelse = self._visit_statement_block(node, node.orelse)
    return node

  def visit_With(self, node):
    node.items = self.visit_block(node.items)
    node.body = self._visit_statement_block(node, node.body)
    return node

  def visit_Try(self, node):
    node.body = self._visit_statement_block(node, node.body)
    node.orelse = self._visit_statement_block(node, node.orelse)
    node.finalbody = self._visit_statement_block(node, node.finalbody)
    node.handlers = self.visit_block(node.handlers)
    return node

  def visit_ExceptHandler(self, node):
    node.body = self._visit_statement_block(node, node.body)
    return node

  def visit_If(self, node):
    node.test = self.visit(node.test)
    node.body = self._visit_statement_block(node, node.body)
    node.orelse = self._visit_statement_block(node, node.orelse)
    return node

  def visit_FunctionDef(self, node):
    self.state[_Function].enter()
    self.state[_Block].enter()
    self.state[_Block].is_function = True

    scope = anno.getanno(node, NodeAnno.BODY_SCOPE)

    # Pick fresh names that cannot collide with symbols referenced in the
    # function body.
    do_return_var_name = self.ctx.namer.new_symbol(
        'do_return', scope.referenced)
    retval_var_name = self.ctx.namer.new_symbol('retval_', scope.referenced)
    self.state[_Function].do_return_var_name = do_return_var_name
    self.state[_Function].retval_var_name = retval_var_name

    converted_body = self._visit_statement_block(node, node.body)

    # Avoid placing statements before any eventual docstring.
    # TODO(mdan): Should a docstring even be included in the output?
    docstring = None
    if converted_body:
      if (isinstance(converted_body[0], gast.Expr) and
          isinstance(converted_body[0].value, gast.Constant)):
        docstring = converted_body[0]
        converted_body = converted_body[1:]

    if self.state[_Block].return_used:

      if self.default_to_null_return:
        template = """
          do_return_var_name = False
          retval_var_name = ag__.UndefinedReturnValue()
          body
          # TODO(b/134753123) Remove the do_return_var_name tuple.
          (do_return_var_name,)
          return ag__.retval(retval_var_name)
        """
      else:
        # TODO(b/134753123) Fix loops that return when do_return is not set.
        template = """
          body
          return retval_var_name
        """

      node.body = templates.replace(
          template,
          body=converted_body,
          do_return_var_name=do_return_var_name,
          retval_var_name=retval_var_name)

      if docstring:
        node.body.insert(0, docstring)

    self.state[_Block].exit()
    self.state[_Function].exit()
    return node
def transform(node, ctx, default_to_null_return=True):
  """Ensure a function has only a single return."""
  # Note: Technically, these two could be merged into a single walk, but
  # keeping them separate helps with readability.
  rewritten = ConditionalReturnRewriter(ctx).visit(node)
  return ReturnStatementsTransformer(
      ctx, default_to_null_return=default_to_null_return).visit(rewritten)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/converters/return_statements.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converter for logical expressions, e.g. `a and b -> tf.logical_and(a, b)`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import templates
# TODO(mdan): Properly extract boolean ops according to lazy eval rules.
# Note that this isn't completely safe either, because tensors may have control
# dependencies.
# Note that for loops this should be done after the loop was converted to
# tf.while_loop so that the expanded conditionals are properly scoped.

# Used to signal that an operand is safe for non-lazy evaluation.
SAFE_BOOLEAN_OPERAND = 'SAFE_BOOLEAN_OPERAND'

# Maps gast boolean/unary operator node types to their overload names.
LOGICAL_OPERATORS = {
    gast.And: 'ag__.and_',
    gast.Not: 'ag__.not_',
    gast.Or: 'ag__.or_',
}

# Equality overloads; only applied when Feature.EQUALITY_OPERATORS is enabled.
EQUALITY_OPERATORS = {
    gast.Eq: 'ag__.eq',
    gast.NotEq: 'ag__.not_eq',
}
class LogicalExpressionTransformer(converter.Base):
  """Converts logical expressions to corresponding TF calls."""

  def _overload_of(self, operator):
    """Returns the overload name for `operator`, or None if unsupported."""
    op_type = type(operator)
    if op_type in LOGICAL_OPERATORS:
      return LOGICAL_OPERATORS[op_type]
    if self.ctx.program.options.uses(converter.Feature.EQUALITY_OPERATORS):
      if op_type in EQUALITY_OPERATORS:
        return EQUALITY_OPERATORS[op_type]
    return None

  def _as_lambda(self, expr):
    # Wrapping operands in lambdas defers their evaluation, approximating
    # the lazy semantics of `and`/`or`.
    return templates.replace_as_expression('lambda: expr', expr=expr)

  def _as_binary_function(self, func_name, arg1, arg2):
    return templates.replace_as_expression(
        'func_name(arg1, arg2)',
        func_name=parser.parse_expression(func_name),
        arg1=arg1,
        arg2=arg2)

  def _as_binary_operation(self, op, arg1, arg2):
    # Build a Compare node with a placeholder operator, then swap in `op`.
    template = templates.replace_as_expression(
        'arg1 is arg2',
        arg1=arg1,
        arg2=arg2)
    template.ops[0] = op
    return template

  def _as_unary_function(self, func_name, arg):
    return templates.replace_as_expression(
        'func_name(arg)', func_name=parser.parse_expression(func_name), arg=arg)

  def visit_Compare(self, node):
    node = self.generic_visit(node)

    if (not self.ctx.program.options.uses(
        converter.Feature.EQUALITY_OPERATORS)):
      return node

    ops_and_comps = list(zip(node.ops, node.comparators))
    left = node.left
    # Repeated comparisons are converted to conjunctions:
    # a < b < c -> a < b and b < c
    op_tree = None
    while ops_and_comps:
      op, right = ops_and_comps.pop(0)
      overload = self._overload_of(op)
      if overload is not None:
        binary_comparison = self._as_binary_function(overload, left, right)
      else:
        # Operators without an overload (e.g. <, in) are left as-is.
        binary_comparison = self._as_binary_operation(op, left, right)
      if op_tree is not None:
        op_tree = self._as_binary_function('ag__.and_',
                                           self._as_lambda(op_tree),
                                           self._as_lambda(binary_comparison))
      else:
        op_tree = binary_comparison
      left = right
    assert op_tree is not None
    return op_tree

  def visit_UnaryOp(self, node):
    node = self.generic_visit(node)
    overload = self._overload_of(node.op)
    if overload is None:
      return node
    return self._as_unary_function(overload, node.operand)

  def visit_BoolOp(self, node):
    node = self.generic_visit(node)
    node_values = node.values
    # Fold right to left: a and b and c -> and_(a, and_(b, c)).
    right = node.values.pop()
    while node_values:
      left = node_values.pop()
      right = self._as_binary_function(
          self._overload_of(node.op), self._as_lambda(left),
          self._as_lambda(right))
    return right
def transform(node, ctx):
  """Converts logical operators in `node` to their overloaded calls."""
  return LogicalExpressionTransformer(ctx).visit(node)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/converters/logical_expressions.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for asserts module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.converters import asserts
from tensorflow.python.autograph.converters import function_scopes
from tensorflow.python.autograph.core import converter_testing
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
class AssertsTest(converter_testing.TestCase):
  """Tests for the `asserts` converter."""

  def test_basic(self):

    def test_fn(a):
      assert a, 'testmsg'
      return a

    with ops.Graph().as_default():
      with self.converted(test_fn, (function_scopes, asserts), {}) as result:
        op = result.test_fn(constant_op.constant(False))
        # The lowered assertion must fail with the original message when
        # evaluated with a False predicate.
        with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                     'testmsg'):
          self.evaluate(op)
# Standard test entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/converters/asserts_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Modifies the signature to allow resolving the value of default arguments.
Normally, function symbols are captured either in a function's globals or
closure. This is not true for default arguments, which are evaluated when the
function is defined:
b = 1
c = 2
def f(a=b + 1):
return a + c
In the above example, the namespace of the function would include `c = 2` but
not `b`.
If we were to naively generate a new function:
def new_f(a=b + 1):
return a + c
The generated code would fail to load unless we exposed a symbol `b`. Capturing
the closure of such an expression is difficult. However, we can capture the
default value of argument `a` with relative ease.
This converter replaces all default argument expressions with a constant so
that they don't cause loading to fail. This requires that the default values
are reset after loading the transformed function:
def new_f(a=None):
return a + c
# ... later, after new_f was loaded ...
new_f.__defaults__ = f.__defaults__
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.pyct import parser
class _Function(object):
  """Sentinel scope used to track the nesting level of function definitions."""
  pass
class ArgDefaultsTransformer(converter.Base):
  """Transforms top level argument defaults."""

  def visit_Lambda(self, node):
    self.state[_Function].enter()
    node.args = self.visit(node.args)
    # Only the top level function is modified - no need to visit the children.
    self.state[_Function].exit()
    return node

  def visit_FunctionDef(self, node):
    self.state[_Function].enter()
    node.args = self.visit(node.args)
    # Only the top level function is modified - no need to visit the children.
    self.state[_Function].exit()
    return node

  def visit_arguments(self, node):
    # NOTE(review): `level` counts entered _Function scopes; a level above 2
    # presumably indicates arguments of a nested function, which are left
    # untouched -- confirm against converter.Base state semantics.
    if self.state[_Function].level > 2:
      return node

    # Replace every default with a constant placeholder; the caller restores
    # the real defaults after loading (see module docstring).
    for i in range(len(node.defaults)):
      node.defaults[i] = parser.parse_expression('None')

    for i, d in enumerate(node.kw_defaults):
      if d is not None:
        node.kw_defaults[i] = parser.parse_expression('None')

    # Only the top level function is modified - no need to visit the children.
    return node
def transform(node, ctx):
  """Replaces argument default expressions with constant placeholders.

  Only the top level function (or lambda) is modified; the caller is expected
  to restore `__defaults__`/`__kwdefaults__` after the transformed function is
  loaded (see the module docstring).

  Args:
    node: AST
    ctx: EntityContext

  Returns:
    The transformed AST.
  """
  return ArgDefaultsTransformer(ctx).visit(node)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/converters/arg_defaults.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for function_scopes module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.converters import function_scopes
from tensorflow.python.autograph.core import converter_testing
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class FunctionBodyTransformerTest(converter_testing.TestCase):
  """Tests for the `function_scopes` converter."""

  @test_util.run_deprecated_v1
  def test_basic(self):

    def test_fn(l):
      """Docstring."""
      a = 1
      l += a
      return l

    with self.converted(test_fn, function_scopes, {}) as result:
      result_op = result.test_fn(constant_op.constant(1))
      # Ops created inside the function carry its name scope.
      self.assertIn('test_fn/', result_op.op.name)
      # The docstring must survive the transformation.
      self.assertEqual('Docstring.', result.test_fn.__doc__)

  @test_util.run_deprecated_v1
  def test_multiline_docstring(self):

    tf = None

    def test_fn():
      """First sentence.

      Second sentence.
      """
      return tf.constant(1)

    with self.converted(test_fn, function_scopes, {},
                        (constant_op.constant,)) as result:
      result_op = result.test_fn()
      self.assertIn('test_fn/', result_op.op.name)
      self.assertIn('First sentence.', result.test_fn.__doc__)
      self.assertIn('Second sentence.', result.test_fn.__doc__)

  @test_util.run_deprecated_v1
  def test_nested_functions(self):

    def test_fn(l):

      def inner_fn(i):
        return i + 1

      l += 1
      return l, inner_fn(l)

    with self.converted(test_fn, function_scopes, {},
                        (ops.name_scope,)) as result:
      first, second = result.test_fn(constant_op.constant(1))
      # Nested functions get their own nested scope.
      self.assertIn('test_fn/', first.op.name)
      self.assertNotIn('inner_fn', first.op.name)
      self.assertIn('test_fn/inner_fn/', second.op.inputs[0].name)

  @test_util.run_deprecated_v1
  def test_method(self):

    class TestClass(object):

      def test_fn(self, l):

        def inner_fn(i):
          return i + 1

        l += 1
        return l, inner_fn(l)

    ns = {'TestClass': TestClass}
    node, ctx = self.prepare(TestClass, ns)
    node = function_scopes.transform(node, ctx)

    with self.compiled(node, {}, (ops.name_scope,)) as result:
      first, second = result.TestClass().test_fn(constant_op.constant(1))
      self.assertIn('test_fn/', first.op.name)
      self.assertNotIn('inner_fn', first.op.name)
      self.assertIn('test_fn/inner_fn/', second.op.inputs[0].name)
# Standard test entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/converters/function_scopes_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Handles control flow statements: while, for, if."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct.static_analysis import annos
# TODO(mdan): Refactor functions to make them smaller.
class ControlFlowTransformer(converter.Base):
"""Transforms control flow structures like loops an conditionals."""
def _create_cond_branch(self, body_name, aliased_orig_names,
                        aliased_new_names, body, returns):
  """Builds the function def for one branch of a lowered conditional.

  The generated function aliases the symbols it modifies (when requested)
  and returns the values listed in `returns`.
  """
  if not returns:
    # TODO(b/110167197): Replace with a plain return.
    template = """
      return 1
    """
    return_stmt = templates.replace(template)
  elif len(returns) == 1:
    template = """
      return retval
    """
    return_stmt = templates.replace(template, retval=returns[0])
  else:
    template = """
      return (retvals,)
    """
    return_stmt = templates.replace(template, retvals=returns)

  if aliased_orig_names:
    # Alias closure variables under new names so the branch body can write
    # to them without clobbering the originals.
    template = """
      def body_name():
        aliased_new_names, = aliased_orig_names,
        body
        return_stmt
    """
    return templates.replace(
        template,
        body_name=body_name,
        body=body,
        aliased_orig_names=aliased_orig_names,
        aliased_new_names=aliased_new_names,
        return_stmt=return_stmt)
  else:
    template = """
      def body_name():
        body
        return_stmt
    """
    return templates.replace(
        template, body_name=body_name, body=body, return_stmt=return_stmt)
def _create_cond_expr(self, results, test, body_name, orelse_name,
                      state_getter_name, state_setter_name,
                      basic_symbol_names, composite_symbol_names):
  """Builds the `ag__.if_stmt(...)` call replacing the `if` statement.

  When `results` is not None, the call's return values are assigned to it;
  otherwise the call is emitted as a bare expression statement.
  """
  if results is not None:
    template = """
      results = ag__.if_stmt(test, body_name, orelse_name,
                             state_getter_name, state_setter_name,
                             (basic_symbol_names,),
                             (composite_symbol_names,))
    """
    return templates.replace(
        template,
        test=test,
        results=results,
        body_name=body_name,
        orelse_name=orelse_name,
        state_getter_name=state_getter_name,
        state_setter_name=state_setter_name,
        basic_symbol_names=basic_symbol_names,
        composite_symbol_names=composite_symbol_names)
  else:
    template = """
      ag__.if_stmt(test, body_name, orelse_name, getter_name, setter_name,
                   (basic_symbol_names,), (composite_symbol_names,))
    """
    return templates.replace(
        template,
        test=test,
        body_name=body_name,
        orelse_name=orelse_name,
        getter_name=state_getter_name,
        setter_name=state_setter_name,
        basic_symbol_names=basic_symbol_names,
        composite_symbol_names=composite_symbol_names)
def _fmt_symbols(self, symbol_set):
  """Renders a symbol collection as a comma-separated string for messages."""
  if symbol_set:
    return ', '.join(str(s) for s in symbol_set)
  return 'no variables'
def _determine_aliased_symbols(self, scope, node_defined_in, block):
  """Returns the basic symbols a branch must alias.

  These are the symbols that are modified in `scope`, defined before the
  node, and live on entry to `block`.
  """
  if block:
    block_live_in = set(anno.getanno(block[0], anno.Static.LIVE_VARS_IN))
  else:
    # An empty block has no live variables.
    block_live_in = set()

  modified_live = scope.modified & node_defined_in & block_live_in
  # Composite symbols are handled elsewhere, see _create_state_functions
  return {s for s in modified_live if not s.is_composite()}
def _create_state_functions(self, composites, state_getter_name,
                            state_setter_name):
  """Builds getter/setter function defs for the composite symbols' state."""
  if composites:
    composite_tuple = tuple(composites)

    template = """
      def state_getter_name():
        return composite_tuple,
      def state_setter_name(vals):
        composite_tuple, = vals
    """
    node = templates.replace(
        template,
        state_getter_name=state_getter_name,
        state_setter_name=state_setter_name,
        composite_tuple=composite_tuple)
  else:
    # No composite state: emit trivial no-op getter/setter.
    template = """
      def state_getter_name():
        return ()
      def state_setter_name(_):
        pass
    """
    node = templates.replace(
        template,
        state_getter_name=state_getter_name,
        state_setter_name=state_setter_name)

  return node
def _create_undefined_assigns(self, undefined_symbols):
  """Builds `v = ag__.Undefined('v')` assignments for each given symbol."""
  assignments = []
  for s in undefined_symbols:
    template = '''
      var = ag__.Undefined(symbol_name)
    '''
    assignments += templates.replace(
        template,
        var=s,
        symbol_name=gast.Constant(s.ssf(), kind=None))
  return assignments
def visit_If(self, node):
  """Lowers an `if` statement into a functional `ag__.if_stmt` call.

  The body and orelse become standalone functions; symbols modified in either
  branch and live afterwards are returned from those functions, and composite
  symbols get state getter/setter functions so branch effects can be undone.
  """
  body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
  orelse_scope = anno.getanno(node, annos.NodeAnno.ORELSE_SCOPE)
  defined_in = anno.getanno(node, anno.Static.DEFINED_VARS_IN)
  live_out = anno.getanno(node, anno.Static.LIVE_VARS_OUT)

  # Note: this information needs to be extracted before the body conversion
  # that happens in the call to generic_visit below, because the conversion
  # generates nodes that lack static analysis annotations.
  need_alias_in_body = self._determine_aliased_symbols(
      body_scope, defined_in, node.body)
  need_alias_in_orelse = self._determine_aliased_symbols(
      orelse_scope, defined_in, node.orelse)

  node = self.generic_visit(node)

  # Symbols modified in either branch and live out are returned by the
  # generated branch functions.
  modified_in_cond = body_scope.modified | orelse_scope.modified
  returned_from_cond = set()
  composites = set()
  for s in modified_in_cond:
    if s in live_out and not s.is_composite():
      returned_from_cond.add(s)
    if s.is_composite():
      # Special treatment for compound objects, always return them.
      # This allows special handling within the if_stmt itself.
      # For example, in TensorFlow we need to restore the state of composite
      # symbols to ensure that only effects from the executed branch are seen.
      composites.add(s)

  created_in_body = body_scope.modified & returned_from_cond - defined_in
  created_in_orelse = orelse_scope.modified & returned_from_cond - defined_in

  basic_created_in_body = tuple(
      s for s in created_in_body if not s.is_composite())
  basic_created_in_orelse = tuple(
      s for s in created_in_orelse if not s.is_composite())

  # These variables are defined only in a single branch. This is fine in
  # Python so we pass them through. Another backend, e.g. Tensorflow, may need
  # to handle these cases specially or throw an Error.
  possibly_undefined = (set(basic_created_in_body) ^
                        set(basic_created_in_orelse))

  # Alias the closure variables inside the conditional functions, to allow
  # the functions access to the respective variables.
  # We will alias variables independently for body and orelse scope,
  # because different branches might write different variables.
  aliased_body_orig_names = tuple(need_alias_in_body)
  aliased_orelse_orig_names = tuple(need_alias_in_orelse)
  aliased_body_new_names = tuple(
      self.ctx.namer.new_symbol(s.ssf(), body_scope.referenced)
      for s in aliased_body_orig_names)
  aliased_orelse_new_names = tuple(
      self.ctx.namer.new_symbol(s.ssf(), orelse_scope.referenced)
      for s in aliased_orelse_orig_names)

  alias_body_map = dict(zip(aliased_body_orig_names, aliased_body_new_names))
  alias_orelse_map = dict(
      zip(aliased_orelse_orig_names, aliased_orelse_new_names))

  node_body = ast_util.rename_symbols(node.body, alias_body_map)
  node_orelse = ast_util.rename_symbols(node.orelse, alias_orelse_map)

  # Fresh names for the generated condition variable, branch functions and
  # state functions, avoiding everything the branches reference.
  cond_var_name = self.ctx.namer.new_symbol('cond', body_scope.referenced)
  body_name = self.ctx.namer.new_symbol('if_true', body_scope.referenced)
  orelse_name = self.ctx.namer.new_symbol('if_false', orelse_scope.referenced)
  all_referenced = body_scope.referenced | orelse_scope.referenced
  state_getter_name = self.ctx.namer.new_symbol('get_state', all_referenced)
  state_setter_name = self.ctx.namer.new_symbol('set_state', all_referenced)

  returned_from_cond = tuple(returned_from_cond)
  composites = tuple(composites)

  if returned_from_cond:
    if len(returned_from_cond) == 1:
      cond_results = returned_from_cond[0]
    else:
      cond_results = gast.Tuple([s.ast() for s in returned_from_cond], None)

    # Returned symbols that were aliased inside a branch must be returned
    # under their aliased name from that branch's function.
    returned_from_body = tuple(
        alias_body_map[s] if s in need_alias_in_body else s
        for s in returned_from_cond)
    returned_from_orelse = tuple(
        alias_orelse_map[s] if s in need_alias_in_orelse else s
        for s in returned_from_cond)

  else:
    # When the cond would return no value, we leave the cond called without
    # results. That in turn should trigger the side effect guards. The
    # branch functions will return a dummy value that ensures cond
    # actually has some return value as well.
    cond_results = None
    # TODO(mdan): Replace with None once side_effect_guards is retired.
    returned_from_body = (templates.replace_as_expression(
        'ag__.match_staging_level(1, cond_var_name)',
        cond_var_name=cond_var_name),)
    returned_from_orelse = (templates.replace_as_expression(
        'ag__.match_staging_level(1, cond_var_name)',
        cond_var_name=cond_var_name),)

  cond_assign = self.create_assignment(cond_var_name, node.test)
  body_def = self._create_cond_branch(
      body_name,
      aliased_orig_names=aliased_body_orig_names,
      aliased_new_names=aliased_body_new_names,
      body=node_body,
      returns=returned_from_body)
  orelse_def = self._create_cond_branch(
      orelse_name,
      aliased_orig_names=aliased_orelse_orig_names,
      aliased_new_names=aliased_orelse_new_names,
      body=node_orelse,
      returns=returned_from_orelse)
  undefined_assigns = self._create_undefined_assigns(possibly_undefined)
  composite_defs = self._create_state_functions(
      composites, state_getter_name, state_setter_name)

  basic_symbol_names = tuple(
      gast.Constant(str(symbol), kind=None) for symbol in returned_from_cond)
  composite_symbol_names = tuple(
      gast.Constant(str(symbol), kind=None) for symbol in composites)

  cond_expr = self._create_cond_expr(cond_results, cond_var_name, body_name,
                                     orelse_name, state_getter_name,
                                     state_setter_name, basic_symbol_names,
                                     composite_symbol_names)

  # Assemble: placeholder assigns, state functions, branch functions, the
  # condition assignment and finally the if_stmt call itself.
  if_ast = (
      undefined_assigns + composite_defs + body_def + orelse_def +
      cond_assign + cond_expr)
  return if_ast
def _get_basic_loop_vars(self, modified_symbols, live_in, live_out):
# The loop variables corresponding to simple symbols (e.g. `x`).
basic_loop_vars = []
for s in modified_symbols:
if s.is_composite():
# TODO(mdan): Raise an error when this happens for a TF loop.
continue
# Variables not live into or out of the loop are considered local to the
# loop.
if s not in live_in and s not in live_out:
continue
basic_loop_vars.append(s)
return frozenset(basic_loop_vars)
def _get_composite_loop_vars(self, modified_symbols, live_in):
# The loop variables corresponding to composite symbols (e.g. `self.x`).
composite_loop_vars = []
for s in modified_symbols:
if not s.is_composite():
continue
# Mutations made to objects created inside the loop will appear as writes
# to composite symbols. Because these mutations appear as modifications
# made to composite symbols, we check whether the composite's parent is
# actually live into the loop.
# Example:
# while cond:
# x = Foo()
# x.foo = 2 * x.foo # x.foo is live into the loop, but x is not.
if not all(p in live_in for p in s.support_set):
continue
composite_loop_vars.append(s)
return frozenset(composite_loop_vars)
def _get_loop_vars(self, node, modified_symbols):
  """Distills the symbol sets needed to convert a loop statement.

  Args:
    node: the loop AST node, carrying the static analysis annotations.
    modified_symbols: symbols modified by the loop (body and, for `for`
      loops, the iterate).

  Returns:
    Tuple (basic_loop_vars, composite_loop_vars, reserved_symbols,
    undefined_lives).
  """
  body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
  defined_in = anno.getanno(node, anno.Static.DEFINED_VARS_IN)
  live_in = anno.getanno(node, anno.Static.LIVE_VARS_IN)
  live_out = anno.getanno(node, anno.Static.LIVE_VARS_OUT)
  reserved_symbols = body_scope.referenced

  basic_loop_vars = self._get_basic_loop_vars(
      modified_symbols, live_in, live_out)
  composite_loop_vars = self._get_composite_loop_vars(
      modified_symbols, live_in)

  # Variables that are used or defined inside the loop, but not defined
  # before entering the loop. Only simple variables must be defined. The
  # composite ones will be implicitly checked at runtime.
  undefined_lives = basic_loop_vars - defined_in

  return (basic_loop_vars, composite_loop_vars, reserved_symbols,
          undefined_lives)
def _loop_var_constructs(self, basic_loop_vars):
  """Builds the loop-variable constructs used by the loop templates.

  Returns:
    Pair (loop_vars, loop_vars_ast_tuple): `loop_vars` is the tuple of
    symbols, unwrapped to the lone symbol when there is exactly one (to match
    the template's calling convention); `loop_vars_ast_tuple` is the AST
    tuple of the corresponding name nodes.
  """
  symbols = tuple(basic_loop_vars)
  ast_tuple = gast.Tuple([s.ast() for s in symbols], None)
  # A single loop variable is passed unwrapped.
  return (symbols[0] if len(symbols) == 1 else symbols), ast_tuple
def visit_While(self, node):
  """Lowers a `while` loop into a functional `ag__.while_stmt` call.

  The loop body and test become standalone functions; basic loop variables
  are threaded through them, and composite symbols get state getter/setter
  functions.
  """
  node = self.generic_visit(node)

  (basic_loop_vars, composite_loop_vars, reserved_symbols,
   possibly_undefs) = self._get_loop_vars(
       node,
       anno.getanno(node, annos.NodeAnno.BODY_SCOPE).modified)
  loop_vars, loop_vars_ast_tuple = self._loop_var_constructs(
      basic_loop_vars)

  state_getter_name = self.ctx.namer.new_symbol('get_state', reserved_symbols)
  state_setter_name = self.ctx.namer.new_symbol('set_state', reserved_symbols)
  state_functions = self._create_state_functions(
      composite_loop_vars, state_getter_name, state_setter_name)

  # Symbol names are passed as string constants for error reporting inside
  # the while_stmt operator.
  basic_symbol_names = tuple(
      gast.Constant(str(symbol), kind=None) for symbol in basic_loop_vars)
  composite_symbol_names = tuple(
      gast.Constant(str(symbol), kind=None) for symbol in composite_loop_vars)

  # TODO(mdan): Use a single template.
  # If the body and test functions took a single tuple for loop_vars, instead
  # of *loop_vars, then a single template could be used.
  if loop_vars:
    template = """
      state_functions
      def body_name(loop_vars):
        body
        return loop_vars,
      def test_name(loop_vars):
        return test
      loop_vars_ast_tuple = ag__.while_stmt(
          test_name,
          body_name,
          state_getter_name,
          state_setter_name,
          (loop_vars,),
          (basic_symbol_names,),
          (composite_symbol_names,))
    """
    node = templates.replace(
        template,
        loop_vars=loop_vars,
        loop_vars_ast_tuple=loop_vars_ast_tuple,
        test_name=self.ctx.namer.new_symbol('loop_test', reserved_symbols),
        test=node.test,
        body_name=self.ctx.namer.new_symbol('loop_body', reserved_symbols),
        body=node.body,
        state_functions=state_functions,
        state_getter_name=state_getter_name,
        state_setter_name=state_setter_name,
        basic_symbol_names=basic_symbol_names,
        composite_symbol_names=composite_symbol_names)
  else:
    # No basic loop variables: the body and test take and return nothing.
    template = """
      state_functions
      def body_name():
        body
        return ()
      def test_name():
        return test
      ag__.while_stmt(
          test_name,
          body_name,
          state_getter_name,
          state_setter_name,
          (),
          (),
          (composite_symbol_names,))
    """
    node = templates.replace(
        template,
        test_name=self.ctx.namer.new_symbol('loop_test', reserved_symbols),
        test=node.test,
        body_name=self.ctx.namer.new_symbol('loop_body', reserved_symbols),
        body=node.body,
        state_functions=state_functions,
        state_getter_name=state_getter_name,
        state_setter_name=state_setter_name,
        composite_symbol_names=composite_symbol_names)

  undefined_assigns = self._create_undefined_assigns(possibly_undefs)
  return undefined_assigns + node
def visit_For(self, node):
  """Lowers a `for` loop into a functional `ag__.for_stmt` call.

  Mirrors visit_While; additionally handles the iterate target and an
  optional 'extra_test' annotation (set by e.g. the break converter) that
  acts as an additional loop condition.
  """
  node = self.generic_visit(node)

  # Modifications to the iterate also count as loop-variable writes.
  (basic_loop_vars, composite_loop_vars,
   reserved_symbols, possibly_undefs) = self._get_loop_vars(
       node, (anno.getanno(node, annos.NodeAnno.BODY_SCOPE).modified
              | anno.getanno(node, annos.NodeAnno.ITERATE_SCOPE).modified))
  loop_vars, loop_vars_ast_tuple = self._loop_var_constructs(
      basic_loop_vars)
  body_name = self.ctx.namer.new_symbol('loop_body', reserved_symbols)

  state_getter_name = self.ctx.namer.new_symbol('get_state', reserved_symbols)
  state_setter_name = self.ctx.namer.new_symbol('set_state', reserved_symbols)
  state_functions = self._create_state_functions(
      composite_loop_vars, state_getter_name, state_setter_name)

  if anno.hasanno(node, 'extra_test'):
    extra_test = anno.getanno(node, 'extra_test')
    extra_test_name = self.ctx.namer.new_symbol(
        'extra_test', reserved_symbols)
    template = """
      def extra_test_name(loop_vars):
        return extra_test_expr
    """
    extra_test_function = templates.replace(
        template,
        extra_test_name=extra_test_name,
        loop_vars=loop_vars,
        extra_test_expr=extra_test)
  else:
    # No extra test: pass a literal None to for_stmt.
    extra_test_name = parser.parse_expression('None')
    extra_test_function = []

  # Workaround for PEP-3113
  # iterates_var holds a single variable with the iterates, which may be a
  # tuple.
  iterates_var_name = self.ctx.namer.new_symbol(
      'iterates', reserved_symbols)
  template = """
    iterates = iterates_var_name
  """
  iterate_expansion = templates.replace(
      template,
      iterates=node.target,
      iterates_var_name=iterates_var_name)

  undefined_assigns = self._create_undefined_assigns(possibly_undefs)

  # Symbol names are passed as string constants for error reporting inside
  # the for_stmt operator.
  basic_symbol_names = tuple(
      gast.Constant(str(symbol), kind=None) for symbol in basic_loop_vars)
  composite_symbol_names = tuple(
      gast.Constant(str(symbol), kind=None) for symbol in composite_loop_vars)

  # TODO(mdan): Use a single template.
  # If the body and test functions took a single tuple for loop_vars, instead
  # of *loop_vars, then a single template could be used.
  if loop_vars:
    template = """
      undefined_assigns
      state_functions
      def body_name(iterates_var_name, loop_vars):
        iterate_expansion
        body
        return loop_vars,
      extra_test_function
      loop_vars_ast_tuple = ag__.for_stmt(
          iter_,
          extra_test_name,
          body_name,
          state_getter_name,
          state_setter_name,
          (loop_vars,),
          (basic_symbol_names,),
          (composite_symbol_names,))
    """
    return templates.replace(
        template,
        undefined_assigns=undefined_assigns,
        loop_vars=loop_vars,
        loop_vars_ast_tuple=loop_vars_ast_tuple,
        iter_=node.iter,
        iterate_expansion=iterate_expansion,
        iterates_var_name=iterates_var_name,
        extra_test_name=extra_test_name,
        extra_test_function=extra_test_function,
        body_name=body_name,
        body=node.body,
        state_functions=state_functions,
        state_getter_name=state_getter_name,
        state_setter_name=state_setter_name,
        basic_symbol_names=basic_symbol_names,
        composite_symbol_names=composite_symbol_names)
  else:
    # No basic loop variables: the body takes only the iterate and returns
    # nothing.
    template = """
      undefined_assigns
      state_functions
      def body_name(iterates_var_name):
        iterate_expansion
        body
        return ()
      extra_test_function
      ag__.for_stmt(
          iter_,
          extra_test_name,
          body_name,
          state_getter_name,
          state_setter_name,
          (),
          (),
          (composite_symbol_names,))
    """
    return templates.replace(
        template,
        undefined_assigns=undefined_assigns,
        iter_=node.iter,
        iterate_expansion=iterate_expansion,
        iterates_var_name=iterates_var_name,
        extra_test_name=extra_test_name,
        extra_test_function=extra_test_function,
        body_name=body_name,
        body=node.body,
        state_functions=state_functions,
        state_getter_name=state_getter_name,
        state_setter_name=state_setter_name,
        composite_symbol_names=composite_symbol_names)
def transform(node, ctx):
  """Converts control flow statements to their functional AutoGraph forms."""
  return ControlFlowTransformer(ctx).visit(node)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/converters/control_flow.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Handles function calls, by generating compiled function names and calls.
Note: this transformer does not rename the top level object being converted;
that is the caller's responsibility.
Requires function_scopes.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.utils import ag_logging
# TODO(mdan): Rename to FunctionCallsTransformer.
class _Function(object):
  """Per-function frame used with converter.Base's state tracking."""

  # NOTE(review): presumably tells the state tracker that no enclosing frame
  # is required for this type -- confirm against converter.Base.
  no_root = True

  def __init__(self):
    # Name of the function-scope context manager variable; set by the
    # visitors when entering a function.
    self.context_name = None


# Guards the one-time warning emitted when pdb.set_trace is found in
# converted code (see CallTreeTransformer.visit_Call).
set_trace_warned = False
class CallTreeTransformer(converter.Base):
  """Transforms the call tree by renaming transformed symbols."""

  def visit_Lambda(self, node):
    """Tracks the function scope of lambdas that have one."""
    if anno.hasanno(node, 'function_context_name'):
      # Lambda functions created during the conversion process have no
      # context manager.
      self.state[_Function].enter()
      self.state[_Function].context_name = anno.getanno(
          node, 'function_context_name')
      node = self.generic_visit(node)
      self.state[_Function].exit()
    else:
      node = self.generic_visit(node)
    return node

  def visit_FunctionDef(self, node):
    """Visits a function, recording its scope and adjusting its decorators."""
    self.state[_Function].enter()
    # Note: if the conversion process ever creates helper functions, this
    # assumption will no longer hold.
    assert anno.hasanno(node, 'function_context_name'), (
        'The function_scopes converter always creates a scope for functions.')
    self.state[_Function].context_name = anno.getanno(
        node, 'function_context_name')
    node.args = self.visit(node.args)
    node.body = self.visit_block(node.body)

    if self.state[_Function].level < 2:
      # Top-level functions lose their decorator because the conversion is
      # always just-in-time and by the time it happens the decorators are
      # already set to be applied.
      node.decorator_list = []
    else:
      # Inner functions are converted already, so we insert a decorator to
      # prevent double conversion. Double conversion would work too, but this
      # saves the overhead.
      node.decorator_list.append(
          parser.parse_expression('ag__.do_not_convert_internal'))

    if node.returns:
      node.returns = self.visit(node.returns)

    self.state[_Function].exit()
    return node

  def visit_With(self, node):
    # Context manager calls (in node.items) are not converted.
    node.body = self.visit_block(node.body)
    return node

  def visit_Call(self, node):
    """Rewrites eligible calls into `ag__.converted_call(...)`."""
    full_name = str(anno.getanno(node.func, anno.Basic.QN, default=''))
    function_context_name = self.state[_Function].context_name
    node = self.generic_visit(node)

    # TODO(mdan): Refactor converted_call as a 'Call' operator.

    # Calls to the internal 'ag__' module are never converted (though their
    # arguments might be).
    if full_name.startswith('ag__.'):
      return node

    # Calls to the function context manager (inserted by function_scopes) are
    # also safe.
    if full_name.startswith(function_context_name + '.'):
      return node

    # Calls to pdb.set_trace or ipdb.set_trace are never converted. We don't use
    # the normal mechanisms to bypass these literals because they are sensitive
    # to the frame they are being called from.
    # TODO(mdan): Generalize this to a "static whitelist" config.
    if full_name in ('pdb.set_trace', 'ipdb.set_trace', 'breakpoint'):
      global set_trace_warned
      if not set_trace_warned:
        # TODO(mdan): Update and shorten once available on tensorflow.org.
        ag_logging.warn(
            'Detected `pdb.set_trace()` in converted code. The code'
            ' generated by AutoGraph is not optimized for step-by-step'
            ' debugging. See https://github.com/tensorflow/tensorflow/'
            'blob/master/tensorflow/python/autograph/g3doc/reference/'
            'debugging.md.')
        set_trace_warned = True
      return node

    # `print` stays untouched unless the BUILTIN_FUNCTIONS feature is on.
    if (full_name == 'print' and
        not self.ctx.program.options.uses(converter.Feature.BUILTIN_FUNCTIONS)):
      return node

    func = node.func

    # Collect positional arguments, separating out a *args expansion.
    starred_arg = None
    normal_args = []
    for a in node.args:
      if isinstance(a, gast.Starred):
        assert starred_arg is None, 'Multiple *args should be impossible.'
        starred_arg = a
      else:
        normal_args.append(a)
    if starred_arg is None:
      args = templates.replace_as_expression('(args,)', args=normal_args)
    else:
      args = templates.replace_as_expression(
          '(args,) + tuple(stararg)',
          stararg=starred_arg.value,
          args=normal_args)

    # Collect keyword arguments, separating out a **kwargs expansion.
    kwargs_arg = None
    normal_keywords = []
    for k in node.keywords:
      if k.arg is None:
        assert kwargs_arg is None, 'Multiple **kwargs should be impossible.'
        kwargs_arg = k
      else:
        normal_keywords.append(k)
    if kwargs_arg is None:
      if not normal_keywords:
        kwargs = parser.parse_expression('None')
      else:
        kwargs = ast_util.keywords_to_dict(normal_keywords)
    else:
      kwargs = templates.replace_as_expression(
          'dict(kwargs, **keywords)',
          kwargs=kwargs_arg.value,
          keywords=ast_util.keywords_to_dict(normal_keywords))

    template = """
      ag__.converted_call(func, options, args, kwargs, function_ctx)
    """
    new_call = templates.replace_as_expression(
        template,
        func=func,
        options=parser.parse_expression(function_context_name + '.callopts'),
        args=args,
        kwargs=kwargs,
        function_ctx=function_context_name)

    return new_call
def transform(node, ctx):
  """Transforms function calls to their compiled counterparts.

  Args:
    node: AST
    ctx: EntityContext

  Returns:
    The transformed AST.
  """
  return CallTreeTransformer(ctx).visit(node)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/converters/call_trees.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code converters used by Autograph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Naming conventions:
# * each converter should specialize on a single idiom; be consistent with
# the Python reference for naming
# * all converters inherit core.converter.Base
# * module names describe the idiom that the converter covers, plural
# * the converter class is named consistent with the module, singular and
# includes the word Transformer
#
# Example:
#
# lists.py
# class ListTransformer(converter.Base)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/converters/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Lowers break statements to conditionals."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct.static_analysis.annos import NodeAnno
class _Break(object):
  """Tracks break usage within a single loop's scope."""

  def __init__(self):
    # True once a break statement was seen inside the current loop.
    self.used = False
    # Name of the boolean control variable that records whether break fired.
    self.control_var_name = None

  def __repr__(self):
    return 'used: %s, var: %s' % (self.used, self.control_var_name)
class BreakTransformer(converter.Base):
  """Canonicalizes break statements into additional conditionals."""

  def visit_Break(self, node):
    """Replaces `break` with `control_var = True; continue`."""
    self.state[_Break].used = True
    var_name = self.state[_Break].control_var_name
    # TODO(mdan): This will fail when expanded inside a top-level else block.
    template = """
      var_name = True
      continue
    """
    return templates.replace(template, var_name=var_name)

  def _guard_if_present(self, block, var_name):
    """Prevents the block from executing if var_name is set."""
    if not block:
      return block
    template = """
      if ag__.not_(var_name):
        block
    """
    node = templates.replace(
        template,
        var_name=var_name,
        block=block)
    return node

  def _process_body(self, nodes, break_var):
    """Visits a loop body in a fresh _Break frame.

    Returns:
      Pair (visited_nodes, break_used).
    """
    self.state[_Break].enter()
    self.state[_Break].control_var_name = break_var
    nodes = self.visit_block(nodes)
    break_used = self.state[_Break].used
    self.state[_Break].exit()
    return nodes, break_used

  def visit_While(self, node):
    """Adds a break control variable to the loop condition when needed."""
    scope = anno.getanno(node, NodeAnno.BODY_SCOPE)
    break_var = self.ctx.namer.new_symbol('break_', scope.referenced)

    node.test = self.visit(node.test)
    node.body, break_used = self._process_body(node.body, break_var)
    # A break in the else clause applies to the containing scope.
    node.orelse = self.visit_block(node.orelse)

    if break_used:
      # Python's else clause only triggers if the loop exited cleanly (e.g.
      # break did not trigger).
      guarded_orelse = self._guard_if_present(node.orelse, break_var)

      template = """
        var_name = False
        while ag__.and_(lambda: test, lambda: ag__.not_(var_name)):
          body
        else:
          orelse
      """
      node = templates.replace(
          template,
          var_name=break_var,
          test=node.test,
          body=node.body,
          orelse=guarded_orelse)

    return node

  def visit_For(self, node):
    """Lowers break in `for` loops via a control variable and 'extra_test'."""
    scope = anno.getanno(node, NodeAnno.BODY_SCOPE)
    break_var = self.ctx.namer.new_symbol('break_', scope.referenced)

    node.target = self.visit(node.target)
    node.iter = self.visit(node.iter)
    node.body, break_used = self._process_body(node.body, break_var)
    # A break in the else clause applies to the containing scope.
    node.orelse = self.visit_block(node.orelse)

    if break_used:
      # Python's else clause only triggers if the loop exited cleanly (e.g.
      # break did not trigger).
      guarded_orelse = self._guard_if_present(node.orelse, break_var)
      extra_test = templates.replace_as_expression(
          'ag__.not_(var_name)', var_name=break_var)

      # The extra test is hidden in the AST, which will confuse the static
      # analysis. To mitigate that, we insert a no-op statement that ensures
      # the control variable is marked as used.
      # TODO(mdan): Use a marker instead, e.g. ag__.condition_loop_on(var_name)
      template = """
        var_name = False
        for target in iter_:
          (var_name,)
          body
        else:
          orelse
      """
      node = templates.replace(
          template,
          var_name=break_var,
          iter_=node.iter,
          target=node.target,
          body=node.body,
          orelse=guarded_orelse)

      # templates.replace returned a statement list; index 1 is the For node,
      # which the control flow converter will read the 'extra_test' from.
      anno.setanno(node[1], 'extra_test', extra_test)

    return node
def transform(node, ctx):
  """Lowers break statements into conditionals on a control variable."""
  return BreakTransformer(ctx).visit(node)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/converters/break_statements.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Handles directives.
This converter removes the directive functions from the code and moves the
information they specify into AST annotations. It is a specialized form of
static analysis, one that is specific to AutoGraph.
Note that this requires that the actual directive functions are static - that
is, they do not change at runtime. So if you do something like this:
tf.autograph.set_loop_options = <new function>
Then the directive may no longer be recognized. Furthermore, if the
converted function is cached, such an action may be irreversible.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import gast
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.lang import directives
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.util import tf_inspect
ENCLOSING_LOOP = 'enclosing_loop'
STATIC_VALUE = 'static_value'
"""Used for AST annotations, see visit_Name."""
def _map_args(call_node, function):
  """Maps AST call nodes to the actual function's arguments.

  Args:
    call_node: ast.Call
    function: Callable[..., Any], the actual function matching call_node
  Returns:
    Dict[Text, ast.AST], mapping each of the function's argument names to
    the respective AST node.
  Raises:
    ValueError: if the default arguments are not correctly set
  """
  args = call_node.args
  kwds = {kwd.arg: kwd.value for kwd in call_node.keywords}
  call_args = tf_inspect.getcallargs(function, *args, **kwds)

  # Keyword arguments not specified in kwds will be mapped to their defaults,
  # which are Python values. Since we don't currently have a way to transform
  # those into AST references, we simply remove them. By convention, directives
  # use UNSPECIFIED as default value for optional arguments. No other
  # defaults should be present.
  unexpected_defaults = []
  for k in call_args:
    if (k not in kwds
        and call_args[k] not in args
        and call_args[k] is not directives.UNSPECIFIED):
      unexpected_defaults.append(k)
  if unexpected_defaults:
    raise ValueError('Unexpected keyword argument values, %s, for function %s'
                     % (zip(unexpected_defaults,
                            [call_args[k] for k in unexpected_defaults]),
                        function))
  return {k: v for k, v in call_args.items() if v is not directives.UNSPECIFIED}
class DirectivesTransformer(converter.Base):
  """Parses compiler directives and converts them into AST annotations."""

  def _process_symbol_directive(self, call_node, directive):
    """Attaches a symbol directive's args to the target symbol's definitions."""
    if len(call_node.args) < 1:
      raise ValueError('"%s" requires a positional first argument'
                       ' as the target' % directive.__name__)
    target = call_node.args[0]
    defs = anno.getanno(target, anno.Static.ORIG_DEFINITIONS)
    for def_ in defs:
      def_.directives[directive] = _map_args(call_node, directive)
    return call_node

  def _process_statement_directive(self, call_node, directive):
    """Annotates the enclosing loop with a statement directive's args."""
    if self.local_scope_level < 2:
      raise ValueError(
          '"%s" must be used inside a statement' % directive.__name__)
    target = self.get_local(ENCLOSING_LOOP)
    node_anno = anno.getanno(target, converter.AgAnno.DIRECTIVES, {})
    node_anno[directive] = _map_args(call_node, directive)
    anno.setanno(target, converter.AgAnno.DIRECTIVES, node_anno)
    return call_node

  def visit_Name(self, node):
    """Annotates undefined loaded names with their namespace static value."""
    node = self.generic_visit(node)
    if isinstance(node.ctx, gast.Load):
      defs = anno.getanno(node, anno.Static.DEFINITIONS, ())
      is_defined = bool(defs)
      # Only names without local definitions can refer to values from the
      # enclosing namespace (e.g. an imported module).
      if not is_defined and node.id in self.ctx.info.namespace:
        anno.setanno(node, STATIC_VALUE, self.ctx.info.namespace[node.id])
    return node

  def visit_Attribute(self, node):
    """Propagates static values through module attribute accesses."""
    node = self.generic_visit(node)
    parent_val = anno.getanno(node.value, STATIC_VALUE, default=None)
    if parent_val is not None and inspect.ismodule(parent_val):
      if hasattr(parent_val, node.attr):
        anno.setanno(node, STATIC_VALUE, getattr(parent_val, node.attr))
    return node

  def visit_Expr(self, node):
    """Recognizes directive calls and removes them from the output code."""
    node = self.generic_visit(node)
    if isinstance(node.value, gast.Call):
      call_node = node.value
      static_val = anno.getanno(call_node.func, STATIC_VALUE, default=None)
      if static_val is not None:
        # Note: directive calls are not output in the generated code, hence
        # the removal from the code by returning None.
        if static_val is directives.set_element_type:
          self._process_symbol_directive(call_node, static_val)
          return None
        elif static_val is directives.set_loop_options:
          self._process_statement_directive(call_node, static_val)
          return None
    return node

  # TODO(mdan): This will be insufficient for other control flow.
  # That means that if we ever have a directive that affects things other than
  # loops, we'll need support for parallel scopes, or have multiple converters.
  def _track_and_visit_loop(self, node):
    """Records the loop node so inner statement directives can find it."""
    self.enter_local_scope()
    self.set_local(ENCLOSING_LOOP, node)
    node = self.generic_visit(node)
    self.exit_local_scope()
    return node

  def visit_While(self, node):
    return self._track_and_visit_loop(node)

  def visit_For(self, node):
    return self._track_and_visit_loop(node)
def transform(node, ctx):
  """Removes directive calls, annotating the AST with their information."""
  return DirectivesTransformer(ctx).visit(node)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/converters/directives.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wraps the body of a converted function with auxiliary constructs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct.static_analysis import annos
class _Function(object):
  """Per-function state tracked while wrapping function bodies."""

  def __init__(self):
    # Name of the generated function-scope context variable; set by the
    # transformer when the enclosing function/lambda is wrapped.
    self.context_name = None
class FunctionBodyTransformer(converter.Base):
  """Wraps function bodies around autograph-specific boilerplate."""

  def visit_Return(self, node):
    """Rewrites `return value` to route through the function context.

    Bare `return` statements (no value) are left untouched.
    """
    if node.value is None:
      return node
    return templates.replace(
        'return function_context_name.mark_return_value(value)',
        function_context_name=self.state[_Function].context_name,
        value=node.value)

  def visit_Lambda(self, node):
    """Wraps a top-level lambda body in ag__.with_function_scope."""
    self.state[_Function].enter()
    node = self.generic_visit(node)

    # Only wrap the top-level function. Theoretically, we can and should wrap
    # everything, but that can lead to excessive boilerplate when lambdas are
    # nested.
    # TODO(mdan): Looks more closely for use cases that actually require this.
    if self.state[_Function].level > 2:
      self.state[_Function].exit()
      return node

    scope = anno.getanno(node, anno.Static.SCOPE)
    function_context_name = self.ctx.namer.new_symbol('lambda_scope',
                                                      scope.referenced)
    self.state[_Function].context_name = function_context_name
    anno.setanno(node, 'function_context_name', function_context_name)

    template = """
      ag__.with_function_scope(
          lambda function_context: body, function_context_name, options)
    """
    node.body = templates.replace_as_expression(
        template,
        options=self.ctx.program.options.to_ast(),
        function_context=function_context_name,
        function_context_name=gast.Constant(function_context_name, kind=None),
        body=node.body)

    self.state[_Function].exit()
    return node

  def visit_FunctionDef(self, node):
    """Wraps a function body in a `with ag__.FunctionScope(...)` block.

    A leading docstring, if present, is detached first and re-attached in
    front of the wrapped body so it remains the first statement.
    """
    self.state[_Function].enter()
    scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)

    function_context_name = self.ctx.namer.new_symbol(
        '{}_scope'.format(node.name), scope.referenced)
    self.state[_Function].context_name = function_context_name
    anno.setanno(node, 'function_context_name', function_context_name)

    node = self.generic_visit(node)

    docstring_node = None
    if node.body:
      first_statement = node.body[0]
      # A docstring is an Expr statement whose value is a constant (string).
      if (isinstance(first_statement, gast.Expr) and
          isinstance(first_statement.value, gast.Constant)):
        docstring_node = first_statement
        node.body = node.body[1:]

    template = """
      with ag__.FunctionScope(
          function_name, context_name, options) as function_context:
        body
    """
    wrapped_body = templates.replace(
        template,
        function_name=gast.Constant(node.name, kind=None),
        context_name=gast.Constant(function_context_name, kind=None),
        options=self.ctx.program.options.to_ast(),
        function_context=function_context_name,
        body=node.body)

    if docstring_node is not None:
      wrapped_body = [docstring_node] + wrapped_body

    node.body = wrapped_body

    self.state[_Function].exit()
    return node
def transform(node, ctx):
  """Applies the function-scope wrapping transformation to an AST.

  Args:
    node: ast.AST, the root node to transform.
    ctx: converter context for the transformation.

  Returns:
    The transformed AST, with function bodies wrapped in scope boilerplate.
  """
  transformer = FunctionBodyTransformer(ctx)
  return transformer.visit(node)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/converters/function_scopes.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for list_comprehensions module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.converters import list_comprehensions
from tensorflow.python.autograph.core import converter_testing
from tensorflow.python.platform import test
class ListCompTest(converter_testing.TestCase):
  """Tests for the list_comprehensions converter.

  The inner `test_fn` definitions are fixtures: their exact source is what
  the converter transforms, so their style is intentionally fixed.
  """

  def assertTransformedEquivalent(self, test_fn, *inputs):
    """Asserts the converted test_fn matches the original on `inputs`."""
    with self.converted(test_fn, list_comprehensions, {}) as result:
      self.assertEqual(test_fn(*inputs), result.test_fn(*inputs))

  def test_basic(self):

    def test_fn(l):
      s = [e * e for e in l]
      return s

    self.assertTransformedEquivalent(test_fn, [])
    self.assertTransformedEquivalent(test_fn, [1, 2, 3])

  def test_multiple_generators(self):

    def test_fn(l):
      s = [e * e for sublist in l for e in sublist]
      return s

    self.assertTransformedEquivalent(test_fn, [])
    self.assertTransformedEquivalent(test_fn, [[1], [2], [3]])

  def test_cond(self):

    def test_fn(l):
      s = [e * e for e in l if e > 1]
      return s

    self.assertTransformedEquivalent(test_fn, [])
    self.assertTransformedEquivalent(test_fn, [1, 2, 3])
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/converters/list_comprehensions_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for control_flow module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.python.autograph.converters import control_flow
from tensorflow.python.autograph.core import converter_testing
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class ControlFlowTest(converter_testing.TestCase):
  """Tests for the control_flow converter.

  The inner `test_fn` definitions are fixtures: the converter transforms
  their exact source, so their style is intentionally fixed.
  """

  def assertTransformedResult(self, test_fn, inputs, expected, symbols=None):
    """Converts test_fn, runs it on `inputs` and compares with `expected`.

    Args:
      test_fn: the function whose source is converted and executed.
      inputs: a single argument or a tuple of arguments for test_fn.
      expected: the expected (evaluated) result.
      symbols: optional dict of extra symbols visible to the converted code.
    """
    if not isinstance(inputs, tuple):
      inputs = (inputs,)
    if not symbols:
      symbols = {}
    with self.converted(test_fn, control_flow, symbols,
                        (constant_op.constant,)) as result:
      self.assertAllEqual(self.evaluate(result.test_fn(*inputs)), expected)

  @test_util.run_deprecated_v1
  def test_while_basic(self):

    def test_fn(n):
      i = 0
      s = 0
      while i < n:
        s += i
        i += 1
      return s, i, n

    self.assertTransformedResult(test_fn, constant_op.constant(5), (10, 5, 5))

  @test_util.run_deprecated_v1
  def test_while_nested(self):

    def test_fn(n):
      i = 0
      j = 0
      s = 0
      while i < n:
        while j < i:
          j += 3
        u = i + j  # 'u' is not defined within the inner loop
        s += u
        i += 1
        j = 0
      return s, i, j, n

    self.assertTransformedResult(test_fn, constant_op.constant(5),
                                 (25, 5, 0, 5))

  @test_util.run_deprecated_v1
  def test_while_single_output(self):

    def test_fn(n):
      while n > 0:
        n -= 1
      return n

    self.assertTransformedResult(test_fn, constant_op.constant(5), 0)

  def test_while_composite_state(self):

    class TestClass(object):

      def __init__(self):
        self.x = constant_op.constant(3)

    def test_fn(n):
      tc = TestClass()
      while n > 0:
        tc.x += 1
        n -= 1
      return n

    self.assertTransformedResult(
        test_fn, constant_op.constant(5), 0, symbols={'TestClass': TestClass})

  def test_while_composite_state_initialized_in_loop(self):

    class TestClass(object):
      pass

    def test_fn(n, x):
      tc = TestClass()
      while n < 5:
        if n == 0:
          tc.x = x
        else:
          tc.x = tc.x + 1
        n += 1
      return tc.x

    self.assertTransformedResult(
        test_fn, (0, constant_op.constant(10)),
        14,
        symbols={'TestClass': TestClass})
    with self.converted(
        test_fn, control_flow, {'TestClass': TestClass}) as result:
      # TODO(b/128519776): Better error message.
      # NOTE(review): uses assertRaisesRegex here but assertRaisesRegexp
      # elsewhere in this class; consider unifying.
      with self.assertRaisesRegex(
          AttributeError, '\'TestClass\' object has no attribute \'x\''):
        result.test_fn(constant_op.constant(0), constant_op.constant(5))

  def test_while_nested_composite_state(self):

    class TestClass(object):

      def __init__(self):
        self.x = constant_op.constant(3)

    def test_fn(n):
      tc = TestClass()
      while n > 0:
        if n < 2:
          tc.x += 1
        n -= 1
      return n

    self.assertTransformedResult(
        test_fn, constant_op.constant(5), 0, symbols={'TestClass': TestClass})

  def test_while_local_composite(self):

    class TestClass(object):

      def __init__(self):
        self.x = constant_op.constant(3)

    def test_fn(n):
      while n > 0:
        tc = TestClass()
        tc.x = tc.x
        n -= 1
      return n

    self.assertTransformedResult(
        test_fn, constant_op.constant(5), 0, symbols={'TestClass': TestClass})

  # TODO(b/127642077): Add tests for x.y.z = 2*x.y.z and x.y[z] = 2*x.y[z].

  def test_while_local_composite_complex_nestable(self):

    # This class is ok to be in a tf.while_loop's state.
    class TestClass(collections.namedtuple('TestClass', ('x'))):
      pass

    def test_fn(n):
      tc = TestClass([constant_op.constant(0)])
      while n > 0:
        tc = TestClass([constant_op.constant(3)])
        tc.x[0] = tc.x[0] + 1
        n -= 1
      return tc.x[0]

    ns = {'TestClass': TestClass, 'constant_op': constant_op}
    self.assertTransformedResult(
        test_fn, constant_op.constant(5), 4, symbols=ns)

  def test_while_local_composite_complex_illegal(self):

    class TestClass(object):

      def __init__(self):
        self.x = [constant_op.constant(3)]

    def test_fn(n):
      while n > 0:
        tc = TestClass()
        tc.x[0] = tc.x[0] + 1
        n -= 1
      return tc.x[0]

    with self.converted(
        test_fn, control_flow, {'TestClass': TestClass}) as result:
      # The tested function would require `tc` to become part of the while loop
      # state, but TensorFlow doesn't support classes at the moment.
      with self.assertRaisesRegexp(
          ValueError, 'must be defined before the loop:.*tc.*'):
        result.test_fn(constant_op.constant(5))

  @test_util.run_deprecated_v1
  def test_while_dispatches_by_cond_only(self):

    class TensorIncompatibleNumeric(object):
      """Works in arithmetic expression, but errors out with TF ops."""

      def __init__(self, val):
        self.val = val

      def __add__(self, other):
        return TensorIncompatibleNumeric(self.val + other)

    def test_fn(n, s):
      while n > 0:
        n -= 1
        s += n
      return s

    self.assertTransformedResult(test_fn, (constant_op.constant(5), 0), 10)
    with self.converted(test_fn, control_flow, {}) as result:
      # n alone controls the staging. When the loop is not staged, Python
      # knows how to add the two objects. But when staged, tf.while_loop will
      # not know how to deal with the TensorIncompatibleNumeric object.
      self.assertEqual(result.test_fn(5, TensorIncompatibleNumeric(0)).val, 10)
      with self.assertRaises(TypeError):
        result.test_fn(constant_op.constant(5), TensorIncompatibleNumeric(0))

  @test_util.run_deprecated_v1
  def test_if_basic(self):

    def test_fn(n):
      a = 0
      b = 0
      if n > 0:
        a = -n
      else:
        b = 2 * n
      return a, b

    self.assertTransformedResult(test_fn, constant_op.constant(1), (-1, 0))
    self.assertTransformedResult(test_fn, constant_op.constant(-1), (0, -2))

  def test_if_sparse_tensor(self):

    def test_fn(cond, a):
      if cond:
        a = -a
      return a

    st = sparse_tensor.SparseTensor(
        indices=((0,),), values=(0,), dense_shape=(1,))
    self.assertTransformedResult(test_fn, (st, constant_op.constant(1)), -1)
    self.assertTransformedResult(test_fn, (None, constant_op.constant(1)), 1)

  @test_util.run_deprecated_v1
  def test_if_complex_outputs(self):

    class TestClass(object):

      def __init__(self, a, b):
        self.a = a
        self.b = b

    def test_fn(n, obj):
      obj.a = 0
      obj.b = 0
      if n > 0:
        obj.a = -n
      else:
        obj.b = 2 * n
      return obj

    with self.converted(test_fn, control_flow, {}) as result:
      res_obj = result.test_fn(constant_op.constant(1), TestClass(0, 0))
      self.assertEqual(self.evaluate((res_obj.a, res_obj.b)), (-1, 0))
      res_obj = result.test_fn(constant_op.constant(-1), TestClass(0, 0))
      self.assertEqual(self.evaluate((res_obj.a, res_obj.b)), (0, -2))

  @test_util.run_deprecated_v1
  def test_if_single_output(self):

    def test_fn(n):
      if n > 0:
        n = -n
      return n

    self.assertTransformedResult(test_fn, constant_op.constant(1), -1)

  @test_util.run_deprecated_v1
  def test_if_semi(self):

    def test_fn(n):
      if n > 0:
        n = 3
      return n

    self.assertTransformedResult(test_fn, constant_op.constant(2), 3)
    self.assertTransformedResult(test_fn, constant_op.constant(-3), -3)

  @test_util.run_deprecated_v1
  def test_if_local_var(self):

    def test_fn(n):
      if n > 0:
        b = 4
        n = b + 1
      return n

    self.assertTransformedResult(test_fn, constant_op.constant(1), 5)
    self.assertTransformedResult(test_fn, constant_op.constant(-1), -1)

  @test_util.run_deprecated_v1
  def test_if_no_outputs(self):

    def test_fn(n):
      if n > 0:
        b = 4  # pylint:disable=unused-variable
      return n

    # Without side effect guards, the if statement will stage a cond,
    # but that will be pruned at execution.
    self.assertTransformedResult(test_fn, constant_op.constant(1), 1)
    self.assertTransformedResult(test_fn, constant_op.constant(-1), -1)

  @test_util.run_deprecated_v1
  def test_if_unbalanced_multiple_composites(self):

    class Foo(object):

      def __init__(self):
        self.b = 2
        self.c = 3

    def test_fn(x, condition):
      z = 5
      if condition:
        x.b = 7
        x.c = 11
        z = 13
      return x.b, x.c, z

    self.assertTransformedResult(test_fn, (Foo(), constant_op.constant(True)),
                                 (7, 11, 13))
    self.assertTransformedResult(test_fn, (Foo(), constant_op.constant(False)),
                                 (2, 3, 5))

  @test_util.run_deprecated_v1
  def test_if_unbalanced_composite(self):

    class Foo(object):

      def __init__(self):
        self.b = 2

    def test_fn(x, condition):
      z = 5
      if condition:
        x.b = 7
        z = 13
      return x.b, z

    self.assertTransformedResult(test_fn, (Foo(), constant_op.constant(True)),
                                 (7, 13))
    self.assertTransformedResult(test_fn, (Foo(), constant_op.constant(False)),
                                 (2, 5))

  @test_util.run_deprecated_v1
  def test_simple_for(self):

    def test_fn(l):
      s1 = 0
      s2 = 0
      for e in l:
        s1 += e
        s2 += e * e
      return s1, s2

    self.assertTransformedResult(test_fn, constant_op.constant([1, 3]), (4, 10))
    empty_vector = constant_op.constant([], shape=(0,), dtype=dtypes.int32)
    self.assertTransformedResult(test_fn, empty_vector, (0, 0))

  @test_util.run_deprecated_v1
  def test_for_single_output(self):

    def test_fn(l):
      s = 0
      for e in l:
        s += e
      return s

    self.assertTransformedResult(test_fn, constant_op.constant([1, 3]), 4)
    empty_vector = constant_op.constant([], shape=(0,), dtype=dtypes.int32)
    self.assertTransformedResult(test_fn, empty_vector, 0)

  def test_for_iterated_expression(self):

    eval_count = [0]

    def count_evals(x):
      eval_count[0] += 1
      return x

    def test_fn(n):
      s = 0
      for e in count_evals(range(n)):
        s += e
      return s

    ns = {'count_evals': count_evals}
    node, ctx = self.prepare(test_fn, ns)
    node = control_flow.transform(node, ctx)

    with self.compiled(node, ns) as result:
      self.assertEqual(result.test_fn(5), 10)
      # The iterated expression must be evaluated exactly once.
      self.assertEqual(eval_count[0], 1)

  def test_for_composite_state_initialized_in_loop(self):

    class TestClass(object):
      pass

    def test_fn(n, x):
      tc = TestClass()
      for i in n:
        if i == 0:
          tc.x = x
        else:
          tc.x = tc.x + i
      return tc.x

    self.assertTransformedResult(
        test_fn, (range(5), constant_op.constant(10)),
        20,
        symbols={'TestClass': TestClass})
    with self.converted(
        test_fn, control_flow, {'TestClass': TestClass}) as result:
      # TODO(b/128519776): Better error message.
      with self.assertRaisesRegex(
          AttributeError, '\'TestClass\' object has no attribute \'x\''):
        result.test_fn(
            constant_op.constant(list(range(5))), constant_op.constant(5))

  @test_util.run_deprecated_v1
  def test_for_tuple_unpacking(self):

    def test_fn(x_list):
      z = tf.constant(0)  # pylint:disable=undefined-variable
      for i, x in enumerate(x_list):
        z = z + x + i
      return z

    self.assertTransformedResult(test_fn, [3, 3], 7)

  def test_for_with_comprehension_in_body(self):

    def test_fn(l, n):
      s = constant_op.constant(list(range(n)))
      for _ in l:
        s += constant_op.constant([a for a in range(n)])
      return s

    self.assertTransformedResult(
        test_fn, (constant_op.constant([1, 2, 3]), 5),
        np.array(range(5)) * 4,
        symbols={'constant_op': constant_op})
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/converters/control_flow_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for directives module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.converters import directives as directives_converter
from tensorflow.python.autograph.core import converter_testing
from tensorflow.python.autograph.core.converter import AgAnno
from tensorflow.python.autograph.lang import directives
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.platform import test
class DirectivesTest(converter_testing.TestCase):
  """Tests for the directives converter.

  The inner `test_fn` definitions are fixtures whose exact source is what
  the converter processes; their style is intentionally fixed.
  """

  def test_local_target(self):

    def test_fn():
      l = []
      string_var = 0
      directives.set_element_type(l, 'a', string_var)

    node, ctx = self.prepare(test_fn, {'directives': directives})
    node = directives_converter.transform(node, ctx)

    # The directive must be attached to the definition of `l`.
    def_, = anno.getanno(node.body[0].targets[0],
                         anno.Static.DEFINITIONS)
    d = def_.directives[directives.set_element_type]
    self.assertEqual(d['dtype'].value, 'a')
    self.assertEqual(d['shape'].id, 'string_var')

  def test_argument_target(self):

    def test_fn(a):
      directives.set_element_type(a, 1, shape=2)

    node, ctx = self.prepare(test_fn, {'directives': directives})
    node = directives_converter.transform(node, ctx)

    def_, = anno.getanno(node.args.args[0], anno.Static.DEFINITIONS)
    d = def_.directives[directives.set_element_type]
    self.assertEqual(d['dtype'].value, 1)
    self.assertEqual(d['shape'].value, 2)

  def test_loop_target(self):

    def test_fn():
      a = True
      while True:
        directives.set_loop_options(parallel_iterations=10, back_prop=a)

    node, ctx = self.prepare(test_fn, {'directives': directives})
    node = directives_converter.transform(node, ctx)

    d = anno.getanno(node.body[1], AgAnno.DIRECTIVES)
    d = d[directives.set_loop_options]
    self.assertEqual(d['parallel_iterations'].value, 10)
    self.assertEqual(d['back_prop'].id, 'a')
    # Unset options must not appear in the directive dict.
    self.assertNotIn('swap_memory', d)

  def test_loop_target_with_no_loop(self):

    def test_fn():
      directives.set_loop_options()

    node, ctx = self.prepare(test_fn, {'directives': directives})
    # NOTE(review): assertRaisesRegexp is the deprecated alias of
    # assertRaisesRegex; other tests in this file use the modern name.
    with self.assertRaisesRegexp(ValueError, 'must be used inside a statement'):
      node = directives_converter.transform(node, ctx)

  def test_invalid_default(self):

    def invalid_directive(valid_arg, invalid_default=object()):
      del valid_arg
      del invalid_default
      return

    def call_invalid_directive():
      invalid_directive(1)

    node, _ = parser.parse_entity(call_invalid_directive, ())
    # Find the call to the invalid directive
    node = node.body[0].value
    with self.assertRaisesRegexp(ValueError, 'Unexpected keyword.*'):
      directives_converter._map_args(node, invalid_directive)

  def test_value_verification_does_not_trigger_properties(self):

    self_test = self

    class TestClass(object):

      @property
      def b(self):
        self_test.fail('This should never be evaluated')

    tc = TestClass()

    def test_fn():
      return tc.b + 1

    node, ctx = self.prepare(test_fn, {'tc': tc})
    node = directives_converter.transform(node, ctx)
    self.assertIsNotNone(node)

  def test_value_verification_does_not_trigger_getattr(self):

    class TestClass(object):

      def __init__(self):
        self.getattr_called = False

      def __getattr__(self, _):
        # Note: seems that any exception raised here is absorbed by hasattr.
        # So we can't call test.fail or raise.
        self.getattr_called = True

    tc = TestClass()

    def test_fn():
      return tc.b + 1

    node, ctx = self.prepare(test_fn, {'tc': tc})
    node = directives_converter.transform(node, ctx)
    self.assertIsNotNone(node)
    self.assertFalse(tc.getattr_called)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/converters/directives_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for break_statements module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.converters import break_statements
from tensorflow.python.autograph.core import converter_testing
from tensorflow.python.framework import constant_op
from tensorflow.python.platform import test
class BreakCanonicalizationTest(converter_testing.TestCase):
  """Tests for the break_statements converter.

  The inner `test_fn` definitions are fixtures whose exact source is what
  the converter transforms; their style is intentionally fixed.
  """

  def assertTransformedEquivalent(self, test_fn, *inputs):
    """Asserts the converted test_fn matches the original on `inputs`."""
    with self.converted(test_fn, break_statements, {},
                        (constant_op.constant,)) as result:
      self.assertEqual(test_fn(*inputs), result.test_fn(*inputs))

  def test_while_loop(self):

    def test_fn(x):
      v = []
      while x > 0:
        x -= 1
        if x % 2 == 0:
          break
        v.append(x)
      return v

    self.assertTransformedEquivalent(test_fn, 0)
    self.assertTransformedEquivalent(test_fn, 1)
    self.assertTransformedEquivalent(test_fn, 4)

  def test_for_loop(self):

    def test_fn(a):
      v = []
      for x in a:
        x -= 1
        if x % 2 == 0:
          break
        v.append(x)
      return v

    with self.converted(test_fn, break_statements, {},
                        (constant_op.constant,)) as result:
      # The break is incompletely canonicalized. The loop will not interrupt,
      # but the section following the break will be skipped.
      self.assertEqual([3], result.test_fn([5, 4]))

  def test_nested(self):

    def test_fn(x):
      v = []
      u = []
      w = []
      while x > 0:
        x -= 1
        if x % 2 == 0:
          if x % 3 != 0:
            u.append(x)
          else:
            w.append(x)
            break
        v.append(x)
      return v, u, w

    self.assertTransformedEquivalent(test_fn, 0)
    self.assertTransformedEquivalent(test_fn, 3)
    self.assertTransformedEquivalent(test_fn, 11)

  def test_nested_loops(self):

    def test_fn(x):
      v = []
      u = []
      while x > 0:
        x -= 1
        y = x
        while y > 0:
          y -= 1
          if y % 2 == 0:
            break
          u.append(y)
        if x == 0:
          break
        v.append(x)
      return v, u

    self.assertTransformedEquivalent(test_fn, 0)
    self.assertTransformedEquivalent(test_fn, 2)
    self.assertTransformedEquivalent(test_fn, 3)
    self.assertTransformedEquivalent(test_fn, 5)

  def test_loop_orelse(self):

    def test_fn(x):
      v = []
      u = []
      while x > 0:
        x -= 1
        y = x
        while y > 1:
          break
        else:
          u.append(y)
          break
        v.append(x)
      return v, u

    self.assertTransformedEquivalent(test_fn, 0)
    self.assertTransformedEquivalent(test_fn, 2)
    self.assertTransformedEquivalent(test_fn, 3)

  def test_multiple_correlated_breaks_with_side_effects(self):

    def test_fn(cond1):
      lst = []
      while True:
        if cond1:
          lst.append(1)
        else:
          break
        if lst[-1] > 0:  # lst always has an element here
          break
      return lst

    self.assertTransformedEquivalent(test_fn, True)
    self.assertTransformedEquivalent(test_fn, False)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/converters/break_statements_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slices module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.converters import slices
from tensorflow.python.autograph.core import converter_testing
from tensorflow.python.autograph.lang import directives
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import list_ops
from tensorflow.python.platform import test
class SliceTest(converter_testing.TestCase):
  """Tests for the slices converter.

  The inner `test_fn` definitions are fixtures whose exact source is what
  the converter transforms; their style is intentionally fixed.
  """

  def test_index_access(self):

    def test_fn(l):
      return l[1]

    node, ctx = self.prepare(test_fn, {})
    # Attach an element-type directive to the argument so the converter can
    # stage a typed tensor-list read.
    def_, = anno.getanno(node.args.args[0], anno.Static.DEFINITIONS)
    def_.directives[directives.set_element_type] = {
        'dtype': parser.parse_expression('tf.int32')
    }
    node = slices.transform(node, ctx)

    with self.compiled(node, {}, (dtypes.int32,)) as result:
      with self.cached_session() as sess:  # pylint:disable=unused-variable
        tl = list_ops.tensor_list_from_tensor(
            [1, 2], element_shape=constant_op.constant([], dtype=dtypes.int32))
        y = result.test_fn(tl)
        self.assertEqual(2, self.evaluate(y))

  def test_index_access_multiple_definitions(self):

    def test_fn(l):
      if l:
        l = []
      return l[1]

    node, ctx = self.prepare(test_fn, {})
    def_, = anno.getanno(node.args.args[0], anno.Static.DEFINITIONS)
    def_.directives[directives.set_element_type] = {
        'dtype': parser.parse_expression('tf.int32')
    }
    # A second, conflicting element type on the reaching definition inside
    # the if body must be rejected by the converter.
    def_, = anno.getanno(node.body[0].body[0].targets[0],
                         anno.Static.DEFINITIONS)
    def_.directives[directives.set_element_type] = {
        'dtype': parser.parse_expression('tf.float32')
    }
    with self.assertRaises(ValueError):
      slices.transform(node, ctx)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/converters/slices_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for call_trees module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import imp
from tensorflow.python.autograph.converters import call_trees
from tensorflow.python.autograph.converters import function_scopes
from tensorflow.python.autograph.core import converter_testing
from tensorflow.python.platform import test
class CallTreesTest(converter_testing.TestCase):
  """Tests for the call_trees converter (composed with function_scopes).

  `self.dynamic_calls` records the (args, kwargs) of each dynamically
  dispatched call made by the converted code; kwargs is None when the call
  site passed no keywords. The inner `test_fn` definitions are fixtures
  whose exact source is what the converters transform.
  """

  def test_normal_function(self):

    def test_fn(f):
      return f() + 20

    with self.converted(test_fn, (function_scopes, call_trees), {}) as result:
      self.assertEqual(result.test_fn(lambda: 1), 21)
      self.assertListEqual(self.dynamic_calls, [((), None)])

  def test_function_with_expression_in_argument(self):

    def test_fn(f, g):
      return f(g() + 20) + 4000

    with self.converted(test_fn, (function_scopes, call_trees), {}) as result:
      self.assertEqual(result.test_fn(lambda x: x + 300, lambda: 1), 4321)
      self.assertListEqual(self.dynamic_calls, [
          ((), None),
          ((21,), None),
      ])

  def test_function_with_call_in_argument(self):

    def test_fn(f, g):
      return f(g()) + 300

    with self.converted(test_fn, (function_scopes, call_trees), {}) as result:
      self.assertEqual(result.test_fn(lambda x: x + 20, lambda: 1), 321)
      self.assertListEqual(self.dynamic_calls, [
          ((), None),
          ((1,), None),
      ])

  def test_function_chaining(self):

    def get_one():
      return 1

    def test_fn():
      return get_one().__add__(20)

    with self.converted(test_fn, (function_scopes, call_trees),
                        {'get_one': get_one}, ()) as result:
      self.assertEqual(result.test_fn(), 21)
      self.assertListEqual(self.dynamic_calls, [
          ((), None),
          ((20,), None),
      ])

  def test_function_with_kwarg(self):

    def test_fn(f, a, b):
      return f(a, c=b) + 300

    with self.converted(test_fn, (function_scopes, call_trees), {}) as result:
      self.assertEqual(result.test_fn(lambda a, c: a + c, 1, 20), 321)
      self.assertListEqual(self.dynamic_calls, [((1,), {'c': 20})])

  def test_function_with_kwargs_starargs(self):

    def test_fn(f, a, *args, **kwargs):
      return f(a, *args, **kwargs) + 5

    with self.converted(test_fn, (function_scopes, call_trees), {}) as result:
      self.assertEqual(
          result.test_fn(lambda *args, **kwargs: 7, 1, *[2, 3], **{
              'b': 4,
              'c': 5
          }), 12)
      self.assertListEqual(self.dynamic_calls, [((1, 2, 3), {'b': 4, 'c': 5})])

  def test_function_with_kwargs_starargs_only(self):

    def f(*args):
      return sum(args)

    def test_fn():
      args = [1, 20, 300]
      return f(*args) + 4000

    with self.converted(test_fn, (function_scopes, call_trees),
                        {'f': f}) as result:
      self.assertEqual(result.test_fn(), 4321)
      self.assertListEqual(self.dynamic_calls, [((1, 20, 300), None)])

  def test_function_with_kwargs_keywords(self):

    def test_fn(f, a, b, **kwargs):
      return f(a, b=b, **kwargs) + 5

    with self.converted(test_fn, (function_scopes, call_trees), {}) as result:
      self.assertEqual(
          result.test_fn(lambda *args, **kwargs: 7, 1, 2, **{'c': 3}), 12)
      self.assertListEqual(self.dynamic_calls, [((1,), {'b': 2, 'c': 3})])

  def test_debugger_set_trace(self):

    tracking_list = []

    # A fake pdb module whose set_trace records that it was called.
    pdb = imp.new_module('fake_pdb')
    pdb.set_trace = lambda: tracking_list.append(1)

    def test_fn():
      return pdb.set_trace()

    with self.converted(test_fn, (function_scopes, call_trees),
                        {'pdb': pdb}) as result:
      result.test_fn()
      self.assertListEqual(tracking_list, [1])

  def test_class_method(self):

    class TestClass(object):

      def other_method(self, x):
        return x + 20

      def test_method(self, a):
        return self.other_method(a) + 300

    tc = TestClass()
    with self.converted(TestClass.test_method, (function_scopes, call_trees),
                        {}) as result:
      self.assertEqual(321, result.test_method(tc, 1))
      self.assertListEqual(self.dynamic_calls, [((1,), None)])

  def test_object_method(self):

    class TestClass(object):

      def other_method(self, x):
        return x + 20

      def test_method(self, a):
        return self.other_method(a) + 300

    tc = TestClass()
    with self.converted(tc.test_method, (function_scopes, call_trees),
                        {}) as result:
      self.assertEqual(321, result.test_method(tc, 1))
      self.assertListEqual(self.dynamic_calls, [((1,), None)])
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/python/autograph/converters/call_trees_test.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.