diff --git a/.gitattributes b/.gitattributes
index 9d763df862814a7d3a9b949a86f59287b4de15ba..447003ec6470841df032ad5b6794eeab96c87980 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -810,3 +810,4 @@ videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_py_exception_r
 videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_tfcompile.so filter=lfs diff=lfs merge=lfs -text
 videochat2/lib/python3.10/site-packages/tensorflow/python/flags_pybind.so filter=lfs diff=lfs merge=lfs -text
 videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_quantize_training.so filter=lfs diff=lfs merge=lfs -text
+videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_toco_api.so filter=lfs diff=lfs merge=lfs -text
diff --git a/llava_next/share/terminfo/p/p7 b/llava_next/share/terminfo/p/p7
new file mode 100644
index 0000000000000000000000000000000000000000..539b4ff6570496b03f33a1c661ff661137089364
Binary files /dev/null and b/llava_next/share/terminfo/p/p7 differ
diff --git a/llava_next/share/terminfo/p/pc7300 b/llava_next/share/terminfo/p/pc7300
new file mode 100644
index 0000000000000000000000000000000000000000..b41843d6a40a7d583c1dab1a8ab7da28442fbab6
Binary files /dev/null and b/llava_next/share/terminfo/p/pc7300 differ
diff --git a/llava_next/share/terminfo/p/pccon0 b/llava_next/share/terminfo/p/pccon0
new file mode 100644
index 0000000000000000000000000000000000000000..e3553fbf74d37c40fd3e89f469c8b58e2dee703d
Binary files /dev/null and b/llava_next/share/terminfo/p/pccon0 differ
diff --git a/llava_next/share/terminfo/p/pcconsole b/llava_next/share/terminfo/p/pcconsole
new file mode 100644
index 0000000000000000000000000000000000000000..5d7c9457abb34e75d0d0c1f5d58b68eefa697978
Binary files /dev/null and b/llava_next/share/terminfo/p/pcconsole differ
diff --git a/llava_next/share/terminfo/p/pcvt25 b/llava_next/share/terminfo/p/pcvt25
new file mode 100644
index 0000000000000000000000000000000000000000..1de663551f07ebe9f214c226fc8e7b9b1b96295d
Binary files /dev/null and b/llava_next/share/terminfo/p/pcvt25 differ
diff --git a/llava_next/share/terminfo/p/pcvtXX b/llava_next/share/terminfo/p/pcvtXX
new file mode 100644
index 0000000000000000000000000000000000000000..3d28171d0f224d01b3228e013d8135c598788e7f
Binary files /dev/null and b/llava_next/share/terminfo/p/pcvtXX differ
diff --git a/llava_next/share/terminfo/p/pe1251 b/llava_next/share/terminfo/p/pe1251
new file mode 100644
index 0000000000000000000000000000000000000000..01acc1b70c66fd6eb748506db7d001cdc8e487a1
Binary files /dev/null and b/llava_next/share/terminfo/p/pe1251 differ
diff --git a/llava_next/share/terminfo/p/pe550 b/llava_next/share/terminfo/p/pe550
new file mode 100644
index 0000000000000000000000000000000000000000..2dc4fa3a3cb77217ddcc0d8bd8fcc65577fcc5f2
Binary files /dev/null and b/llava_next/share/terminfo/p/pe550 differ
diff --git a/llava_next/share/terminfo/p/prism14-w b/llava_next/share/terminfo/p/prism14-w
new file mode 100644
index 0000000000000000000000000000000000000000..1d7df53b710aff295e42eeca63d45879279a70fe
Binary files /dev/null and b/llava_next/share/terminfo/p/prism14-w differ
diff --git a/parrot/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/partitioner_utils.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/partitioner_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..76ac020fb4885f235f9d4f07c67d8f8dfbf68167
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/partitioner_utils.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/rewriter.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/rewriter.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e0fcb87adc815ecd4dd075690b96d873ccd85c90
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/rewriter.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/core.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/core.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e39e6c7edf43ee809d99a69f73e1d22e1465b532
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/core.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/dispatch.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/dispatch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c59762a31616325387b8b70f9afe4baa8dcb1cad
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/dispatch.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/unification_tools.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/unification_tools.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b9d266421ebd8bfdc9a2ce3791731b6365de9be1
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/unification_tools.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/utils.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e9ae5930f554ed4b359f9ced928eb6cc5f5b7253
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/utils.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/torch/mtia/_utils.py b/parrot/lib/python3.10/site-packages/torch/mtia/_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..090e26f321232f9687c2b348ac602dbb6699b03f
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/mtia/_utils.py
@@ -0,0 +1,38 @@
+from typing import Any
+
+import torch
+
+# The _get_device_index has been moved to torch.utils._get_device_index
+from torch._utils import _get_device_index as _torch_get_device_index
+
+
+def _get_device_index(
+    device: Any, optional: bool = False, allow_cpu: bool = False
+) -> int:
+    r"""Get the device index from :attr:`device`, which can be a torch.device object, a Python integer, or ``None``.
+
+    If :attr:`device` is a torch.device object, returns the device index if it
+    is an MTIA device. Note that for an MTIA device without a specified index,
+    i.e., ``torch.device('mtia')``, this will return the current default MTIA
+    device if :attr:`optional` is ``True``. If :attr:`allow_cpu` is ``True``,
+    CPU devices will be accepted and ``-1`` will be returned in this case.
+
+    If :attr:`device` is a Python integer, it is returned as is.
+
+    If :attr:`device` is ``None``, this will return the current default MTIA
+    device if :attr:`optional` is ``True``.
+    """
+    if isinstance(device, int):
+        return device
+    if isinstance(device, str):
+        device = torch.device(device)
+    if isinstance(device, torch.device):
+        if allow_cpu:
+            if device.type not in ["mtia", "cpu"]:
+                raise ValueError(f"Expected a mtia or cpu device, but got: {device}")
+        elif device.type != "mtia":
+            raise ValueError(f"Expected a mtia device, but got: {device}")
+    if not torch.jit.is_scripting():
+        if isinstance(device, torch.mtia.device):
+            return device.idx
+    return _torch_get_device_index(device, optional, allow_cpu)
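For orientation, the helper above normalizes the usual ways a device can be named (int, string, torch.device, None). A minimal sketch of its expected behavior, assuming a torch build where the MTIA backend is actually present:

# sketch: how torch.mtia._utils._get_device_index resolves inputs
import torch
from torch.mtia._utils import _get_device_index

# Plain integers pass through untouched.
assert _get_device_index(3) == 3

# Strings are parsed into torch.device first, then the index is extracted.
assert _get_device_index("mtia:1") == 1

# CPU is only tolerated with allow_cpu=True, and maps to -1.
assert _get_device_index(torch.device("cpu"), allow_cpu=True) == -1

# A bare "mtia" device has no index; optional=True falls back to the
# current default MTIA device instead of raising.
idx = _get_device_index(torch.device("mtia"), optional=True)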
diff --git a/parrot/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/CMakeInitializeConfigs.cmake b/parrot/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/CMakeInitializeConfigs.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..5517e8f0624b1e5538b761e1f4891227007d0045
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/CMakeInitializeConfigs.cmake
@@ -0,0 +1,40 @@
+# Distributed under the OSI-approved BSD 3-Clause License.  See accompanying
+# file Copyright.txt or https://cmake.org/licensing for details.
+
+# Present in upstream, but not supported on versions of cmake we need to support
+# include_guard(GLOBAL)
+
+# Initializes `<_PREFIX>_<CONFIG>` variables from the corresponding
+# `<_PREFIX>_<CONFIG>_INIT`, for the configurations currently used.
+function(cmake_initialize_per_config_variable _PREFIX _DOCSTRING)
+  string(STRIP "${${_PREFIX}_INIT}" _INIT)
+  set("${_PREFIX}" "${_INIT}"
+    CACHE STRING "${_DOCSTRING} during all build types.")
+  mark_as_advanced("${_PREFIX}")
+
+  if (NOT CMAKE_NOT_USING_CONFIG_FLAGS)
+    set(_CONFIGS Debug Release MinSizeRel RelWithDebInfo)
+
+    get_property(_GENERATOR_IS_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
+    if (_GENERATOR_IS_MULTI_CONFIG)
+      list(APPEND _CONFIGS ${CMAKE_CONFIGURATION_TYPES})
+    else()
+      if (NOT CMAKE_NO_BUILD_TYPE)
+        set(CMAKE_BUILD_TYPE "${CMAKE_BUILD_TYPE_INIT}" CACHE STRING
+          "Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel ...")
+      endif()
+      list(APPEND _CONFIGS ${CMAKE_BUILD_TYPE})
+    endif()
+
+    list(REMOVE_DUPLICATES _CONFIGS)
+    foreach(_BUILD_TYPE IN LISTS _CONFIGS)
+      if (NOT "${_BUILD_TYPE}" STREQUAL "")
+        string(TOUPPER "${_BUILD_TYPE}" _BUILD_TYPE)
+        string(STRIP "${${_PREFIX}_${_BUILD_TYPE}_INIT}" _INIT)
+        set("${_PREFIX}_${_BUILD_TYPE}" "${_INIT}"
+          CACHE STRING "${_DOCSTRING} during ${_BUILD_TYPE} builds.")
+        mark_as_advanced("${_PREFIX}_${_BUILD_TYPE}")
+      endif()
+    endforeach()
+  endif()
+endfunction()
diff --git a/parrot/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/parse_cubin.cmake b/parrot/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/parse_cubin.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..25ceb49f3dd8e684e35cac49834c4db0aa5c338a
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/parse_cubin.cmake
@@ -0,0 +1,109 @@
+#  James Bigler, NVIDIA Corp (nvidia.com - jbigler)
+#  Abe Stephens, SCI Institute -- http://www.sci.utah.edu/~abe/FindCuda.html
+#
+#  Copyright (c) 2008 - 2009 NVIDIA Corporation.  All rights reserved.
+#
+#  Copyright (c) 2007-2009
+#  Scientific Computing and Imaging Institute, University of Utah
+#
+#  This code is licensed under the MIT License.  See the FindCUDA.cmake script
+#  for the text of the license.
+
+# The MIT License
+#
+# License for the specific language governing rights and limitations under
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+#
+
+#######################################################################
+# Parses a .cubin file produced by nvcc and reports statistics about the file.
+
+
+file(READ ${input_file} file_text)
+
+if (NOT "${file_text}" STREQUAL "")
+
+  string(REPLACE ";" "\\;" file_text ${file_text})
+  string(REPLACE "\ncode" ";code" file_text ${file_text})
+
+  list(LENGTH file_text len)
+
+  foreach(line ${file_text})
+
+    # Only look at "code { }" blocks.
+    if(line MATCHES "^code")
+
+      # Break into individual lines.
+      string(REGEX REPLACE "\n" ";" line ${line})
+
+      foreach(entry ${line})
+
+        # Extract kernel names.
+        if (${entry} MATCHES "[^g]name = ([^ ]+)")
+          set(entry "${CMAKE_MATCH_1}")
+
+          # Check to see if the kernel name starts with "_"
+          set(skip FALSE)
+          # if (${entry} MATCHES "^_")
+            # Skip the rest of this block.
+            # message("Skipping ${entry}")
+            # set(skip TRUE)
+          # else ()
+            message("Kernel:    ${entry}")
+          # endif ()
+
+        endif()
+
+        # Skip the rest of the block if necessary
+        if(NOT skip)
+
+          # Registers
+          if (${entry} MATCHES "reg([ ]+)=([ ]+)([^ ]+)")
+            set(entry "${CMAKE_MATCH_3}")
+            message("Registers: ${entry}")
+          endif()
+
+          # Local memory
+          if (${entry} MATCHES "lmem([ ]+)=([ ]+)([^ ]+)")
+            set(entry "${CMAKE_MATCH_3}")
+            message("Local:     ${entry}")
+          endif()
+
+          # Shared memory
+          if (${entry} MATCHES "smem([ ]+)=([ ]+)([^ ]+)")
+            set(entry "${CMAKE_MATCH_3}")
+            message("Shared:    ${entry}")
+          endif()
+
+          if (${entry} MATCHES "^}")
+            message("")
+          endif()
+
+        endif()
+
+
+      endforeach()
+
+    endif()
+
+  endforeach()
+
+else()
+  # message("FOUND NO DEPENDS")
+endif()
diff --git a/parrot/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/run_nvcc.cmake b/parrot/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/run_nvcc.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..9293df3aafbdefdd8664ae2860d1b5b7fc9bfbfb
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/run_nvcc.cmake
@@ -0,0 +1,303 @@
+# James Bigler, NVIDIA Corp (nvidia.com - jbigler)
+#
+# Copyright (c) 2008 - 2009 NVIDIA Corporation.  All rights reserved.
+#
+# This code is licensed under the MIT License.  See the FindCUDA.cmake script
+# for the text of the license.
+
+# The MIT License
+#
+# License for the specific language governing rights and limitations under
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+
+
+##########################################################################
+# This file runs the nvcc commands to produce the desired output file along with
+# the dependency file needed by CMake to compute dependencies.  In addition, the
+# file checks the output of each command; if a command fails, it deletes the
+# output files.
+
+# Input variables
+#
+# verbose:BOOL=<>               OFF: Be as quiet as possible (default)
+#                               ON : Describe each step
+#
+# build_configuration:STRING=<> Typically one of Debug, MinSizeRel, Release, or
+#                               RelWithDebInfo, but it should match one of the
+#                               entries in CUDA_HOST_FLAGS.  This is the build
+#                               configuration used when compiling the code.  If
+#                               blank or unspecified, Debug is assumed, as this is
+#                               what CMake does.
+#
+# generated_file:STRING=<>      File to generate.  This argument must be passed in.
+#
+# generated_cubin_file:STRING=<> File to generate.  This argument must be passed
+#                                in if build_cubin is true.
+
+cmake_policy(PUSH)
+cmake_policy(SET CMP0007 NEW)
+cmake_policy(SET CMP0010 NEW)
+if(NOT generated_file)
+  message(FATAL_ERROR "You must specify generated_file on the command line")
+endif()
+
+# Set these up as variables to make reading the generated file easier
+set(CMAKE_COMMAND "@CMAKE_COMMAND@") # path
+set(source_file "@source_file@") # path
+set(NVCC_generated_dependency_file "@NVCC_generated_dependency_file@") # path
+set(cmake_dependency_file "@cmake_dependency_file@") # path
+set(CUDA_make2cmake "@CUDA_make2cmake@") # path
+set(CUDA_parse_cubin "@CUDA_parse_cubin@") # path
+set(build_cubin @build_cubin@) # bool
+set(CUDA_HOST_COMPILER "@CUDA_HOST_COMPILER@") # path
+# We won't actually use these variables for now, but we need to set this, in
+# order to force this file to be run again if it changes.
+set(generated_file_path "@generated_file_path@") # path
+set(generated_file_internal "@generated_file@") # path
+set(generated_cubin_file_internal "@generated_cubin_file@") # path
+
+set(CUDA_NVCC_EXECUTABLE "@CUDA_NVCC_EXECUTABLE@") # path
+set(CUDA_NVCC_FLAGS @CUDA_NVCC_FLAGS@ ;; @CUDA_WRAP_OPTION_NVCC_FLAGS@) # list
+@CUDA_NVCC_FLAGS_CONFIG@
+set(nvcc_flags @nvcc_flags@) # list
+set(CUDA_NVCC_INCLUDE_DIRS [==[@CUDA_NVCC_INCLUDE_DIRS@]==]) # list (needs to be in lua quotes to address backslashes)
+string(REPLACE "\\" "/" CUDA_NVCC_INCLUDE_DIRS "${CUDA_NVCC_INCLUDE_DIRS}")
+set(CUDA_NVCC_COMPILE_DEFINITIONS [==[@CUDA_NVCC_COMPILE_DEFINITIONS@]==]) # list (needs to be in lua quotes see #16510).
+set(format_flag "@format_flag@") # string
+set(cuda_language_flag @cuda_language_flag@) # list
+
+# Clean up list of include directories and add -I flags
+list(REMOVE_DUPLICATES CUDA_NVCC_INCLUDE_DIRS)
+set(CUDA_NVCC_INCLUDE_ARGS)
+foreach(dir ${CUDA_NVCC_INCLUDE_DIRS})
+  # Extra quotes are added around each flag to help nvcc parse out flags with spaces.
+  list(APPEND CUDA_NVCC_INCLUDE_ARGS "-I${dir}")
+endforeach()
+
+# Clean up list of compile definitions, add -D flags, and append to nvcc_flags
+list(REMOVE_DUPLICATES CUDA_NVCC_COMPILE_DEFINITIONS)
+foreach(def ${CUDA_NVCC_COMPILE_DEFINITIONS})
+  list(APPEND nvcc_flags "-D${def}")
+endforeach()
+
+if(build_cubin AND NOT generated_cubin_file)
+  message(FATAL_ERROR "You must specify generated_cubin_file on the command line")
+endif()
+
+# This is the list of host compilation flags.  Either C or CXX should already have
+# been chosen by FindCUDA.cmake.
+@CUDA_HOST_FLAGS@
+
+# Take the compiler flags and package them up to be sent to the compiler via -Xcompiler
+set(nvcc_host_compiler_flags "")
+# If we weren't given a build_configuration, use Debug.
+if(NOT build_configuration)
+  set(build_configuration Debug)
+endif()
+string(TOUPPER "${build_configuration}" build_configuration)
+#message("CUDA_NVCC_HOST_COMPILER_FLAGS = ${CUDA_NVCC_HOST_COMPILER_FLAGS}")
+foreach(flag ${CMAKE_HOST_FLAGS} ${CMAKE_HOST_FLAGS_${build_configuration}})
+  # Extra quotes are added around each flag to help nvcc parse out flags with spaces.
+  string(APPEND nvcc_host_compiler_flags ",\"${flag}\"")
+endforeach()
+if (nvcc_host_compiler_flags)
+  set(nvcc_host_compiler_flags "-Xcompiler" ${nvcc_host_compiler_flags})
+endif()
+#message("nvcc_host_compiler_flags = \"${nvcc_host_compiler_flags}\"")
+# Add the build specific configuration flags
+list(APPEND CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS_${build_configuration}})
+
+# Any -ccbin existing in CUDA_NVCC_FLAGS gets highest priority
+list( FIND CUDA_NVCC_FLAGS "-ccbin" ccbin_found0 )
+list( FIND CUDA_NVCC_FLAGS "--compiler-bindir" ccbin_found1 )
+if( ccbin_found0 LESS 0 AND ccbin_found1 LESS 0 AND CUDA_HOST_COMPILER )
+  if (CUDA_HOST_COMPILER STREQUAL "@_CUDA_MSVC_HOST_COMPILER@" AND DEFINED CCBIN)
+    set(CCBIN -ccbin "${CCBIN}")
+  else()
+    set(CCBIN -ccbin "${CUDA_HOST_COMPILER}")
+  endif()
+endif()
+
+# cuda_execute_process - Executes a command with optional command echo and status message.
+#
+#   status     - Status message to print if verbose is true
+#   command    - COMMAND argument from the usual execute_process argument structure
+#   ARGN       - Remaining arguments are the command with arguments
+#
+#   CUDA_result - return value from running the command
+#
+# Make this a macro instead of a function, so that things like RESULT_VARIABLE
+# and other return variables are present after executing the process.
+macro(cuda_execute_process status command)
+  set(_command ${command})
+  if(NOT "x${_command}" STREQUAL "xCOMMAND")
+    message(FATAL_ERROR "Malformed call to cuda_execute_process.  Missing COMMAND as second argument. (command = ${command})")
+  endif()
+  if(verbose)
+    execute_process(COMMAND "${CMAKE_COMMAND}" -E echo -- ${status})
+    # Now we need to build up our command string.  We are accounting for quotes
+    # and spaces, anything else is left up to the user to fix if they want to
+    # copy and paste a runnable command line.
+    set(cuda_execute_process_string)
+    foreach(arg ${ARGN})
+      # If there are quotes, escape them, so they come through.
+      string(REPLACE "\"" "\\\"" arg ${arg})
+      # Args with spaces need quotes around them to get them to be parsed as a single argument.
+      if(arg MATCHES " ")
+        list(APPEND cuda_execute_process_string "\"${arg}\"")
+      else()
+        list(APPEND cuda_execute_process_string ${arg})
+      endif()
+    endforeach()
+    # Echo the command
+    execute_process(COMMAND ${CMAKE_COMMAND} -E echo ${cuda_execute_process_string})
+  endif()
+  # Run the command
+  execute_process(COMMAND ${ARGN} RESULT_VARIABLE CUDA_result )
+endmacro()
+
+# Delete the target file
+cuda_execute_process(
+  "Removing ${generated_file}"
+  COMMAND "${CMAKE_COMMAND}" -E remove "${generated_file}"
+  )
+
+# For CUDA 2.3 and below, -G -M doesn't work, so remove the -G flag
+# for dependency generation and hope for the best.
+set(depends_CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS}")
+set(CUDA_VERSION @CUDA_VERSION@)
+
+# nvcc doesn't define __CUDACC__ for some reason when generating dependency files.  This
+# can cause incorrect dependencies when #including files based on this macro which is
+# defined in the generating passes of nvcc invocation.  We will go ahead and manually
+# define this for now until a future version fixes this bug.
+set(CUDACC_DEFINE -D__CUDACC__)
+
+# Generate the dependency file
+cuda_execute_process(
+  "Generating dependency file: ${NVCC_generated_dependency_file}"
+  COMMAND "${CUDA_NVCC_EXECUTABLE}"
+  -M
+  ${CUDACC_DEFINE}
+  "${source_file}"
+  -o "${NVCC_generated_dependency_file}"
+  ${CCBIN}
+  ${nvcc_flags}
+  ${nvcc_host_compiler_flags}
+  ${depends_CUDA_NVCC_FLAGS}
+  -DNVCC
+  ${CUDA_NVCC_INCLUDE_ARGS}
+  )
+
+if(CUDA_result)
+  message(FATAL_ERROR "Error generating ${generated_file}")
+endif()
+
+# Generate the cmake readable dependency file to a temp file.  Don't put the
+# quotes just around the filenames for the input_file and output_file variables.
+# CMake will pass the quotes through and not be able to find the file.
+cuda_execute_process(
+  "Generating temporary cmake readable file: ${cmake_dependency_file}.tmp"
+  COMMAND "${CMAKE_COMMAND}"
+  -D "input_file:FILEPATH=${NVCC_generated_dependency_file}"
+  -D "output_file:FILEPATH=${cmake_dependency_file}.tmp"
+  -D "verbose=${verbose}"
+  -P "${CUDA_make2cmake}"
+  )
+
+if(CUDA_result)
+  message(FATAL_ERROR "Error generating ${generated_file}")
+endif()
+
+# Copy the file if it is different
+cuda_execute_process(
+  "Copy if different ${cmake_dependency_file}.tmp to ${cmake_dependency_file}"
+  COMMAND "${CMAKE_COMMAND}" -E copy_if_different "${cmake_dependency_file}.tmp" "${cmake_dependency_file}"
+  )
+
+if(CUDA_result)
+  message(FATAL_ERROR "Error generating ${generated_file}")
+endif()
+
+# Delete the temporary file
+cuda_execute_process(
+  "Removing ${cmake_dependency_file}.tmp and ${NVCC_generated_dependency_file}"
+  COMMAND "${CMAKE_COMMAND}" -E remove "${cmake_dependency_file}.tmp" "${NVCC_generated_dependency_file}"
+  )
+
+if(CUDA_result)
+  message(FATAL_ERROR "Error generating ${generated_file}")
+endif()
+
+# Generate the code
+cuda_execute_process(
+  "Generating ${generated_file}"
+  COMMAND "${CUDA_NVCC_EXECUTABLE}"
+  "${source_file}"
+  ${cuda_language_flag}
+  ${format_flag} -o "${generated_file}"
+  ${CCBIN}
+  ${nvcc_flags}
+  ${nvcc_host_compiler_flags}
+  ${CUDA_NVCC_FLAGS}
+  -DNVCC
+  ${CUDA_NVCC_INCLUDE_ARGS}
+  )
+
+if(CUDA_result)
+  # Since nvcc can sometimes leave half done files make sure that we delete the output file.
+  cuda_execute_process(
+    "Removing ${generated_file}"
+    COMMAND "${CMAKE_COMMAND}" -E remove "${generated_file}"
+    )
+  message(FATAL_ERROR "Error generating file ${generated_file}")
+else()
+  if(verbose)
+    message("Generated ${generated_file} successfully.")
+  endif()
+endif()
+
+# Cubin resource report commands.
+if( build_cubin )
+  # Run with -cubin to produce resource usage report.
+  cuda_execute_process(
+    "Generating ${generated_cubin_file}"
+    COMMAND "${CUDA_NVCC_EXECUTABLE}"
+    "${source_file}"
+    ${CUDA_NVCC_FLAGS}
+    ${nvcc_flags}
+    ${CCBIN}
+    ${nvcc_host_compiler_flags}
+    -DNVCC
+    -cubin
+    -o "${generated_cubin_file}"
+    ${CUDA_NVCC_INCLUDE_ARGS}
+    )
+
+  # Execute the parser script.
+  cuda_execute_process(
+    "Executing the parser script"
+    COMMAND "${CMAKE_COMMAND}"
+    -D "input_file:STRING=${generated_cubin_file}"
+    -P "${CUDA_parse_cubin}"
+    )
+
+endif()
+
+cmake_policy(POP)
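The overall control flow of run_nvcc.cmake is: delete the stale output, run nvcc once with -M to generate dependencies, convert and install the dependency file, run the real compile, and delete the (possibly half-written) output if any step fails. A compressed Python sketch of that shape, with hypothetical file names; only the -M, -D__CUDACC__, -c, and -o flags are taken from the script above:

# sketch: the run-step-or-clean-up pipeline from run_nvcc.cmake
import subprocess
import sys
from pathlib import Path

generated = Path("kernel.cu.o")  # hypothetical output path

def run(step: str, *cmd: str) -> None:
    # Mirrors cuda_execute_process plus the surrounding error handling:
    # announce the step, run it, and on failure remove the output and abort.
    print("--", step)
    if subprocess.run(cmd).returncode != 0:
        generated.unlink(missing_ok=True)
        sys.exit(f"Error generating {generated}")

generated.unlink(missing_ok=True)  # "Removing ${generated_file}"

# Pass 1: dependency scan; __CUDACC__ must be defined by hand (see the comment above).
run("Generating dependency file",
    "nvcc", "-M", "-D__CUDACC__", "kernel.cu", "-o", "kernel.cu.o.NVCC-depend")

# Pass 2: the actual compile into the requested format (-c here for an object file).
run(f"Generating {generated}",
    "nvcc", "kernel.cu", "-c", "-o", str(generated))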
CUDA_COMMON_GPU_ARCHITECTURES "8.9+PTX") + list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "9.0+PTX") + endif() +endif() + +if(NOT CUDA_VERSION VERSION_LESS "12.0") + list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "9.0a") + list(APPEND CUDA_ALL_GPU_ARCHITECTURES "9.0a") + list(REMOVE_ITEM CUDA_COMMON_GPU_ARCHITECTURES "3.5") + list(REMOVE_ITEM CUDA_ALL_GPU_ARCHITECTURES "3.5") +endif() + +################################################################################################ +# A function for automatic detection of GPUs installed (if autodetection is enabled) +# Usage: +# CUDA_DETECT_INSTALLED_GPUS(OUT_VARIABLE) +# +function(CUDA_DETECT_INSTALLED_GPUS OUT_VARIABLE) + if(NOT CUDA_GPU_DETECT_OUTPUT) + if(CMAKE_CUDA_COMPILER_LOADED) # CUDA as a language + set(file "${PROJECT_BINARY_DIR}/detect_cuda_compute_capabilities.cu") + else() + set(file "${PROJECT_BINARY_DIR}/detect_cuda_compute_capabilities.cpp") + endif() + + file(WRITE ${file} "" + "#include \n" + "#include \n" + "int main()\n" + "{\n" + " int count = 0;\n" + " if (cudaSuccess != cudaGetDeviceCount(&count)) return -1;\n" + " if (count == 0) return -1;\n" + " for (int device = 0; device < count; ++device)\n" + " {\n" + " cudaDeviceProp prop;\n" + " if (cudaSuccess == cudaGetDeviceProperties(&prop, device))\n" + " std::printf(\"%d.%d \", prop.major, prop.minor);\n" + " }\n" + " return 0;\n" + "}\n") + + if(CMAKE_CUDA_COMPILER_LOADED) # CUDA as a language + try_run(run_result compile_result ${PROJECT_BINARY_DIR} ${file} + RUN_OUTPUT_VARIABLE compute_capabilities) + else() + try_run(run_result compile_result ${PROJECT_BINARY_DIR} ${file} + CMAKE_FLAGS "-DINCLUDE_DIRECTORIES=${CUDA_INCLUDE_DIRS}" + LINK_LIBRARIES ${CUDA_LIBRARIES} + RUN_OUTPUT_VARIABLE compute_capabilities) + endif() + + # Filter unrelated content out of the output. + string(REGEX MATCHALL "[0-9]+\\.[0-9]+" compute_capabilities "${compute_capabilities}") + + if(run_result EQUAL 0) + string(REPLACE "2.1" "2.1(2.0)" compute_capabilities "${compute_capabilities}") + set(CUDA_GPU_DETECT_OUTPUT ${compute_capabilities} + CACHE INTERNAL "Returned GPU architectures from detect_gpus tool" FORCE) + endif() + endif() + + if(NOT CUDA_GPU_DETECT_OUTPUT) + message(STATUS "Automatic GPU detection failed. 
Building for common architectures.") + set(${OUT_VARIABLE} ${CUDA_COMMON_GPU_ARCHITECTURES} PARENT_SCOPE) + else() + # Filter based on CUDA version supported archs + set(CUDA_GPU_DETECT_OUTPUT_FILTERED "") + separate_arguments(CUDA_GPU_DETECT_OUTPUT) + foreach(ITEM IN ITEMS ${CUDA_GPU_DETECT_OUTPUT}) + if(CUDA_LIMIT_GPU_ARCHITECTURE AND (ITEM VERSION_GREATER CUDA_LIMIT_GPU_ARCHITECTURE OR + ITEM VERSION_EQUAL CUDA_LIMIT_GPU_ARCHITECTURE)) + list(GET CUDA_COMMON_GPU_ARCHITECTURES -1 NEWITEM) + string(APPEND CUDA_GPU_DETECT_OUTPUT_FILTERED " ${NEWITEM}") + else() + string(APPEND CUDA_GPU_DETECT_OUTPUT_FILTERED " ${ITEM}") + endif() + endforeach() + + set(${OUT_VARIABLE} ${CUDA_GPU_DETECT_OUTPUT_FILTERED} PARENT_SCOPE) + endif() +endfunction() + + +################################################################################################ +# Function for selecting GPU arch flags for nvcc based on CUDA architectures from parameter list +# Usage: +# SELECT_NVCC_ARCH_FLAGS(out_variable [list of CUDA compute archs]) +function(CUDA_SELECT_NVCC_ARCH_FLAGS out_variable) + set(CUDA_ARCH_LIST "${ARGN}") + + if("X${CUDA_ARCH_LIST}" STREQUAL "X" ) + set(CUDA_ARCH_LIST "Auto") + endif() + + set(cuda_arch_bin) + set(cuda_arch_ptx) + + if("${CUDA_ARCH_LIST}" STREQUAL "All") + set(CUDA_ARCH_LIST ${CUDA_KNOWN_GPU_ARCHITECTURES}) + elseif("${CUDA_ARCH_LIST}" STREQUAL "Common") + set(CUDA_ARCH_LIST ${CUDA_COMMON_GPU_ARCHITECTURES}) + elseif("${CUDA_ARCH_LIST}" STREQUAL "Auto") + CUDA_DETECT_INSTALLED_GPUS(CUDA_ARCH_LIST) + message(STATUS "Autodetected CUDA architecture(s): ${CUDA_ARCH_LIST}") + endif() + + # Now process the list and look for names + string(REGEX REPLACE "[ \t]+" ";" CUDA_ARCH_LIST "${CUDA_ARCH_LIST}") + list(REMOVE_DUPLICATES CUDA_ARCH_LIST) + foreach(arch_name ${CUDA_ARCH_LIST}) + set(arch_bin) + set(arch_ptx) + set(add_ptx FALSE) + # Check to see if we are compiling PTX + if(arch_name MATCHES "(.*)\\+PTX$") + set(add_ptx TRUE) + set(arch_name ${CMAKE_MATCH_1}) + endif() + if(arch_name MATCHES "^([0-9]\\.[0-9]a?(\\([0-9]\\.[0-9]\\))?)$") + set(arch_bin ${CMAKE_MATCH_1}) + set(arch_ptx ${arch_bin}) + else() + # Look for it in our list of known architectures + if(${arch_name} STREQUAL "Kepler+Tesla") + set(arch_bin 3.7) + elseif(${arch_name} STREQUAL "Kepler") + set(arch_bin 3.5) + set(arch_ptx 3.5) + elseif(${arch_name} STREQUAL "Maxwell+Tegra") + set(arch_bin 5.3) + elseif(${arch_name} STREQUAL "Maxwell") + set(arch_bin 5.0 5.2) + set(arch_ptx 5.2) + elseif(${arch_name} STREQUAL "Pascal") + set(arch_bin 6.0 6.1) + set(arch_ptx 6.1) + elseif(${arch_name} STREQUAL "Volta+Tegra") + set(arch_bin 7.2) + elseif(${arch_name} STREQUAL "Volta") + set(arch_bin 7.0 7.0) + set(arch_ptx 7.0) + elseif(${arch_name} STREQUAL "Turing") + set(arch_bin 7.5) + set(arch_ptx 7.5) + elseif(${arch_name} STREQUAL "Ampere+Tegra") + set(arch_bin 8.7) + elseif(${arch_name} STREQUAL "Ampere") + set(arch_bin 8.0 8.6) + set(arch_ptx 8.0 8.6) + elseif(${arch_name} STREQUAL "Ada") + set(arch_bin 8.9) + set(arch_ptx 8.9) + elseif(${arch_name} STREQUAL "Hopper") + set(arch_bin 9.0) + set(arch_ptx 9.0) + else() + message(SEND_ERROR "Unknown CUDA Architecture Name ${arch_name} in CUDA_SELECT_NVCC_ARCH_FLAGS") + endif() + endif() + if(NOT arch_bin) + message(SEND_ERROR "arch_bin wasn't set for some reason") + endif() + list(APPEND cuda_arch_bin ${arch_bin}) + if(add_ptx) + if (NOT arch_ptx) + set(arch_ptx ${arch_bin}) + endif() + list(APPEND cuda_arch_ptx ${arch_ptx}) + endif() + endforeach() + + # remove dots and convert to 
lists + string(REGEX REPLACE "\\." "" cuda_arch_bin "${cuda_arch_bin}") + string(REGEX REPLACE "\\." "" cuda_arch_ptx "${cuda_arch_ptx}") + string(REGEX MATCHALL "[0-9()]+a?" cuda_arch_bin "${cuda_arch_bin}") + string(REGEX MATCHALL "[0-9]+a?" cuda_arch_ptx "${cuda_arch_ptx}") + + if(cuda_arch_bin) + list(REMOVE_DUPLICATES cuda_arch_bin) + endif() + if(cuda_arch_ptx) + list(REMOVE_DUPLICATES cuda_arch_ptx) + endif() + + set(nvcc_flags "") + set(nvcc_archs_readable "") + + # Tell NVCC to add binaries for the specified GPUs + foreach(arch ${cuda_arch_bin}) + if(arch MATCHES "([0-9]+)\\(([0-9]+)\\)") + # User explicitly specified ARCH for the concrete CODE + list(APPEND nvcc_flags -gencode arch=compute_${CMAKE_MATCH_2},code=sm_${CMAKE_MATCH_1}) + list(APPEND nvcc_archs_readable sm_${CMAKE_MATCH_1}) + else() + # User didn't explicitly specify ARCH for the concrete CODE, we assume ARCH=CODE + list(APPEND nvcc_flags -gencode arch=compute_${arch},code=sm_${arch}) + list(APPEND nvcc_archs_readable sm_${arch}) + endif() + endforeach() + + # Tell NVCC to add PTX intermediate code for the specified architectures + foreach(arch ${cuda_arch_ptx}) + list(APPEND nvcc_flags -gencode arch=compute_${arch},code=compute_${arch}) + list(APPEND nvcc_archs_readable compute_${arch}) + endforeach() + + string(REPLACE ";" " " nvcc_archs_readable "${nvcc_archs_readable}") + set(${out_variable} ${nvcc_flags} PARENT_SCOPE) + set(${out_variable}_readable ${nvcc_archs_readable} PARENT_SCOPE) +endfunction() diff --git a/parrot/lib/python3.10/site-packages/torch/share/cmake/Caffe2/public/xpu.cmake b/parrot/lib/python3.10/site-packages/torch/share/cmake/Caffe2/public/xpu.cmake new file mode 100644 index 0000000000000000000000000000000000000000..d1a442f8efd419afb8c8236cf8c9b880cb1d8b0b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/share/cmake/Caffe2/public/xpu.cmake @@ -0,0 +1,30 @@ +# ---[ xpu + +# Poor man's include guard +if(TARGET torch::xpurt) + return() +endif() + +# Find SYCL library. 
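The tail of CUDA_SELECT_NVCC_ARCH_FLAGS turns each numeric spec into -gencode flags: the NUM.NUM(NUM.NUM) form pins a separate virtual arch for a concrete code, and +PTX additionally embeds forward-compatible PTX. A small Python sketch of that mapping, with the same semantics but not a drop-in replacement:

# sketch: "5.2(5.0)" / "8.6+PTX" style specs -> nvcc -gencode flags
import re

def gencode_flags(specs: list[str]) -> list[str]:
    flags = []
    for spec in specs:
        ptx = spec.endswith("+PTX")
        spec = spec.removesuffix("+PTX").replace(".", "")
        # "52(50)" means: build an sm_52 binary against the compute_50 virtual arch.
        if m := re.fullmatch(r"(\d+a?)\((\d+)\)", spec):
            code, arch = m.groups()
        else:
            code = arch = spec  # ARCH=CODE when only one number is given
        flags += ["-gencode", f"arch=compute_{arch},code=sm_{code}"]
        if ptx:
            flags += ["-gencode", f"arch=compute_{code},code=compute_{code}"]
    return flags

# gencode_flags(["5.2(5.0)", "8.6+PTX"]) ->
#   ['-gencode', 'arch=compute_50,code=sm_52',
#    '-gencode', 'arch=compute_86,code=sm_86',
#    '-gencode', 'arch=compute_86,code=compute_86']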
diff --git a/parrot/lib/python3.10/site-packages/torch/share/cmake/Caffe2/public/xpu.cmake b/parrot/lib/python3.10/site-packages/torch/share/cmake/Caffe2/public/xpu.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..d1a442f8efd419afb8c8236cf8c9b880cb1d8b0b
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/share/cmake/Caffe2/public/xpu.cmake
@@ -0,0 +1,30 @@
+# ---[ xpu
+
+# Poor man's include guard
+if(TARGET torch::xpurt)
+  return()
+endif()
+
+# Find SYCL library.
+find_package(SYCLToolkit REQUIRED)
+if(NOT SYCL_FOUND)
+  set(PYTORCH_FOUND_XPU FALSE)
+  return()
+endif()
+set(PYTORCH_FOUND_XPU TRUE)
+
+# SYCL library interface
+add_library(torch::sycl INTERFACE IMPORTED)
+
+set_property(
+  TARGET torch::sycl PROPERTY INTERFACE_INCLUDE_DIRECTORIES
+  ${SYCL_INCLUDE_DIR})
+set_property(
+  TARGET torch::sycl PROPERTY INTERFACE_LINK_LIBRARIES
+  ${SYCL_LIBRARY})
+
+# xpurt
+add_library(torch::xpurt INTERFACE IMPORTED)
+set_property(
+  TARGET torch::xpurt PROPERTY INTERFACE_LINK_LIBRARIES
+  torch::sycl)
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_toco_api.so b/videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_toco_api.so
new file mode 100644
index 0000000000000000000000000000000000000000..a7a4ed45448a1e7d182f9ea59b0f75d84cb7e311
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_toco_api.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e9afb8654cb0473a45215f50e88d2deb315a9502f97ffbe579e944f69d21af3f
+size 207656
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__init__.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/__init__.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..01ffa42c22d39fde8448a45848b0666973ecb98c
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/__init__.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/base.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..65b87212af35dbb3654048c9d6b482ea88195ff2
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/base.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/convolutional.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/convolutional.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8d252469124f36aa617cdf5f08f67277fc0949c4
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/convolutional.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/core.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/core.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..37e20f5515dadc16160fd671deb1b485015642b4
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/core.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/layers.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/layers.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bd97334069c818094e09f5bf0f9dff0958f12433
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/layers.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/normalization.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/normalization.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0f485ed17781039352100027952e192456836c9d
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/normalization.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/pooling.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/pooling.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8929b8bf80e229908e9bfce9aa4eea434b8ca771
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/pooling.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/utils.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..09bd29a605ee27510a42de1710c39faa61764c1d
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/utils.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/base.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..96078dda50d2f20fac1604aeefe39e4a8af037ab
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/base.py
@@ -0,0 +1,22 @@
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+"""Contains the base Layer class, from which all layers inherit."""
+from tensorflow.python.keras.legacy_tf_layers import base
+
+InputSpec = base.InputSpec
+
+keras_style_scope = base.keras_style_scope
+set_keras_style = base.set_keras_style
+Layer = base.Layer
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/convolutional.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/convolutional.py
new file mode 100644
index 0000000000000000000000000000000000000000..19d82d7d577d3c20873a1c8e3527cde3e2d01df2
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/convolutional.py
@@ -0,0 +1,48 @@
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+
+"""Contains the convolutional layer classes and their functional aliases.
+"""
+from tensorflow.python.keras.legacy_tf_layers import convolutional
+
+Conv1D = convolutional.Conv1D
+conv1d = convolutional.conv1d
+Conv2D = convolutional.Conv2D
+conv2d = convolutional.conv2d
+Conv3D = convolutional.Conv3D
+conv3d = convolutional.conv3d
+SeparableConv1D = convolutional.SeparableConv1D
+SeparableConv2D = convolutional.SeparableConv2D
+separable_conv1d = convolutional.separable_conv1d
+separable_conv2d = convolutional.separable_conv2d
+Conv2DTranspose = convolutional.Conv2DTranspose
+conv2d_transpose = convolutional.conv2d_transpose
+Conv3DTranspose = convolutional.Conv3DTranspose
+conv3d_transpose = convolutional.conv3d_transpose
+
+# Aliases
+
+Convolution1D = Conv1D
+Convolution2D = Conv2D
+Convolution3D = Conv3D
+SeparableConvolution2D = SeparableConv2D
+Convolution2DTranspose = Deconvolution2D = Deconv2D = Conv2DTranspose
+Convolution3DTranspose = Deconvolution3D = Deconv3D = Conv3DTranspose
+convolution1d = conv1d
+convolution2d = conv2d
+convolution3d = conv3d
+separable_convolution2d = separable_conv2d
+convolution2d_transpose = deconvolution2d = deconv2d = conv2d_transpose
+convolution3d_transpose = deconvolution3d = deconv3d = conv3d_transpose
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/core.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/core.py
new file mode 100644
index 0000000000000000000000000000000000000000..360ac2774389f5b948db013eb0aaef573568ff15
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/core.py
@@ -0,0 +1,33 @@
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+
+"""Contains the core layers: Dense, Dropout.
+
+Also contains their functional aliases.
+"""
+from tensorflow.python.keras.legacy_tf_layers import core
+
+
+Dense = core.Dense
+dense = core.dense
+Dropout = core.Dropout
+dropout = core.dropout
+Flatten = core.Flatten
+flatten = core.flatten
+
+# Aliases
+
+FullyConnected = Dense
+fully_connected = dense
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/layers.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..e75a55cd93612bc525fc724722c2c2b21df66d9d
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/layers.py
@@ -0,0 +1,71 @@
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+# pylint: disable=line-too-long
+"""This library provides a set of high-level neural networks layers."""
+
+# pylint: disable=g-bad-import-order,unused-import
+
+# Base objects.
+from tensorflow.python.layers.base import Layer
+
+# Core layers.
+from tensorflow.python.layers.core import Dense
+from tensorflow.python.layers.core import Dropout
+from tensorflow.python.layers.core import Flatten
+
+from tensorflow.python.layers.core import dense
+from tensorflow.python.layers.core import dropout
+from tensorflow.python.layers.core import flatten
+
+# Convolutional layers.
+from tensorflow.python.layers.convolutional import SeparableConv1D
+from tensorflow.python.layers.convolutional import SeparableConv2D
+from tensorflow.python.layers.convolutional import SeparableConvolution2D
+from tensorflow.python.layers.convolutional import Conv2DTranspose
+from tensorflow.python.layers.convolutional import Convolution2DTranspose
+from tensorflow.python.layers.convolutional import Conv3DTranspose
+from tensorflow.python.layers.convolutional import Convolution3DTranspose
+from tensorflow.python.layers.convolutional import Conv1D
+from tensorflow.python.layers.convolutional import Convolution1D
+from tensorflow.python.layers.convolutional import Conv2D
+from tensorflow.python.layers.convolutional import Convolution2D
+from tensorflow.python.layers.convolutional import Conv3D
+from tensorflow.python.layers.convolutional import Convolution3D
+
+from tensorflow.python.layers.convolutional import separable_conv1d
+from tensorflow.python.layers.convolutional import separable_conv2d
+from tensorflow.python.layers.convolutional import conv2d_transpose
+from tensorflow.python.layers.convolutional import conv3d_transpose
+from tensorflow.python.layers.convolutional import conv1d
+from tensorflow.python.layers.convolutional import conv2d
+from tensorflow.python.layers.convolutional import conv3d
+
+# Pooling layers.
+from tensorflow.python.layers.pooling import AveragePooling1D
+from tensorflow.python.layers.pooling import MaxPooling1D
+from tensorflow.python.layers.pooling import AveragePooling2D
+from tensorflow.python.layers.pooling import MaxPooling2D
+from tensorflow.python.layers.pooling import AveragePooling3D
+from tensorflow.python.layers.pooling import MaxPooling3D
+
+from tensorflow.python.layers.pooling import average_pooling1d
+from tensorflow.python.layers.pooling import max_pooling1d
+from tensorflow.python.layers.pooling import average_pooling2d
+from tensorflow.python.layers.pooling import max_pooling2d
+from tensorflow.python.layers.pooling import average_pooling3d
+from tensorflow.python.layers.pooling import max_pooling3d
+
+# pylint: enable=g-bad-import-order,unused-import
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/normalization.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/normalization.py
new file mode 100644
index 0000000000000000000000000000000000000000..f1f26c76b2dbb9f50ed38eee809f2c2cb4e74b1b
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/normalization.py
@@ -0,0 +1,34 @@
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+
+"""Contains the normalization layer classes and their functional aliases.
+"""
+
+from tensorflow.python.util import lazy_loader
+
+normalization = lazy_loader.LazyLoader(
+    'normalization', globals(),
+    'tf_keras.legacy_tf_layers.normalization')
+
+
+# pylint: disable=invalid-name
+# lazy load all the attributes until they are accessed for the first time
+def __getattr__(name):
+  if name in ['BatchNormalization', 'BatchNorm']:
+    return normalization.BatchNormalization
+  elif name in ['batch_normalization', 'batch_norm']:
+    return normalization.batch_normalization
+  else:
+    raise AttributeError(f'module {__name__} doesn\'t have attribute {name}')
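normalization.py is the one shim that defers its import, combining lazy_loader with a module-level __getattr__ (PEP 562) so the backing module is only pulled in when a batch-norm symbol is first touched. The same pattern in a minimal, self-contained form; the module and symbol names here are hypothetical:

# lazy_mod.py -- sketch: defer an expensive import until first attribute access
import importlib

_impl = None

def __getattr__(name):
    # Called only when normal module attribute lookup fails (PEP 562, Python 3.7+).
    global _impl
    if name in ('BatchNormalization', 'batch_normalization'):
        if _impl is None:
            _impl = importlib.import_module('expensive_backend')  # hypothetical backend
        return getattr(_impl, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")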
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= + +"""Contains the pooling layer classes and their functional aliases. +""" +from tensorflow.python.keras.legacy_tf_layers import pooling + + +AveragePooling1D = pooling.AveragePooling1D +average_pooling1d = pooling.average_pooling1d +MaxPooling1D = pooling.MaxPooling1D +max_pooling1d = pooling.max_pooling1d +AveragePooling2D = pooling.AveragePooling2D +average_pooling2d = pooling.average_pooling2d +MaxPooling2D = pooling.MaxPooling2D +max_pooling2d = pooling.max_pooling2d +AveragePooling3D = pooling.AveragePooling3D +average_pooling3d = pooling.average_pooling3d +MaxPooling3D = pooling.MaxPooling3D +max_pooling3d = pooling.max_pooling3d + +# Aliases + +AvgPool2D = AveragePooling2D +MaxPool2D = MaxPooling2D +max_pool2d = max_pooling2d +avg_pool2d = average_pooling2d diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/utils.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6ec664156861b282363ac33a1d0c9a8ffa7a15dd --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/layers/utils.py @@ -0,0 +1,225 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= + +"""Contains layer utilities for input validation and format conversion.""" +from tensorflow.python.framework import smart_cond as smart_module +from tensorflow.python.ops import cond +from tensorflow.python.ops import variables + + +def convert_data_format(data_format, ndim): + if data_format == 'channels_last': + if ndim == 3: + return 'NWC' + elif ndim == 4: + return 'NHWC' + elif ndim == 5: + return 'NDHWC' + else: + raise ValueError(f'Input rank: {ndim} not supported. We only support ' + 'input rank 3, 4 or 5.') + elif data_format == 'channels_first': + if ndim == 3: + return 'NCW' + elif ndim == 4: + return 'NCHW' + elif ndim == 5: + return 'NCDHW' + else: + raise ValueError(f'Input rank: {ndim} not supported. We only support ' + 'input rank 3, 4 or 5.') + else: + raise ValueError(f'Invalid data_format: {data_format}. We only support ' + '"channels_first" or "channels_last"') + + +def normalize_tuple(value, n, name): + """Transforms a single integer or iterable of integers into an integer tuple. + + Args: + value: The value to validate and convert. Could an int, or any iterable + of ints. + n: The size of the tuple to be returned. + name: The name of the argument being validated, e.g. "strides" or + "kernel_size". This is only used to format error messages. + + Returns: + A tuple of n integers. + + Raises: + ValueError: If something else than an int/long or iterable thereof was + passed. 
+ """ + if isinstance(value, int): + return (value,) * n + else: + try: + value_tuple = tuple(value) + except TypeError: + raise ValueError(f'Argument `{name}` must be a tuple of {str(n)} ' + f'integers. Received: {str(value)}') + if len(value_tuple) != n: + raise ValueError(f'Argument `{name}` must be a tuple of {str(n)} ' + f'integers. Received: {str(value)}') + for single_value in value_tuple: + try: + int(single_value) + except (ValueError, TypeError): + raise ValueError(f'Argument `{name}` must be a tuple of {str(n)} ' + f'integers. Received: {str(value)} including element ' + f'{str(single_value)} of type ' + f'{str(type(single_value))}') + return value_tuple + + +def normalize_data_format(value): + data_format = value.lower() + if data_format not in {'channels_first', 'channels_last'}: + raise ValueError('The `data_format` argument must be one of ' + '"channels_first", "channels_last". Received: ' + f'{str(value)}.') + return data_format + + +def normalize_padding(value): + padding = value.lower() + if padding not in {'valid', 'same'}: + raise ValueError('The `padding` argument must be one of "valid", "same". ' + f'Received: {str(padding)}.') + return padding + + +def conv_output_length(input_length, filter_size, padding, stride, dilation=1): + """Determines output length of a convolution given input length. + + Args: + input_length: integer. + filter_size: integer. + padding: one of "same", "valid", "full". + stride: integer. + dilation: dilation rate, integer. + + Returns: + The output length (integer). + """ + if input_length is None: + return None + assert padding in {'same', 'valid', 'full'} + dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1) + if padding == 'same': + output_length = input_length + elif padding == 'valid': + output_length = input_length - dilated_filter_size + 1 + elif padding == 'full': + output_length = input_length + dilated_filter_size - 1 + return (output_length + stride - 1) // stride + + +def conv_input_length(output_length, filter_size, padding, stride): + """Determines input length of a convolution given output length. + + Args: + output_length: integer. + filter_size: integer. + padding: one of "same", "valid", "full". + stride: integer. + + Returns: + The input length (integer). + """ + if output_length is None: + return None + assert padding in {'same', 'valid', 'full'} + if padding == 'same': + pad = filter_size // 2 + elif padding == 'valid': + pad = 0 + elif padding == 'full': + pad = filter_size - 1 + return (output_length - 1) * stride - 2 * pad + filter_size + + +def deconv_output_length(input_length, filter_size, padding, stride): + """Determines output length of a transposed convolution given input length. + + Args: + input_length: integer. + filter_size: integer. + padding: one of "same", "valid", "full". + stride: integer. + + Returns: + The output length (integer). + """ + if input_length is None: + return None + input_length *= stride + if padding == 'valid': + input_length += max(filter_size - stride, 0) + elif padding == 'full': + input_length -= (stride + filter_size - 2) + return input_length + + +def smart_cond(pred, true_fn=None, false_fn=None, name=None): + """Return either `true_fn()` if predicate `pred` is true else `false_fn()`. + + If `pred` is a bool or has a constant value, we return either `true_fn()` + or `false_fn()`, otherwise we use `tf.cond` to dynamically route to both. + + Args: + pred: A scalar determining whether to return the result of `true_fn` or + `false_fn`. 
+ true_fn: The callable to be performed if pred is true. + false_fn: The callable to be performed if pred is false. + name: Optional name prefix when using `tf.cond`. + + Returns: + Tensors returned by the call to either `true_fn` or `false_fn`. + + Raises: + TypeError: If `true_fn` or `false_fn` is not callable. + """ + if isinstance(pred, variables.Variable): + return cond.cond( + pred, true_fn=true_fn, false_fn=false_fn, name=name) + return smart_module.smart_cond( + pred, true_fn=true_fn, false_fn=false_fn, name=name) + + +def constant_value(pred): + """Return the bool value for `pred`, or None if `pred` had a dynamic value. + + Args: + pred: A scalar, either a Python bool or a TensorFlow boolean variable + or tensor, or the Python integer 1 or 0. + + Returns: + True or False if `pred` has a constant boolean value, None otherwise. + + Raises: + TypeError: If `pred` is not a Variable, Tensor or bool, or Python + integer 1 or 0. + """ + # Allow integer booleans. + if isinstance(pred, int): + if pred == 1: + pred = True + elif pred == 0: + pred = False + + if isinstance(pred, variables.Variable): + return None + return smart_module.smart_constant_value(pred) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/__init__.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2652b57cb6e989ef79e659ae876d610785e1420b --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= + +"""Ops related to Tensor Processing Units.""" + +import os +os.environ['TPU_ML_PLATFORM'] = 'Tensorflow' diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/api.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/api.py new file mode 100644 index 0000000000000000000000000000000000000000..ee7d486db7e49bacb2157d2296a074b08e00561a --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/api.py @@ -0,0 +1,33 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= +"""Modules that need to be exported to the API. 
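# Hedged illustration of the smart_cond/constant_value semantics defined
# above: a static Python bool selects a branch immediately, while a
# Variable predicate is routed through tf.cond. The tensor values are
# illustrative only.
import tensorflow.compat.v1 as tf

out = smart_cond(True, lambda: tf.constant(1.0), lambda: tf.constant(0.0))
# 'out' is simply the true-branch result; no cond op is created.

assert constant_value(1) is True                  # integer booleans allowed
assert constant_value(tf.Variable(True)) is None  # dynamic, unknown statically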
+ +List TPU modules that aren't included elsewhere here so that they can be scanned +for tf_export decorations. +""" + +# pylint: disable=unused-import +from tensorflow.python.tpu import bfloat16 +from tensorflow.python.tpu import feature_column_v2 +from tensorflow.python.tpu import tpu + +from tensorflow.python.tpu import tpu_embedding_for_serving +from tensorflow.python.tpu import tpu_embedding_v1 +from tensorflow.python.tpu import tpu_embedding_v2 +from tensorflow.python.tpu import tpu_embedding_v2_utils +from tensorflow.python.tpu import tpu_embedding_v3 +from tensorflow.python.tpu import tpu_hardware_feature +from tensorflow.python.tpu import tpu_optimizer +# pylint: enable=unused-import diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/device_assignment.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/device_assignment.py new file mode 100644 index 0000000000000000000000000000000000000000..390aa7210fa2be62a331fa59fdf0d44e665b473f --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/device_assignment.py @@ -0,0 +1,569 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ====================================== +"""Library of TPU helper functions.""" + +import enum +import math +from typing import List, Optional, Tuple + +import numpy as np + +from tensorflow.python.platform import tf_logging as logging +from tensorflow.python.tpu.topology import Topology +from tensorflow.python.util.tf_export import tf_export + + +SINGLE_CORE_ASSIGNMENT = [[[0, 0, 0, 0]]] + + +def _compute_task_and_cores_to_replicas(core_assignment, topology): + """Computes a nested dict which maps task and logical core to replicas.""" + task_and_cores_to_replicas = {} + for replica in range(core_assignment.shape[0]): + for logical_core in range(core_assignment.shape[1]): + coordinates = core_assignment[replica, logical_core, :] + task_id = topology.task_ordinal_at_coordinates(coordinates) + if task_id not in task_and_cores_to_replicas: + task_and_cores_to_replicas[task_id] = {} + if logical_core not in task_and_cores_to_replicas[task_id]: + task_and_cores_to_replicas[task_id][logical_core] = set() + + task_and_cores_to_replicas[task_id][logical_core].add(replica) + + task_to_sorted_replica_id = {} + + for task, core_to_replicas in task_and_cores_to_replicas.items(): + core_to_sorted_replicas = {} + for core, replicas in core_to_replicas.items(): + core_to_sorted_replicas[core] = sorted(replicas) + + task_to_sorted_replica_id[task] = core_to_sorted_replicas + return task_to_sorted_replica_id + + +@tf_export("tpu.experimental.DeviceOrderMode") +class DeviceOrderMode(enum.IntEnum): + """The way of determining device orders when computing device assignment.""" + # By default the mode is set to AUTO, the library will choose to form rings + # when that is possible. + AUTO = 0 + # Form rings for replicas and model-parallel cores. 
+  RING = 1 + # Form meshes for replicas and/or model-parallel cores. + MESH = 2 + + +@tf_export("tpu.experimental.DeviceAssignment") +class DeviceAssignment(object): + """Mapping from logical cores in a computation to the physical TPU topology. + + Prefer to use the `DeviceAssignment.build()` helper to construct a + `DeviceAssignment`; it is easier, if less flexible, than constructing a + `DeviceAssignment` directly. + """ + + def __init__(self, topology: Topology, core_assignment: np.ndarray): + """Constructs a `DeviceAssignment` object. + + Args: + topology: A `Topology` object that describes the physical TPU topology. + core_assignment: A logical to physical core mapping, represented as a + rank 3 numpy array. See the description of the `core_assignment` + property for more details. + + Raises: + ValueError: If `topology` is not a `Topology` object. + ValueError: If `core_assignment` is not a rank 3 numpy array. + """ + if not isinstance(topology, Topology): + raise ValueError("topology must be a Topology object, got {}".format( + type(topology))) + core_assignment = np.asarray(core_assignment, dtype=np.int32) + + self._topology = topology + + if core_assignment.ndim != 3: + raise ValueError("core_assignment must be a rank 3 numpy array, " + f"got shape {core_assignment.shape}") + + self._num_replicas = core_assignment.shape[0] + self._num_cores_per_replica = core_assignment.shape[1] + + if core_assignment.shape[-1] != topology.mesh_rank: + raise ValueError( + "core_assignment.shape[-1] must have size equal to topology " + f"rank ({topology.mesh_rank}), got " + f"core_assignment.shape={core_assignment.shape}") + + self._core_assignment = core_assignment + self._task_and_cores_to_replicas = _compute_task_and_cores_to_replicas( + self._core_assignment, topology) + + @property + def topology(self) -> Topology: + """A `Topology` that describes the TPU topology.""" + return self._topology + + @property + def num_cores_per_replica(self) -> int: + """The number of cores per replica.""" + return self._num_cores_per_replica + + @property + def num_replicas(self) -> int: + """The number of replicas of the computation.""" + return self._num_replicas + + @property + def core_assignment(self) -> np.ndarray: + """The logical to physical core mapping. + + Returns: + An integer numpy array of rank 3, with shape + `[num_replicas, num_cores_per_replica, topology_rank]`. Maps + (replica, logical core) pairs to physical topology coordinates. + """ + return self._core_assignment + + def coordinates(self, replica: int, logical_core: int) -> Tuple: # pylint:disable=g-bare-generic + """Returns the physical topology coordinates of a logical core.""" + return tuple(self.core_assignment[replica, logical_core, :]) + + def lookup_replicas(self, task_id: int, logical_core: int) -> List[int]: + """Lookup replica ids by task number and logical core. + + Args: + task_id: TensorFlow task number. + logical_core: An integer, identifying a logical core. + Returns: + A sorted list of the replicas that are attached to that task and + logical_core. + Raises: + ValueError: If no replica exists in the task which contains the logical + core. + """ + try: + return self._task_and_cores_to_replicas[task_id][logical_core] + except KeyError: + raise ValueError( + "Cannot find any replica in task {} that contains logical_core {}.".
+ format(task_id, logical_core)) + + def tpu_ordinal(self, replica: int = 0, logical_core: int = 0) -> int: + """Returns the ordinal of the TPU device assigned to a logical core.""" + coordinates = self.coordinates(replica, logical_core) + return self._topology.tpu_device_ordinal_at_coordinates(coordinates) + + def host_device(self, + replica: int = 0, + logical_core: int = 0, + job: Optional[str] = None) -> str: + """Returns the CPU device attached to a logical core.""" + coordinates = self.coordinates(replica, logical_core) + return self._topology.cpu_device_name_at_coordinates(coordinates, job=job) + + def tpu_device(self, + replica: int = 0, + logical_core: int = 0, + job: Optional[str] = None) -> str: + """Returns the name of the TPU device assigned to a logical core.""" + coordinates = self.coordinates(replica, logical_core) + return self._topology.tpu_device_name_at_coordinates(coordinates, job=job) + + @classmethod + def build( + cls, + topology: Topology, + computation_shape: Optional[np.ndarray] = None, + computation_stride: Optional[np.ndarray] = None, + num_replicas: int = 1, + device_order_mode: DeviceOrderMode = DeviceOrderMode.AUTO, + ) -> "DeviceAssignment": + return device_assignment( + topology=topology, + computation_shape=computation_shape, + computation_stride=computation_stride, + num_replicas=num_replicas, + device_order_mode=device_order_mode, + ) + + +def _open_ring_2d(x_size: int, y_size: int, + z_coord: int) -> List[Tuple[int, int, int]]: + """Ring-order of an X by Y mesh, with a fixed Z coordinate. + + For example, in a 4x4 mesh, this returns the following order. + 0 -- 1 -- 2 -- 3 + | | | | + 15-- 6 -- 5 -- 4 + | | | | + 14-- 7 -- 8 -- 9 + | | | | + 13-- 12-- 11-- 10 + + Note that chip 0 is not included in the output. + + Args: + x_size: An integer representing the mesh size in the x-dimension. Must be + larger than 1. + y_size: An integer representing the mesh size in the y-dimension. Must be + larger than 1. + z_coord: An integer representing the z-coordinate to use for the chips in + the ring. + + Returns: + A list of (x,y,z) triples in ring order. + """ + ret = [] + for i in range(y_size // 2): + for j in range(1, x_size): + ret.append((j, 2 * i, z_coord)) + for j in range(x_size - 1, 0, -1): + ret.append((j, 2 * i + 1, z_coord)) + for i in range(y_size - 1, 0, -1): + ret.append((0, i, z_coord)) + return ret + + +def _ring_3d(x_size: int, y_size: int, + z_size: int) -> List[Tuple[int, int, int]]: + """Ring-order of an X by Y by Z mesh. + + Constructs the 3d ring from 2d rings that are stacked in the Z dimension and + joined in one corner. For example, in a 4x4x4 mesh, this returns the + following order. + + z == 0: + 0 -- 1 -- 2 -- 3 + | | | | + 15 - 6 -- 5 -- 4 + | | | | + 14 - 7 -- 8 -- 9 + | | | | + 13 - 12 - 11 - 10 + z == 1: + 63 - 30 - 29 - 28 + | | | | + 16 - 25 - 26 - 27 + | | | | + 17 - 24 - 23 - 22 + | | | | + 18 - 19 - 20 - 21 + z == 2: + 62 - 31 - 32 - 33 + | | | | + 45 - 36 - 35 - 34 + | | | | + 44 - 37 - 38 - 39 + | | | | + 43 - 42 - 41 - 40 + z == 3: + 61 - 60 - 59 - 58 + | | | | + 46 - 55 - 56 - 57 + | | | | + 47 - 54 - 53 - 52 + | | | | + 48 - 49 - 50 - 51 + + Args: + x_size: An integer representing the mesh size in the x-dimension. Must be + larger than 1. + y_size: An integer representing the mesh size in the y-dimension. Must be + larger than 1. + z_size: An integer representing the mesh size in the z-dimension. Must be + larger than 1. + + Returns: + A list of (x,y,z) triples in ring order. + """ + + # Handle the case where 2 dimensions are size 1.
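# A small sanity check of the 4x4 ring order documented above; chip
# (0, 0, z) is the implicit starting point and is not in the returned list.
ring = _open_ring_2d(4, 4, 0)
assert ring[:4] == [(1, 0, 0), (2, 0, 0), (3, 0, 0), (3, 1, 0)]
assert len(ring) == 15  # 4 * 4 - 1 chips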
+ if x_size == 1 and y_size == 1: + return [(0, 0, i) for i in range(z_size)] + if x_size == 1 and z_size == 1: + return [(0, i, 0) for i in range(y_size)] + if y_size == 1 and z_size == 1: + return [(i, 0, 0) for i in range(x_size)] + + # Handle odd mesh dimensions. This never happens in practice, so we don't + # bother to try building something optimal. + if (x_size > 1 and x_size % 2 != 0) or (y_size > 1 and + y_size % 2 != 0) or (z_size > 1 and + z_size % 2 != 0): + logging.warning("Odd dimension") + ret = [] + for z in range(z_size): + for y in range(y_size): + ret.extend((x, y, z) for x in range(x_size)) + return ret + + # Always start with chip 0. + ret = [(0, 0, 0)] + # Handle the case where one dimension is size 1. We just build a flat, 2d + # ring. + if z_size == 1: + ret.extend(_open_ring_2d(x_size, y_size, 0)) + return ret + if y_size == 1: + ret = [(0, 0, 0)] + ret.extend((x, y, z) for (x, z, y) in _open_ring_2d(x_size, z_size, 0)) + return ret + if x_size == 1: + ret = [(0, 0, 0)] + ret.extend((x, y, z) for (y, z, x) in _open_ring_2d(y_size, z_size, 0)) + return ret + + # Handle the case where all dimensions have size > 1 and even. + ret = [(0, 0, 0)] + for i in range(0, z_size): + r = _open_ring_2d(x_size, y_size, i) + if i % 2 == 0: + ret.extend(r) + else: + ret.extend(reversed(r)) + for i in range(z_size - 1, 0, -1): + ret.append((0, 0, i)) + return ret + + +def device_assignment( + topology: Topology, + computation_shape: Optional[np.ndarray] = None, + computation_stride: Optional[np.ndarray] = None, + num_replicas: int = 1, + device_order_mode: DeviceOrderMode = DeviceOrderMode.AUTO, +) -> DeviceAssignment: + """Computes a device_assignment of a computation across a TPU topology. + + Attempts to choose a compact grid of cores for locality. + + Returns a `DeviceAssignment` that describes the cores in the topology assigned + to each core of each replica. + + `computation_shape` and `computation_stride` values should be powers of 2 for + optimal packing. + + Args: + topology: A `Topology` object that describes the TPU cluster topology. To + obtain a TPU topology, evaluate the `Tensor` returned by + `initialize_system` using `Session.run`. Either a serialized + `TopologyProto` or a `Topology` object may be passed. Note: you must + evaluate the `Tensor` first; you cannot pass an unevaluated `Tensor` + here. + computation_shape: A rank 1 int32 numpy array with size equal to the + topology rank, describing the shape of the computation's block of cores. + If None, the `computation_shape` is `[1] * topology_rank`. + computation_stride: A rank 1 int32 numpy array of size `topology_rank`, + describing the inter-core spacing of the `computation_shape` cores in the + TPU topology. If None, the `computation_stride` is `[1] * topology_rank`. + num_replicas: The number of computation replicas to run. The replicas will + be packed into the free spaces of the topology. + device_order_mode: A `DeviceOrderMode` enum that indicates whether to + assign devices to form rings or meshes, or to let the library choose. + + Returns: + A DeviceAssignment object, which describes the mapping between the logical + cores in each computation replica and the physical cores in the TPU + topology. + + Raises: + ValueError: If `topology` is not a valid `Topology` object. + ValueError: If `computation_shape` or `computation_stride` are not 1D int32 + numpy arrays with shape [topology_rank] where all values are positive. + ValueError: If computation's replicas cannot fit into the TPU topology.
+ """ + # Deserialize the Topology proto, if it is a string. + if isinstance(topology, bytes): + topology = Topology(serialized=topology) + + if not isinstance(topology, Topology): + raise ValueError( + f"`topology` is not a Topology object; got {type(topology)}") + + topology_rank = len(topology.mesh_shape) + mesh_shape = topology.mesh_shape + if computation_shape is None: + computation_shape = np.array([1] * topology_rank, dtype=np.int32) + else: + computation_shape = np.asarray(computation_shape, dtype=np.int32) + + if computation_stride is None: + computation_stride = np.array([1] * topology_rank, dtype=np.int32) + else: + computation_stride = np.asarray(computation_stride, dtype=np.int32) + + if computation_shape.shape != (topology_rank,): + raise ValueError( + f"computation_shape must have shape [{topology_rank}]; " + f"got {computation_shape.shape}" + ) + if computation_stride.shape != (topology_rank,): + raise ValueError( + f"computation_stride must have shape [{topology_rank}]; " + f"got {computation_stride.shape}" + ) + + if any(computation_shape < 1): + raise ValueError( + "computation_shape must be positive; got computation_shape={}".format( + computation_shape)) + if any(computation_stride < 1): + raise ValueError( + "computation_stride must be positive; got computation_stride={}".format( + computation_stride)) + + # Computes the physical size of one computation instance. + computation_footprint = computation_shape * computation_stride + if any(computation_footprint > mesh_shape): + raise ValueError( + "computation footprint {} does not fit in TPU topology shape {}".format( + computation_footprint, mesh_shape)) + + # Computes how many copies of the computation footprint fit in the mesh. + block_counts = mesh_shape // computation_footprint + + replica_counts = block_counts * computation_stride + max_replicas = np.prod(replica_counts) + if num_replicas > max_replicas: + raise ValueError( + "requested {} replicas but only {} replicas with shape {} and " + "computation_stride {} fit in a TPU mesh of shape {}".format( + num_replicas, max_replicas, computation_shape, computation_stride, + mesh_shape)) + + def ceil_of_ratio(n, m): + return (n + m - 1) // m + + if topology.missing_devices.size == 0: + replica_shape = [0] * topology_rank + if num_replicas > 0: + remaining_replicas = num_replicas + remaining_dims = topology_rank + + # Choose dimensions as close to an equal cube as possible, + # in order of increasing dimension size. By visiting dimensions + # in increasing size, we assign the most constrained dimension + # first, so we won't make infeasible choices. + # + # As a secondary sort order, visit the last dimension (core index) first, + # then the other dimensions in increasing order. This means we try to use + # both cores on the same chip in preference to two cores on different + # chips. We visit the x dimension first, and the z dimension last, so + # that we prefer to arrange adjacent replicas on the same machine when + # possible. + # + # For example, if num_replicas == 4, we prefer to use a replica_shape of + # (2,1,1,2) over (1,1,2,2). 
+ + for x, ni in sorted(((x, ((i + 1) % topology_rank)) + for (i, x) in enumerate(replica_counts))): + i = (ni + topology_rank - 1) % topology_rank + target_size = int(math.ceil(remaining_replicas**(1.0 / remaining_dims))) + replica_shape[i] = min(target_size, x) + remaining_replicas = ceil_of_ratio(remaining_replicas, replica_shape[i]) + remaining_dims -= 1 + + assert remaining_replicas == 1 and remaining_dims == 0 + + # Assigns an offset to each replica such that no two replicas overlap. + replica_offsets = np.full([num_replicas, topology_rank], -1, dtype=np.int32) + + enable_3d_tiling = ( + topology_rank == 4 and + computation_shape[-1] == mesh_shape[-1] # Only handle 3D case. + and np.prod(computation_stride) == 1 # Ensure no stride. + and num_replicas == max_replicas) # Full replication. + + if device_order_mode != DeviceOrderMode.AUTO: + if device_order_mode == DeviceOrderMode.RING and not enable_3d_tiling: + raise ValueError( + "device_order_mode=DeviceOrderMode.RING is not compatible with the " + "3D tiling current topology. Try setting " + "device_order_mode=DeviceOrderMode.AUTO" + ) + enable_3d_tiling = device_order_mode == DeviceOrderMode.RING + + if enable_3d_tiling: + assignment = [] + inner_ring = _ring_3d(computation_shape[0], computation_shape[1], + computation_shape[2]) + outer_ring = _ring_3d(replica_shape[0], replica_shape[1], + replica_shape[2]) + + for replica in range(num_replicas): + outer_x, outer_y, outer_z = outer_ring[replica] + per_replica_assignment = [] + for index in range(np.prod(computation_shape)): + inner_x, inner_y, inner_z = inner_ring[index // mesh_shape[-1]] + px = outer_x * computation_shape[0] + inner_x + py = outer_y * computation_shape[1] + inner_y + pz = outer_z * computation_shape[2] + inner_z + pi = index % mesh_shape[-1] + per_replica_assignment.append([px, py, pz, pi]) + assignment.append(per_replica_assignment) + else: + for replica in range(num_replicas): + # Chooses a replica number in each axis. + t = replica + pos = [] + # Visit the core number first. + for dim in np.concatenate([[replica_shape[-1]], replica_shape[:-1]]): + pos.append(t % dim) + t //= dim + replica_pos = np.concatenate([pos[1:], [pos[0]]]) + + # Determines where that replica starts in each axis. + outer = replica_pos // computation_stride + inner = replica_pos % computation_stride + replica_offsets[replica, :] = outer * computation_footprint + inner + + # Computes a logical core -> physical core mapping for each replica. + indices = [ + np.arange(0, computation_shape[i] * computation_stride[i], + computation_stride[i]) for i in range(topology_rank) + ] + indices = np.concatenate( + [i[..., np.newaxis] for i in np.meshgrid(*indices, indexing="ij")], + axis=-1) + indices = indices.reshape((-1, topology_rank)) + assignment = indices + replica_offsets[:, np.newaxis, :] + else: + # We have a slice with missing chips. We define a simple assignment by + # ignoring computation stride. This assignment should enable a consistent + # and correct device assignment on degraded slices. It is optimal when + # weights are not sharded. But this device assignment may be sub-optimal for + # other model parallelism scenarios. + assert np.prod(computation_stride) == 1 + # Next, we check if we have sufficient devices. + assert num_replicas * np.prod( + computation_shape) <= topology.num_tasks * topology.num_tpus_per_task + # Map replicas to physical devices in task order. 
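# Standalone sketch of the replica-position decoding used above: the
# replica index is decomposed axis by axis, visiting the core dimension
# first so that neighbouring replicas share a chip where possible. The
# replica_shape value is illustrative.
def _decode_replica_pos(replica, replica_shape):
  t, pos = replica, []
  for dim in [replica_shape[-1]] + list(replica_shape[:-1]):
    pos.append(t % dim)
    t //= dim
  return pos[1:] + [pos[0]]

assert _decode_replica_pos(1, [2, 1, 1, 2]) == [0, 0, 0, 1]  # same chip, core 1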
+ device_coordinates = topology.device_coordinates + assignment = [] + devices_per_replica = np.prod(computation_shape) + for rindex in range(num_replicas): + replica_assignment = [] + for index in range(devices_per_replica): + logical_id = rindex * devices_per_replica + index + # Pick logical cores in task order + task = logical_id // topology.num_tpus_per_task + device = logical_id % topology.num_tpus_per_task + # Append physical cores to the replica assignment + replica_assignment.append(device_coordinates[task, device, :]) + assignment.append(replica_assignment) + + return DeviceAssignment(topology, core_assignment=assignment) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/feature_column.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/feature_column.py new file mode 100644 index 0000000000000000000000000000000000000000..adf3dd80f59aca5273307bdd697bb7a94ef22673 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/feature_column.py @@ -0,0 +1,690 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# =================================================================== +"""TPU Feature Column Library.""" +import math + +from tensorflow.python.feature_column import feature_column as fc +from tensorflow.python.feature_column import feature_column_lib as fc_lib +from tensorflow.python.framework import ops +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import init_ops +from tensorflow.python.ops import variable_scope +from tensorflow.python.tpu import tpu +from tensorflow.python.tpu import tpu_function +from tensorflow.python.tpu import tpu_replication +# pylint: disable=protected-access + + +_TPU_FC_TO_SCOPE = '_tpu_feature_column_scope' +_SUPPORTED_SEQUENCE_COLUMNS = (fc._SequenceCategoricalColumn, + fc_lib.SequenceCategoricalColumn) + + +# For V2 columns, we support anything that inherits from CategoricalColumn +# other than those in the denylist. User-provided columns that inherit from +# CategoricalColumn may or may not be compatible; it is up to the user to +# manage TPU compatibility for custom columns. +_SUPPORTED_CATEGORICAL_COLUMNS_V2 = (fc_lib.CategoricalColumn,) +_DENYLISTED_CATEGORICAL_COLUMNS_V2 = (fc_lib.HashedCategoricalColumn, + fc_lib.BucketizedColumn, + fc_lib.CrossedColumn) +_SUPPORTED_CATEGORICAL_COLUMNS = (fc._IdentityCategoricalColumn, + fc._VocabularyFileCategoricalColumn, + fc._VocabularyListCategoricalColumn, + fc._WeightedCategoricalColumn, + fc._SequenceCategoricalColumn + ) + _SUPPORTED_CATEGORICAL_COLUMNS_V2 +_SEQUENCE_FEATURE_LENGTH_POSTFIX = '_seq_length_' + + +def embedding_column(categorical_column, + dimension, + combiner='mean', + initializer=None, + max_sequence_length=0, + learning_rate_fn=None, + use_safe_embedding_lookup=True): + """TPU embedding_column for `tf.feature_column.embedding_column`. + + Note that the interface for TPU embedding_column is different from the non-TPU + version. 
The following args available for the non-TPU version are NOT + supported: ckpt_to_load_from, tensor_name_in_ckpt, max_norm and trainable. + + Args: + categorical_column: A categorical_column returned from + categorical_column_with_identity, weighted_categorical_column, + categorical_column_with_vocabulary_file, + categorical_column_with_vocabulary_list, + sequence_categorical_column_with_identity, + sequence_categorical_column_with_vocabulary_file, + sequence_categorical_column_with_vocabulary_list + dimension: An integer specifying dimension of the embedding, must be > 0. + combiner: A string specifying how to reduce if there are multiple entries + in a single row for a non-sequence column. For more information, see + `tf.feature_column.embedding_column`. + initializer: A variable initializer function to be used in embedding + variable initialization. If not specified, defaults to + `tf.compat.v1.truncated_normal_initializer` with mean `0.0` and + standard deviation `1/sqrt(dimension)`. + max_sequence_length: A non-negative integer specifying the max sequence + length. Any sequence shorter than this will be padded with 0 embeddings + and any sequence longer will be truncated. This must be positive for + sequence features and 0 for non-sequence features. + learning_rate_fn: A function that takes global step and returns learning + rate for the embedding table. If you intend to use the same learning rate + for multiple embedding tables, please ensure that you pass the exact same + python function to all calls of embedding_column, otherwise performance + may suffer. + use_safe_embedding_lookup: If true, uses safe_embedding_lookup_sparse + instead of embedding_lookup_sparse. safe_embedding_lookup_sparse ensures + there are no empty rows and all weights and ids are positive at the + expense of extra compute cost. This only applies to rank 2 (NxM) shaped + input tensors. Defaults to true, consider turning off if the above checks + are not needed. Note that having empty rows will not trigger any error + though the output result might be 0 or omitted. + + Returns: + A _TPUEmbeddingColumn. + + Raises: + ValueError: if `dimension` not > 0. + ValueError: if `initializer` is specified but not callable. + TypeError: if categorical_column is not a supported type. + """ + if isinstance(categorical_column, _DENYLISTED_CATEGORICAL_COLUMNS_V2): + raise TypeError('categorical_column for tpu ' + ' embedding_column was ' + f'denylisted type {type(categorical_column)}') + if not isinstance(categorical_column, _SUPPORTED_CATEGORICAL_COLUMNS): + raise TypeError( + 'categorical_column for tpu ' + ' embedding_column must be type {}, got {}.'.format(' or '.join([ + cc.__name__ for cc in _SUPPORTED_CATEGORICAL_COLUMNS + ]), type(categorical_column))) + if (dimension is None) or (dimension < 1): + raise ValueError('Invalid dimension {}.'.format(dimension)) + + if (initializer is not None) and (not callable(initializer)): + raise ValueError('initializer must be callable if specified. 
' + 'Embedding of column_name: {}'.format( + categorical_column.name)) + if initializer is None: + initializer = init_ops.truncated_normal_initializer( + mean=0.0, stddev=1 / math.sqrt(dimension)) + + embedding_shape = categorical_column._num_buckets, dimension # pylint: disable=protected-access + + def _creator(weight_collections, scope): + embedding_column_layer = fc._EmbeddingColumnLayer( + embedding_shape=embedding_shape, + initializer=initializer, + weight_collections=weight_collections, + trainable=True, + name='embedding_column_layer') + return embedding_column_layer(None, scope=scope) # pylint: disable=not-callable + + column = _TPUEmbeddingColumn( + categorical_column=categorical_column, + dimension=dimension, + combiner=combiner, + layer_creator=_creator, + ckpt_to_load_from=None, + tensor_name_in_ckpt=None, + max_norm=None, + trainable=True, + max_sequence_length=max_sequence_length, + learning_rate_fn=learning_rate_fn, + use_safe_embedding_lookup=use_safe_embedding_lookup) + # For Embedding column, the initializer is hidden inside the creator Fn, which + # is not accessible later. So, we attach it to a special field. Also note + # that non-TPU Embedding column and non-TPU shared Embedding column handle the + # initializer differently. See shared_embedding_columns for details. + column._tpu_initializer = initializer + return column + + +def shared_embedding_columns(categorical_columns, + dimension, + combiner='mean', + initializer=None, + shared_embedding_collection_name=None, + max_sequence_lengths=None, + learning_rate_fn=None, + use_safe_embedding_lookup=True): + """List of dense columns that convert from sparse, categorical input. + + Note that the interface for TPU embedding_column is different from the non-TPU + version. The following args available for the non-TPU version are NOT + supported: ckpt_to_load_from, tensor_name_in_ckpt, max_norm and trainable. + + Args: + categorical_columns: A list of categorical_columns returned from + categorical_column_with_identity, weighted_categorical_column, + categorical_column_with_vocabulary_file, + categorical_column_with_vocabulary_list, + sequence_categorical_column_with_identity, + sequence_categorical_column_with_vocabulary_file, + sequence_categorical_column_with_vocabulary_list + dimension: An integer specifying dimension of the embedding, must be > 0. + combiner: A string specifying how to reduce if there are multiple entries + in a single row for a non-sequence column. For more information, see + `tf.feature_column.embedding_column`. + initializer: A variable initializer function to be used in embedding + variable initialization. If not specified, defaults to + `tf.truncated_normal_initializer` with mean `0.0` and standard deviation + `1/sqrt(dimension)`. + shared_embedding_collection_name: Optional name of the collection where + shared embedding weights are added. If not given, a reasonable name will + be chosen based on the names of `categorical_columns`. This is also used + in `variable_scope` when creating shared embedding weights. + max_sequence_lengths: A list of non-negative integers, either None or + empty or the same length as the argument categorical_columns. Entries + corresponding to non-sequence columns must be 0 and entries corresponding + to sequence columns specify the max sequence length for the column. Any + sequence shorter than this will be padded with 0 embeddings and any + sequence longer will be truncated.
+ learning_rate_fn: A function that takes global step and returns learning + rate for the embedding table. If you intend to use the same learning rate + for multiple embedding tables, please ensure that you pass the exact same + python function to all calls of shared_embedding_columns, otherwise + performance may suffer. + use_safe_embedding_lookup: If true, uses safe_embedding_lookup_sparse + instead of embedding_lookup_sparse. safe_embedding_lookup_sparse ensures + there are no empty rows and all weights and ids are positive at the + expense of extra compute cost. This only applies to rank 2 (NxM) shaped + input tensors. Defaults to true, consider turning off if the above checks + are not needed. Note that having empty rows will not trigger any error + though the output result might be 0 or omitted. + + Returns: + A list of `_TPUSharedEmbeddingColumn`. + + Raises: + ValueError: if `dimension` not > 0. + ValueError: if `initializer` is specified but not callable. + ValueError: if `max_sequence_lengths` is specified and not the same length + as `categorical_columns`. + ValueError: if `max_sequence_lengths` is positive for a non-sequence column + or 0 for a sequence column. + """ + for categorical_column in categorical_columns: + if isinstance(categorical_column, _DENYLISTED_CATEGORICAL_COLUMNS_V2): + raise TypeError('categorical_column for tpu ' + ' embedding_column was denylisted type ' + f'{type(categorical_column)}') + if not isinstance(categorical_column, _SUPPORTED_CATEGORICAL_COLUMNS): + raise TypeError( + 'categorical_column for tpu ' + ' shared_embedding_columns must be type {}, got {}.'.format( + ' or '.join( + [cc.__name__ for cc in _SUPPORTED_CATEGORICAL_COLUMNS]), + type(categorical_column))) + + if not max_sequence_lengths: + max_sequence_lengths = [0] * len(categorical_columns) + if len(max_sequence_lengths) != len(categorical_columns): + raise ValueError('max_sequence_lengths and categorical_columns must be of ' + 'the same length. len(max_sequence_lengths)={} ' + 'len(categorical_columns)={}.'.format( + len(max_sequence_lengths), len(categorical_columns))) + + if (dimension is None) or (dimension < 1): + raise ValueError('Invalid dimension {}.'.format(dimension)) + + if (initializer is not None) and (not callable(initializer)): + raise ValueError('initializer must be callable if specified. ') + if initializer is None: + initializer = init_ops.truncated_normal_initializer( + mean=0.0, stddev=1 / math.sqrt(dimension)) + + # Sort the columns so the default collection name is deterministic even if the + # user passes columns from an unsorted collection, such as dict.values(). + sorted_columns = sorted(categorical_columns, key=lambda x: x.name) + num_buckets = sorted_columns[0]._num_buckets # pylint: disable=protected-access + + for c in sorted_columns[1:]: + if num_buckets != c._num_buckets: # pylint: disable=protected-access + raise ValueError( + 'To use shared_embedding_column, all categorical_columns must have ' + 'the same number of buckets. Given column: {} with buckets: {} does ' + 'not match column: {} with buckets: {}'.format( + sorted_columns[0], num_buckets, c, c._num_buckets)) # pylint: disable=protected-access + + if not shared_embedding_collection_name: + shared_embedding_collection_name = '_'.join(c.name for c in sorted_columns) + shared_embedding_collection_name += '_shared_embedding' + + tpu_columns = [] + + # Create the state (_SharedEmbeddingColumnLayer) here.
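# Hedged usage sketch of the two constructors above; the feature names,
# vocabulary sizes and dimensions are made up for illustration.
watched = fc_lib.categorical_column_with_identity('watched', num_buckets=1000)
liked = fc_lib.categorical_column_with_identity('liked', num_buckets=1000)

# One private table for a single column:
emb = embedding_column(watched, dimension=16)

# One table shared by several columns (they must agree on num_buckets):
shared = shared_embedding_columns([watched, liked], dimension=16)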
+ for categorical_column, max_sequence_length in zip( + categorical_columns, max_sequence_lengths): + column = _TPUSharedEmbeddingColumn( + categorical_column=categorical_column, + dimension=dimension, + combiner=combiner, + initializer=initializer, + shared_embedding_collection_name=shared_embedding_collection_name, + ckpt_to_load_from=None, + tensor_name_in_ckpt=None, + max_norm=None, + trainable=True, + max_sequence_length=max_sequence_length, + learning_rate_fn=learning_rate_fn, + use_safe_embedding_lookup=use_safe_embedding_lookup) + tpu_columns.append(column) + + return tpu_columns + + +class _TPUBaseEmbeddingColumn(object): + """Base class for TPU Embedding Column.""" + + def __init__(self, + categorical_column, + max_sequence_length=0, + learning_rate_fn=None): + self._tpu_categorical_column = categorical_column + self._max_sequence_length = max_sequence_length + self._learning_rate_fn = learning_rate_fn + if (self.is_sequence_column() and max_sequence_length < 1): + raise ValueError('max_sequence_length must be greater than 0 for ' + 'sequence columns. Got max_sequence_length={} for ' + 'sequence column {}.'.format(max_sequence_length, + categorical_column.name)) + if (not self.is_sequence_column() and max_sequence_length != 0): + raise ValueError('Non zero max_seq_length={} specified for non ' + 'sequence column {}.'.format(max_sequence_length, + categorical_column.name)) + + def get_combiner(self): + """Returns the embedding combiner.""" + raise NotImplementedError('not implemented') + + def get_embedding_table_size(self): + """Returns the embedding table size, tuple of vocab size and dimension.""" + raise NotImplementedError('not implemented') + + def get_feature_key_name(self): + """Returns the feature key name in the features dict.""" + raise NotImplementedError('not impl') + + def get_weight_key_name(self): + """Return the key name for weights.""" + raise NotImplementedError('not impl') + + def get_embedding_var_name(self): + """Returns the embedding variable name. + + Feature key name and embedding variable name are usually one-to-one mapping. + But for shared embedding columns, it is many-to-one mapping. + """ + raise NotImplementedError('not impl') + + def get_initializer(self): + """Returns the initializer.""" + raise NotImplementedError('not impl') + + def is_categorical_column_weighted(self): + """Check if the categorical column of the embedding column is weighted.""" + raise NotImplementedError('not impl') + + def is_sequence_column(self): + return isinstance(self._tpu_categorical_column, _SUPPORTED_SEQUENCE_COLUMNS) + + def get_max_sequence_length(self): + return self._max_sequence_length + + def get_learning_rate_fn(self): + return self._learning_rate_fn + + def get_sequence_length_feature_key_name(self): + """Get the key for the associated sequence length feature.""" + return get_sequence_length_feature_key_name_from_feature_key_name( + self.get_feature_key_name()) + + +class _TPUEmbeddingColumn(_TPUBaseEmbeddingColumn, fc._EmbeddingColumn): + """Core Embedding Column.""" + + def __new__(cls, + categorical_column, + dimension, + combiner='mean', + layer_creator=None, + ckpt_to_load_from=None, + tensor_name_in_ckpt=None, + max_norm=None, + trainable=True, + max_sequence_length=0, + learning_rate_fn=None, + use_safe_embedding_lookup=True, + bypass_scope_validation=False): + # Note, args ckpt_to_load_from, tensor_name_in_ckpt, max_norm and trainable + # are not supported on TPU. 
They are solely for matching the signature of + # __new__ of parent class fc._EmbeddingColumn. + del bypass_scope_validation + # pylint: disable=redundant-keyword-arg + return fc._EmbeddingColumn.__new__( + cls, + categorical_column, + dimension, + combiner=combiner, + layer_creator=layer_creator, + ckpt_to_load_from=ckpt_to_load_from, + tensor_name_in_ckpt=tensor_name_in_ckpt, + max_norm=max_norm, + trainable=trainable, + use_safe_embedding_lookup=use_safe_embedding_lookup) + + def __init__(self, + categorical_column, + dimension, + combiner='mean', + layer_creator=None, + ckpt_to_load_from=None, + tensor_name_in_ckpt=None, + max_norm=None, + trainable=True, + max_sequence_length=0, + learning_rate_fn=None, + use_safe_embedding_lookup=True, + bypass_scope_validation=False): + _TPUBaseEmbeddingColumn.__init__( + self, + categorical_column, + max_sequence_length=max_sequence_length, + learning_rate_fn=learning_rate_fn) + self._key = None + # If true, scope validation is skipped to allow the same column to be used + # in multiple variable scopes. By default, this is False, and we expect a + # 1:1 mapping between feature columns and scopes. + self._bypass_scope_validation = bypass_scope_validation + + def get_combiner(self): + return self.combiner + + def get_embedding_table_size(self): + """Returns num_ids and width.""" + return (self.categorical_column._num_buckets, self.dimension) + + def get_feature_key_name(self): + """get_feature_key_name.""" + if self.is_categorical_column_weighted(): + return self.categorical_column.categorical_column.name + return self.categorical_column.name + + def get_weight_key_name(self): + """get_weight_key_name.""" + if self.is_categorical_column_weighted(): + return self.categorical_column.weight_feature_key + return None + + def get_embedding_var_name(self): + """get_embedding_var_name.""" + return self.categorical_column.name + + def get_initializer(self): + return self._tpu_initializer + + def is_categorical_column_weighted(self): + """Check if the categorical column of the embedding column is weighted.""" + if isinstance( + self.categorical_column, + ( + fc._WeightedCategoricalColumn, # pylint: disable=protected-access + fc_lib.WeightedCategoricalColumn)): + return True + return False + + def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None): + if tpu.under_tpu_inference_context(): + def host_computation(): + return fc._EmbeddingColumn._get_dense_tensor( + self, inputs, weight_collections, trainable) + + return tpu_replication.outside_compilation(host_computation) + + if _is_running_on_cpu(): + return fc._EmbeddingColumn._get_dense_tensor( + self, inputs, weight_collections, trainable) + + # TPU mode + # Get the embeddings from the LazyBuilder. 
+ tensor = inputs.get(self.get_feature_key_name()) + + # Add to collection for _create_tpu_embedding_variables_and_ops + _record_variable_scope_and_name( + self.get_embedding_var_name(), + 'embedding_weights', + bypass_scope_validation=self._bypass_scope_validation) + + return tensor + + def _get_sequence_dense_tensor( + self, inputs, weight_collections=None, trainable=None): + if tpu.under_tpu_inference_context(): + def host_computation(): + return fc._EmbeddingColumn._get_sequence_dense_tensor( + self, inputs, weight_collections, trainable) + + return tpu_replication.outside_compilation(host_computation) + + if _is_running_on_cpu(): + return fc._EmbeddingColumn._get_sequence_dense_tensor( + self, inputs, weight_collections, trainable) + + tensor = inputs.get(self.get_feature_key_name()) + tensor_lengths = inputs.get(self.get_sequence_length_feature_key_name()) + + # inputs is a _LazyBuilder and for rank 1 tensors, it calls expand_dims(-1). + # We need to undo this to match the standard CPU sequence embedding. + tensor_lengths = array_ops.squeeze(tensor_lengths, -1) + + # Add to collection for _create_tpu_embedding_variables_and_ops + _record_variable_scope_and_name( + self.get_embedding_var_name(), + 'embedding_weights', + bypass_scope_validation=self._bypass_scope_validation) + + return fc._SequenceDenseColumn.TensorSequenceLengthPair( + dense_tensor=tensor, sequence_length=tensor_lengths) + + +class _TPUSharedEmbeddingColumn(_TPUBaseEmbeddingColumn, + fc._SharedEmbeddingColumn): + """Core Shared Embedding Column.""" + + def __new__(cls, + categorical_column, + dimension, + combiner='mean', + initializer=None, + shared_embedding_collection_name=None, + ckpt_to_load_from=None, + tensor_name_in_ckpt=None, + max_norm=None, + trainable=True, + max_sequence_length=0, + learning_rate_fn=None, + use_safe_embedding_lookup=True): + return fc._SharedEmbeddingColumn.__new__( + cls, + categorical_column, + dimension, + combiner=combiner, + initializer=initializer, + shared_embedding_collection_name=shared_embedding_collection_name, + ckpt_to_load_from=ckpt_to_load_from, + tensor_name_in_ckpt=tensor_name_in_ckpt, + max_norm=max_norm, + trainable=trainable, + use_safe_embedding_lookup=use_safe_embedding_lookup) + + def __init__(self, + categorical_column, + dimension, + combiner='mean', + initializer=None, + shared_embedding_collection_name=None, + ckpt_to_load_from=None, + tensor_name_in_ckpt=None, + max_norm=None, + trainable=True, + max_sequence_length=0, + learning_rate_fn=None, + use_safe_embedding_lookup=True): + + _TPUBaseEmbeddingColumn.__init__( + self, + categorical_column, + max_sequence_length=max_sequence_length, + learning_rate_fn=learning_rate_fn) + self._key = None + + def get_combiner(self): + return self.combiner + + def get_embedding_table_size(self): + """Returns num_ids and width.""" + return (self.categorical_column._num_buckets, self.dimension) + + def get_feature_key_name(self): + """get_feature_key_name.""" + if self.is_categorical_column_weighted(): + return self.categorical_column.categorical_column.name + return self.categorical_column.name + + def get_weight_key_name(self): + """get_weight_key_name.""" + if self.is_categorical_column_weighted(): + return self.categorical_column.weight_feature_key + return None + + def get_embedding_var_name(self): + """get_embedding_var_name.""" + return self.shared_embedding_collection_name + + def get_initializer(self): + return self.initializer + + def is_categorical_column_weighted(self): + """Check if the categorical column of the 
embedding column is weighted.""" + if isinstance( + self.categorical_column, + ( + fc._WeightedCategoricalColumn, # pylint: disable=protected-access + fc_lib.WeightedCategoricalColumn)): + return True + return False + + def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None): + if tpu.under_tpu_inference_context(): + def host_computation(): + return fc._SharedEmbeddingColumn._get_dense_tensor( + self, inputs, weight_collections, trainable) + + return tpu_replication.outside_compilation(host_computation) + + if _is_running_on_cpu(): + return fc._SharedEmbeddingColumn._get_dense_tensor( + self, inputs, weight_collections, trainable) + + # TPU mode + # Get the embeddings from the LazyBuilder. + tensor = inputs.get(self.get_feature_key_name()) + + # Add to collection for _create_tpu_embedding_variables_and_ops + _record_variable_scope_and_name( + self.get_embedding_var_name(), + 'embedding_weights', + is_shared_embedding=True) + return tensor + + def _get_sequence_dense_tensor( + self, inputs, weight_collections=None, trainable=None): + if tpu.under_tpu_inference_context(): + def host_computation(): + return fc._SharedEmbeddingColumn._get_sequence_dense_tensor( + self, inputs, weight_collections, trainable) + + return tpu_replication.outside_compilation(host_computation) + + if _is_running_on_cpu(): + return fc._SharedEmbeddingColumn._get_sequence_dense_tensor( + self, inputs, weight_collections, trainable) + + tensor = inputs.get(self.get_feature_key_name()) + tensor_lengths = inputs.get(self.get_sequence_length_feature_key_name()) + + # Add to collection for _create_tpu_embedding_variables_and_ops + _record_variable_scope_and_name( + self.get_embedding_var_name(), + 'embedding_weights', + is_shared_embedding=True) + + return fc._SequenceDenseColumn.TensorSequenceLengthPair( + dense_tensor=tensor, sequence_length=tensor_lengths) + + +def _record_variable_scope_and_name(embedding_var_name, + embedding_var_name_in_fc, + is_shared_embedding=False, + bypass_scope_validation=False): + """Add embedding variable name and scope to collection.""" + g = ops.get_default_graph() + collection = g.get_collection_ref(_TPU_FC_TO_SCOPE) + if not collection: + collection.append({}) + + var_def_dict = collection[0] + + captured_scope = variable_scope.get_variable_scope() + captured_scope_name = captured_scope.name + + if embedding_var_name in var_def_dict: + if (var_def_dict[embedding_var_name][0] != captured_scope_name and + not is_shared_embedding and not bypass_scope_validation): + raise ValueError( + 'For embedding var name {}, the variable scope name is different, ' + 'got {}; expected {}'.format(embedding_var_name, + captured_scope_name, + var_def_dict[embedding_var_name][0])) + if var_def_dict[embedding_var_name][1] != embedding_var_name_in_fc: + raise ValueError( + 'For embedding var name {}, the embedding name is different, ' + 'got {}; expected {}'.format(embedding_var_name, + embedding_var_name_in_fc, + var_def_dict[embedding_var_name][1])) + else: + var_def_dict[embedding_var_name] = (captured_scope_name, + embedding_var_name_in_fc) + + +def _is_running_on_cpu(): + """Returns True if the current context is CPU model.""" + return tpu_function.get_tpu_context().number_of_shards is None + + +def get_sequence_length_feature_key_name_from_feature_key_name(feature_name): + """Gets the name of the sequence length feature from that of the base feature. + + Args: + feature_name: The feature key of a sequence column. 
+ + Returns: + A string which is the feature key for the associated feature length column. + """ + return feature_name + _SEQUENCE_FEATURE_LENGTH_POSTFIX diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/feature_column_v2.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/feature_column_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..e1b8f8012747305cf99458246d9a80f9a03172ba --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/feature_column_v2.py @@ -0,0 +1,1097 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# =================================================================== +"""TPU Feature Column Library.""" +import copy +import enum +import math +from tensorflow.python.feature_column import feature_column as fc +from tensorflow.python.feature_column import feature_column_lib as fc_lib +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import embedding_ops +from tensorflow.python.ops import init_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import sparse_ops +from tensorflow.python.ops import variable_scope +from tensorflow.python.tpu import tpu +from tensorflow.python.tpu import tpu_replication +from tensorflow.python.tpu.feature_column import _is_running_on_cpu +from tensorflow.python.tpu.feature_column import _record_variable_scope_and_name +from tensorflow.python.tpu.feature_column import _SUPPORTED_CATEGORICAL_COLUMNS_V2 +from tensorflow.python.tpu.feature_column import _SUPPORTED_SEQUENCE_COLUMNS +from tensorflow.python.tpu.feature_column import _TPUBaseEmbeddingColumn +from tensorflow.python.util.tf_export import tf_export +# pylint: disable=protected-access + +_ALLOWED_DEVICES = ['cpu', 'tpu_tensor_core', 'tpu_embedding_core'] +_TENSOR_CORE_MASK_KEY_SUFFIX = '__TENSOR_CORE_MASK' + + +class EmbeddingDevice(enum.Enum): + CPU = 1 + TPU_TENSOR_CORE = 2 + TPU_EMBEDDING_CORE = 3 + + +@tf_export(v1=['tpu.experimental.embedding_column']) +def embedding_column_v2(categorical_column, + dimension, + combiner='mean', + initializer=None, + max_sequence_length=0, + learning_rate_fn=None, + embedding_lookup_device=None, + tensor_core_shape=None, + use_safe_embedding_lookup=True): + """TPU version of `tf.compat.v1.feature_column.embedding_column`. + + Note that the interface for `tf.tpu.experimental.embedding_column` is + different from that of `tf.compat.v1.feature_column.embedding_column`: The + following arguments are NOT supported: `ckpt_to_load_from`, + `tensor_name_in_ckpt`, `max_norm` and `trainable`. + + Use this function in place of `tf.compat.v1.feature_column.embedding_column` + when you want to use the TPU to accelerate your embedding lookups via TPU + embeddings. + + ``` + column = tf.feature_column.categorical_column_with_identity(...) 
+  tpu_column = tf.tpu.experimental.embedding_column(column, 10)
+  ...
+  def model_fn(features):
+    dense_feature = tf.keras.layers.DenseFeature(tpu_column)
+    embedded_feature = dense_feature(features)
+    ...
+
+  estimator = tf.estimator.tpu.TPUEstimator(
+      model_fn=model_fn,
+      ...
+      embedding_config_spec=tf.estimator.tpu.experimental.EmbeddingConfigSpec(
+          column=[tpu_column],
+          ...))
+  ```
+
+  Args:
+    categorical_column: A categorical column returned from
+        `categorical_column_with_identity`, `weighted_categorical_column`,
+        `categorical_column_with_vocabulary_file`,
+        `categorical_column_with_vocabulary_list`,
+        `sequence_categorical_column_with_identity`,
+        `sequence_categorical_column_with_vocabulary_file`,
+        `sequence_categorical_column_with_vocabulary_list`
+    dimension: An integer specifying dimension of the embedding, must be > 0.
+    combiner: A string specifying how to reduce if there are multiple entries
+      in a single row for a non-sequence column. For more information, see
+      `tf.feature_column.embedding_column`.
+    initializer: A variable initializer function to be used in embedding
+      variable initialization. If not specified, defaults to
+      `tf.compat.v1.truncated_normal_initializer` with mean `0.0` and
+      standard deviation `1/sqrt(dimension)`.
+    max_sequence_length: A non-negative integer specifying the max sequence
+      length. Any sequence shorter than this will be padded with 0 embeddings
+      and any sequence longer will be truncated. This must be positive for
+      sequence features and 0 for non-sequence features.
+    learning_rate_fn: A function that takes global step and returns learning
+      rate for the embedding table. If you intend to use the same learning rate
+      for multiple embedding tables, please ensure that you pass the exact same
+      python function to all calls of embedding_column, otherwise performance
+      may suffer.
+    embedding_lookup_device: The device on which to run the embedding lookup.
+      Valid options are "cpu", "tpu_tensor_core", and "tpu_embedding_core".
+      If specifying "tpu_tensor_core", a tensor_core_shape must be supplied.
+      If not specified, the default behavior is embedding lookup on
+      "tpu_embedding_core" for training and "cpu" for inference.
+      Valid options for training : ["tpu_embedding_core", "tpu_tensor_core"]
+      Valid options for serving : ["cpu", "tpu_tensor_core"]
+      For training, tpu_embedding_core is good for large embedding vocab (>1M),
+      otherwise, tpu_tensor_core is often sufficient.
+      For serving, doing embedding lookup on tpu_tensor_core during serving is
+      a way to reduce host cpu usage in cases where that is a bottleneck.
+    tensor_core_shape: If supplied, a list of integers which specifies
+      the intended dense shape to run embedding lookup for this feature on
+      TensorCore. The batch dimension can be left None or -1 to indicate
+      a dynamic shape. Only rank 2 shapes are currently supported.
+    use_safe_embedding_lookup: If true, uses safe_embedding_lookup_sparse
+      instead of embedding_lookup_sparse. safe_embedding_lookup_sparse ensures
+      there are no empty rows and all weights and ids are positive at the
+      expense of extra compute cost. This only applies to rank 2 (NxM) shaped
+      input tensors. Defaults to true, consider turning off if the above checks
+      are not needed. Note that having empty rows will not trigger any error
+      though the output result might be 0 or omitted.
+
+  Returns:
+    A `_TPUEmbeddingColumnV2`.
+
+  Raises:
+    ValueError: if `dimension` not > 0.
+    ValueError: if `initializer` is specified but not callable.
+ """ + + if not isinstance(categorical_column, _SUPPORTED_CATEGORICAL_COLUMNS_V2): + raise TypeError( + 'categorical_column for tpu ' + 'embedding_column must be type {}, got {}.'.format(' or '.join([ + cc.__name__ for cc in _SUPPORTED_CATEGORICAL_COLUMNS_V2 + ]), type(categorical_column))) + if (dimension is None) or (dimension < 1): + raise ValueError('Invalid dimension {}.'.format(dimension)) + if tensor_core_shape and len(tensor_core_shape) != 2: + raise ValueError( + 'tensor_core_shape must be size 2. Got {}.'.format(tensor_core_shape)) + + if (initializer is not None) and (not callable(initializer)): + raise ValueError('initializer must be callable if specified. ' + 'Embedding of column_name: {}'.format( + categorical_column.name)) + if initializer is None: + initializer = init_ops.truncated_normal_initializer( + mean=0.0, stddev=1 / math.sqrt(dimension)) + + if (embedding_lookup_device and + embedding_lookup_device not in _ALLOWED_DEVICES): + raise ValueError( + f'If set, embedding_lookup_device must be in {_ALLOWED_DEVICES}') + + if embedding_lookup_device == 'cpu': + embedding_lookup_device = EmbeddingDevice.CPU + elif embedding_lookup_device == 'tpu_tensor_core': + embedding_lookup_device = EmbeddingDevice.TPU_TENSOR_CORE + elif embedding_lookup_device == 'tpu_embedding_core': + embedding_lookup_device = EmbeddingDevice.TPU_EMBEDDING_CORE + + if embedding_lookup_device == EmbeddingDevice.TPU_TENSOR_CORE: + if not tensor_core_shape: + raise ValueError('Using embedding_lookup_device=tpu_tensor_core requires ' + 'tensor_core_shape to be set.') + if isinstance(categorical_column, _SUPPORTED_SEQUENCE_COLUMNS): + raise ValueError('embedding_lookup_device=tpu_tensor_core currently does ' + 'not support sequence columns.') + + if not embedding_lookup_device: + return _TPUEmbeddingColumnV2( + categorical_column=categorical_column, + dimension=dimension, + combiner=combiner, + initializer=initializer, + max_sequence_length=max_sequence_length, + learning_rate_fn=learning_rate_fn, + use_safe_embedding_lookup=use_safe_embedding_lookup) + else: + return _TPUDeviceSpecificEmbeddingColumnV2( + categorical_column=categorical_column, + dimension=dimension, + combiner=combiner, + initializer=initializer, + max_sequence_length=max_sequence_length, + learning_rate_fn=learning_rate_fn, + embedding_lookup_device=embedding_lookup_device, + tensor_core_shape=tensor_core_shape, + use_safe_embedding_lookup=use_safe_embedding_lookup) + + +@tf_export(v1=['tpu.experimental.shared_embedding_columns']) +def shared_embedding_columns_v2(categorical_columns, + dimension, + combiner='mean', + initializer=None, + shared_embedding_collection_name=None, + max_sequence_lengths=None, + learning_rate_fn=None, + embedding_lookup_device=None, + tensor_core_shape=None, + use_safe_embedding_lookup=True): + """TPU version of `tf.compat.v1.feature_column.shared_embedding_columns`. + + Note that the interface for `tf.tpu.experimental.shared_embedding_columns` is + different from that of `tf.compat.v1.feature_column.shared_embedding_columns`: + The following arguments are NOT supported: `ckpt_to_load_from`, + `tensor_name_in_ckpt`, `max_norm` and `trainable`. + + Use this function in place of + tf.compat.v1.feature_column.shared_embedding_columns` when you want to use the + TPU to accelerate your embedding lookups via TPU embeddings. + + ``` + column_a = tf.feature_column.categorical_column_with_identity(...) + column_b = tf.feature_column.categorical_column_with_identity(...) 
+
+
+@tf_export(v1=['tpu.experimental.shared_embedding_columns'])
+def shared_embedding_columns_v2(categorical_columns,
+                                dimension,
+                                combiner='mean',
+                                initializer=None,
+                                shared_embedding_collection_name=None,
+                                max_sequence_lengths=None,
+                                learning_rate_fn=None,
+                                embedding_lookup_device=None,
+                                tensor_core_shape=None,
+                                use_safe_embedding_lookup=True):
+  """TPU version of `tf.compat.v1.feature_column.shared_embedding_columns`.
+
+  Note that the interface for `tf.tpu.experimental.shared_embedding_columns` is
+  different from that of `tf.compat.v1.feature_column.shared_embedding_columns`:
+  The following arguments are NOT supported: `ckpt_to_load_from`,
+  `tensor_name_in_ckpt`, `max_norm` and `trainable`.
+
+  Use this function in place of
+  `tf.compat.v1.feature_column.shared_embedding_columns` when you want to use
+  the TPU to accelerate your embedding lookups via TPU embeddings.
+
+  ```
+  column_a = tf.feature_column.categorical_column_with_identity(...)
+  column_b = tf.feature_column.categorical_column_with_identity(...)
+  tpu_columns = tf.tpu.experimental.shared_embedding_columns(
+      [column_a, column_b], 10)
+  ...
+  def model_fn(features):
+    dense_feature = tf.keras.layers.DenseFeature(tpu_columns)
+    embedded_feature = dense_feature(features)
+    ...
+
+  estimator = tf.estimator.tpu.TPUEstimator(
+      model_fn=model_fn,
+      ...
+      embedding_config_spec=tf.estimator.tpu.experimental.EmbeddingConfigSpec(
+          column=tpu_columns,
+          ...))
+  ```
+
+  Args:
+    categorical_columns: A list of categorical columns returned from
+        `categorical_column_with_identity`, `weighted_categorical_column`,
+        `categorical_column_with_vocabulary_file`,
+        `categorical_column_with_vocabulary_list`,
+        `sequence_categorical_column_with_identity`,
+        `sequence_categorical_column_with_vocabulary_file`,
+        `sequence_categorical_column_with_vocabulary_list`
+    dimension: An integer specifying dimension of the embedding, must be > 0.
+    combiner: A string specifying how to reduce if there are multiple entries in
+      a single row for a non-sequence column. For more information, see
+      `tf.feature_column.embedding_column`.
+    initializer: A variable initializer function to be used in embedding
+      variable initialization. If not specified, defaults to
+      `tf.truncated_normal_initializer` with mean `0.0` and standard deviation
+      `1/sqrt(dimension)`.
+    shared_embedding_collection_name: Optional name of the collection where
+      shared embedding weights are added. If not given, a reasonable name will
+      be chosen based on the names of `categorical_columns`. This is also used
+      in `variable_scope` when creating shared embedding weights.
+    max_sequence_lengths: A list of non-negative integers, either None or empty
+      or the same length as the argument categorical_columns. Entries
+      corresponding to non-sequence columns must be 0 and entries corresponding
+      to sequence columns specify the max sequence length for the column. Any
+      sequence shorter than this will be padded with 0 embeddings and any
+      sequence longer will be truncated.
+    learning_rate_fn: A function that takes global step and returns learning
+      rate for the embedding table. If you intend to use the same learning rate
+      for multiple embedding tables, please ensure that you pass the exact same
+      python function to all calls of shared_embedding_columns, otherwise
+      performance may suffer.
+    embedding_lookup_device: The device on which to run the embedding lookup.
+      Valid options are "cpu", "tpu_tensor_core", and "tpu_embedding_core". If
+      specifying "tpu_tensor_core", a tensor_core_shape must be supplied.
+      If not specified, the default behavior is embedding
+      lookup on "tpu_embedding_core" for training and "cpu" for inference.
+      Valid options for training : ["tpu_embedding_core", "tpu_tensor_core"]
+      Valid options for serving : ["cpu", "tpu_tensor_core"]
+      For training, tpu_embedding_core is good for large embedding vocab (>1M),
+      otherwise, tpu_tensor_core is often sufficient.
+      For serving, doing embedding lookup on tpu_tensor_core during serving is
+      a way to reduce host cpu usage in cases where that is a bottleneck.
+    tensor_core_shape: If supplied, a list of integers which specifies the
+      intended dense shape to run embedding lookup for this feature on
+      TensorCore. The batch dimension can be left None or -1 to indicate a
+      dynamic shape. Only rank 2 shapes are currently supported.
+    use_safe_embedding_lookup: If true, uses safe_embedding_lookup_sparse
+      instead of embedding_lookup_sparse.
+      safe_embedding_lookup_sparse ensures
+      there are no empty rows and all weights and ids are positive at the
+      expense of extra compute cost. This only applies to rank 2 (NxM) shaped
+      input tensors. Defaults to true, consider turning off if the above checks
+      are not needed. Note that having empty rows will not trigger any error
+      though the output result might be 0 or omitted.
+
+  Returns:
+    A list of `_TPUSharedEmbeddingColumnV2`.
+
+  Raises:
+    ValueError: if `dimension` not > 0.
+    ValueError: if `initializer` is specified but not callable.
+    ValueError: if `max_sequence_lengths` is specified and not the same length
+      as `categorical_columns`.
+    ValueError: if `max_sequence_lengths` is positive for a non-sequence column
+      or 0 for a sequence column.
+  """
+
+  for categorical_column in categorical_columns:
+    if not isinstance(categorical_column, _SUPPORTED_CATEGORICAL_COLUMNS_V2):
+      raise TypeError(
+          'categorical_column for tpu '
+          'shared_embedding_columns must be type {}, got {}.'.format(
+              ' or '.join(
+                  [cc.__name__ for cc in _SUPPORTED_CATEGORICAL_COLUMNS_V2]),
+              type(categorical_column)))
+
+  if not max_sequence_lengths:
+    max_sequence_lengths = [0] * len(categorical_columns)
+  if len(max_sequence_lengths) != len(categorical_columns):
+    raise ValueError('max_sequence_lengths and categorical_columns must be of '
+                     'the same length. len(max_sequence_lengths)={} '
+                     'len(categorical_columns)={}.'.format(
+                         len(max_sequence_lengths), len(categorical_columns)))
+
+  if (dimension is None) or (dimension < 1):
+    raise ValueError('Invalid dimension {}.'.format(dimension))
+  if tensor_core_shape and len(tensor_core_shape) != 2:
+    raise ValueError(
+        'tensor_core_shape must be size 2. Got {}.'.format(tensor_core_shape))
+
+  if (initializer is not None) and (not callable(initializer)):
+    raise ValueError('initializer must be callable if specified. ')
+  if initializer is None:
+    initializer = init_ops.truncated_normal_initializer(
+        mean=0.0, stddev=1 / math.sqrt(dimension))
+
+  # Sort the columns so the default collection name is deterministic even if
+  # the user passes columns from an unsorted collection, such as dict.values().
+  sorted_columns = sorted(categorical_columns, key=lambda x: x.name)
+  num_buckets = sorted_columns[0]._num_buckets  # pylint: disable=protected-access
+
+  for c in sorted_columns[1:]:
+    if num_buckets != c._num_buckets:  # pylint: disable=protected-access
+      raise ValueError(
+          'To use shared_embedding_column, all categorical_columns must have '
+          'the same number of buckets. 
Given column: {} with buckets: {} does ' + 'not match column: {} with buckets: {}'.format( + sorted_columns[0], num_buckets, c, c._num_buckets)) # pylint: disable=protected-access + + if not shared_embedding_collection_name: + shared_embedding_collection_name = '_'.join(c.name for c in sorted_columns) + shared_embedding_collection_name += '_shared_embedding' + + tpu_columns = [] + + column_creator = fc_lib.SharedEmbeddingColumnCreator( + dimension=dimension, initializer=initializer, ckpt_to_load_from=None, + tensor_name_in_ckpt=None, num_buckets=num_buckets, trainable=None, + name=shared_embedding_collection_name) + + if (embedding_lookup_device and + embedding_lookup_device not in _ALLOWED_DEVICES): + raise ValueError( + f'If set, embedding_lookup_device must be in {_ALLOWED_DEVICES}') + + if embedding_lookup_device == 'cpu': + embedding_lookup_device = EmbeddingDevice.CPU + elif embedding_lookup_device == 'tpu_tensor_core': + embedding_lookup_device = EmbeddingDevice.TPU_TENSOR_CORE + elif embedding_lookup_device == 'tpu_embedding_core': + embedding_lookup_device = EmbeddingDevice.TPU_EMBEDDING_CORE + + if embedding_lookup_device == EmbeddingDevice.TPU_TENSOR_CORE: + if not tensor_core_shape: + raise ValueError('Using embedding_lookup_device=tpu_tensor_core requires ' + 'tensor_core_shape to be set.') + for c in sorted_columns: + if isinstance(c, _SUPPORTED_SEQUENCE_COLUMNS): + raise ValueError('embedding_lookup_device=tpu_tensor_core currently ' + 'does not support sequence columns.') + + # Create the state (_SharedEmbeddingColumnLayer) here. + for categorical_column, max_sequence_length in zip( + categorical_columns, max_sequence_lengths): + if not embedding_lookup_device: + column = _TPUSharedEmbeddingColumnV2( + categorical_column=categorical_column, + shared_embedding_column_creator=column_creator, + combiner=combiner, + initializer=initializer, + shared_embedding_collection_name=shared_embedding_collection_name, + max_sequence_length=max_sequence_length, + learning_rate_fn=learning_rate_fn, + use_safe_embedding_lookup=use_safe_embedding_lookup) + else: + column = _TPUSharedDeviceSpecificEmbeddingColumnV2( + categorical_column=categorical_column, + shared_embedding_column_creator=column_creator, + combiner=combiner, + initializer=initializer, + shared_embedding_collection_name=shared_embedding_collection_name, + max_sequence_length=max_sequence_length, + learning_rate_fn=learning_rate_fn, + embedding_lookup_device=embedding_lookup_device, + tensor_core_shape=tensor_core_shape, + use_safe_embedding_lookup=use_safe_embedding_lookup) + tpu_columns.append(column) + + return tpu_columns + + +class _TPUEmbeddingColumnV2(_TPUBaseEmbeddingColumn, fc_lib.EmbeddingColumn): + """Core Embedding Column.""" + + def __new__(cls, + categorical_column, + dimension, + combiner='mean', + initializer=None, + max_sequence_length=0, + learning_rate_fn=None, + use_safe_embedding_lookup=True, + bypass_scope_validation=False): + del bypass_scope_validation + # pylint: disable=redundant-keyword-arg + return fc_lib.EmbeddingColumn.__new__( + cls, + categorical_column, + dimension, + combiner=combiner, + initializer=initializer, + ckpt_to_load_from=None, + tensor_name_in_ckpt=None, + max_norm=None, + trainable=True, + use_safe_embedding_lookup=use_safe_embedding_lookup) + + def __getnewargs__(self): + return (self._tpu_categorical_column, self.dimension, self.combiner, + self.initializer, self._max_sequence_length, self._learning_rate_fn, + self.use_safe_embedding_lookup, self._bypass_scope_validation) + + def 
__deepcopy__(self, memo):
+    return _TPUEmbeddingColumnV2(
+        *(copy.deepcopy(a, memo) for a in self.__getnewargs__()))
+
+  def __init__(self,
+               categorical_column,
+               dimension,
+               combiner='mean',
+               initializer=None,
+               max_sequence_length=0,
+               learning_rate_fn=None,
+               use_safe_embedding_lookup=True,
+               bypass_scope_validation=False):
+    _TPUBaseEmbeddingColumn.__init__(
+        self,
+        categorical_column,
+        max_sequence_length=max_sequence_length,
+        learning_rate_fn=learning_rate_fn)
+    self._key = None
+    # If true, scope validation is skipped to allow the same column to be used
+    # in multiple variable scopes. By default, this is False, and we expect a
+    # 1:1 mapping between feature columns and scopes.
+    self._bypass_scope_validation = bypass_scope_validation
+
+  def get_combiner(self):
+    return self.combiner
+
+  def get_embedding_table_size(self):
+    """Returns num_ids and width."""
+    return (self.categorical_column._num_buckets, self.dimension)
+
+  def get_feature_key_name(self):
+    """get_feature_key_name."""
+    if self.is_categorical_column_weighted():
+      return self.categorical_column.categorical_column.name
+    return self.categorical_column.name
+
+  def get_weight_key_name(self):
+    """get_weight_key_name."""
+    if self.is_categorical_column_weighted():
+      return self.categorical_column.weight_feature_key
+    return None
+
+  def get_embedding_var_name(self):
+    """get_embedding_var_name."""
+    return self.categorical_column.name
+
+  def get_initializer(self):
+    return self.initializer
+
+  def is_categorical_column_weighted(self):
+    """Check if the categorical column of the embedding column is weighted."""
+    if isinstance(
+        self.categorical_column,
+        (
+            fc._WeightedCategoricalColumn,  # pylint: disable=protected-access
+            fc_lib.WeightedCategoricalColumn)):
+      return True
+    return False
+
+  def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
+    if tpu.under_tpu_inference_context():
+      def host_computation():
+        return fc_lib.EmbeddingColumn._get_dense_tensor(
+            self, inputs, weight_collections, trainable)
+
+      return tpu_replication.outside_compilation(host_computation)
+
+    if _is_running_on_cpu():
+      return fc_lib.EmbeddingColumn._get_dense_tensor(
+          self, inputs, weight_collections, trainable)
+
+    # TPU mode
+    # Get the embeddings from the LazyBuilder.
+    tensor = inputs.get(self.get_feature_key_name())
+
+    # Add to collection for _create_tpu_embedding_variables_and_ops
+    _record_variable_scope_and_name(
+        self.get_embedding_var_name(),
+        'embedding_weights',
+        bypass_scope_validation=self._bypass_scope_validation)
+
+    return tensor
+
+  def create_state(self, state_manager):
+    if _is_running_on_cpu():
+      return fc_lib.EmbeddingColumn.create_state(
+          self, state_manager)
+
+    # create_state is called for the EmbeddingColumn to create its embedding
+    # variables under feature column V2. If we are on TPU, record the variable
+    # scope here instead.
+    _record_variable_scope_and_name(
+        self.get_embedding_var_name(),
+        'embedding_weights',
+        bypass_scope_validation=self._bypass_scope_validation)
+
+  def get_dense_tensor(self, transformation_cache, state_manager):
+    if tpu.under_tpu_inference_context():
+      def host_computation():
+        return fc_lib.EmbeddingColumn.get_dense_tensor(
+            self, transformation_cache, state_manager)
+
+      return tpu_replication.outside_compilation(host_computation)
+
+    if _is_running_on_cpu():
+      return fc_lib.EmbeddingColumn.get_dense_tensor(
+          self, transformation_cache, state_manager)
+
+    # TPU mode
+    # Get the embeddings from the FeatureTransformationCache.
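+    # (Under TPU training the cache holds the already-dequeued embedding
+    # activations for this feature, so no lookup op is created here.)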
+ tensor = transformation_cache.get(self.get_feature_key_name(), + state_manager) + + return tensor + + def _get_sequence_dense_tensor( + self, inputs, weight_collections=None, trainable=None): + if tpu.under_tpu_inference_context(): + def host_computation(): + return fc_lib.EmbeddingColumn._get_sequence_dense_tensor( + self, inputs, weight_collections, trainable) + + return tpu_replication.outside_compilation(host_computation) + + if _is_running_on_cpu(): + return fc_lib.EmbeddingColumn._get_sequence_dense_tensor( + self, inputs, weight_collections, trainable) + + tensor = inputs.get(self.get_feature_key_name()) + tensor_lengths = inputs.get(self.get_sequence_length_feature_key_name()) + + # inputs is a _LazyBuilder and for rank 1 tensors, it calls expand_dims(-1). + # We need to undo this to match the standard CPU sequence embedding. + tensor_lengths = array_ops.squeeze(tensor_lengths, -1) + + # Add to collection for _create_tpu_embedding_variables_and_ops + _record_variable_scope_and_name( + self.get_embedding_var_name(), + 'embedding_weights', + bypass_scope_validation=self._bypass_scope_validation) + + return fc_lib.SequenceDenseColumn.TensorSequenceLengthPair( + dense_tensor=tensor, sequence_length=tensor_lengths) + + def get_sequence_dense_tensor(self, transformation_cache, state_manager): + if tpu.under_tpu_inference_context(): + def host_computation(): + return fc_lib.EmbeddingColumn.get_sequence_dense_tensor( + self, transformation_cache, state_manager) + + return tpu_replication.outside_compilation(host_computation) + + if _is_running_on_cpu(): + return fc_lib.EmbeddingColumn.get_sequence_dense_tensor( + self, transformation_cache, state_manager) + + tensor = transformation_cache.get(self.get_feature_key_name(), + state_manager) + tensor_lengths = transformation_cache.get( + self.get_sequence_length_feature_key_name(), + state_manager) + + # FeatureTransformationCache expands rank 1 tensors (like sequence length) + # to rank 2. We need to undo this to match the standard CPU sequence + # embedding. 
+ tensor_lengths = array_ops.squeeze(tensor_lengths, -1) + + return fc_lib.SequenceDenseColumn.TensorSequenceLengthPair( + dense_tensor=tensor, sequence_length=tensor_lengths) + + +class _TPUSharedEmbeddingColumnV2(_TPUBaseEmbeddingColumn, + fc_lib.SharedEmbeddingColumn): + """Core Shared Embedding Column.""" + + def __new__(cls, + categorical_column, + shared_embedding_column_creator, + combiner='mean', + initializer=None, + shared_embedding_collection_name=None, + max_sequence_length=0, + learning_rate_fn=None, + use_safe_embedding_lookup=True): + # pylint: disable=redundant-keyword-arg + return fc_lib.SharedEmbeddingColumn.__new__( + cls, + categorical_column, + combiner=combiner, + shared_embedding_column_creator=shared_embedding_column_creator, + max_norm=None, + use_safe_embedding_lookup=use_safe_embedding_lookup) + + def __getnewargs__(self): + return (self._tpu_categorical_column, self.shared_embedding_column_creator, + self.combiner, self._initializer, + self._shared_embedding_collection_name, self._max_sequence_length, + self._learning_rate_fn) + + def __deepcopy__(self, memo): + return _TPUSharedEmbeddingColumnV2( + *(copy.deepcopy(a, memo) for a in self.__getnewargs__())) + + def __init__(self, + categorical_column, + shared_embedding_column_creator, + combiner='mean', + initializer=None, + shared_embedding_collection_name=None, + max_sequence_length=0, + learning_rate_fn=None, + use_safe_embedding_lookup=True): + + _TPUBaseEmbeddingColumn.__init__( + self, + categorical_column, + max_sequence_length=max_sequence_length, + learning_rate_fn=learning_rate_fn) + self._initializer = initializer + self._shared_embedding_collection_name = shared_embedding_collection_name + + def get_combiner(self): + return self.combiner + + def get_embedding_table_size(self): + """Returns num_ids and width.""" + return (self.categorical_column._num_buckets, + self.shared_embedding_column_creator.dimension) + + def get_feature_key_name(self): + """get_feature_key_name.""" + if self.is_categorical_column_weighted(): + return self.categorical_column.categorical_column.name + return self.categorical_column.name + + def get_weight_key_name(self): + """get_weight_key_name.""" + if self.is_categorical_column_weighted(): + return self.categorical_column.weight_feature_key + return None + + def get_embedding_var_name(self): + """get_embedding_var_name.""" + return self._shared_embedding_collection_name + + def get_initializer(self): + return self._initializer + + def is_categorical_column_weighted(self): + """Check if the categorical column of the embedding column is weighted.""" + if isinstance( + self.categorical_column, + ( + fc._WeightedCategoricalColumn, # pylint: disable=protected-access + fc_lib.WeightedCategoricalColumn)): + return True + return False + + def _get_dense_tensor_internal( + self, transformation_cache, state_manager): + if tpu.under_tpu_inference_context(): + def host_computation(): + return fc_lib.SharedEmbeddingColumn._get_dense_tensor_internal( + self, transformation_cache, state_manager) + + return tpu_replication.outside_compilation(host_computation) + + if _is_running_on_cpu(): + return fc_lib.SharedEmbeddingColumn._get_dense_tensor_internal( + self, transformation_cache, state_manager) + + # TPU mode + # Get the embeddings from the FeatureTransformationCache. + tensor = transformation_cache.get(self.get_feature_key_name(), + state_manager) + + # Add to collection for _create_tpu_embedding_variables_and_ops + # Note that in Feature Column V2, shared embeddings have no scope. 
+ _record_variable_scope_and_name( + self.get_embedding_var_name(), + self.shared_embedding_column_creator._name, + is_shared_embedding=True) + return tensor + + def get_sequence_dense_tensor( + self, transformation_cache, state_manager): + if tpu.under_tpu_inference_context(): + def host_computation(): + return fc_lib.SharedEmbeddingColumn.get_sequence_dense_tensor( + self, transformation_cache, state_manager) + + return tpu_replication.outside_compilation(host_computation) + + if _is_running_on_cpu(): + return fc_lib.SharedEmbeddingColumn.get_sequence_dense_tensor( + self, transformation_cache, state_manager) + + tensor = self._get_dense_tensor_internal( + transformation_cache, state_manager) + tensor_lengths = transformation_cache.get( + self.get_sequence_length_feature_key_name(), + state_manager) + + # FeatureTransformationCache expands rank 1 tensors (like sequence length) + # to rank 2. We need to undo this to match the standard CPU sequence + # embedding. + tensor_lengths = array_ops.squeeze(tensor_lengths, -1) + + return fc_lib.SequenceDenseColumn.TensorSequenceLengthPair( + dense_tensor=tensor, sequence_length=tensor_lengths) + + +def split_sequence_columns_v2(feature_columns): + """Split a list of _TPUEmbeddingColumn into sequence and non-sequence columns. + + For use in a TPUEstimator model_fn function. E.g. + + def model_fn(features): + sequence_columns, feature_columns = ( + tf.tpu.feature_column.split_sequence_columns(feature_columns)) + input = tf.feature_column.input_layer( + features=features, feature_columns=feature_columns) + sequence_features, sequence_lengths = ( + tf.contrib.feature_column.sequence_input_layer( + features=features, feature_columns=sequence_columns)) + + Args: + feature_columns: A list of _TPUEmbeddingColumns to split. + + Returns: + Two lists of _TPUEmbeddingColumns, the first is the sequence columns and the + second is the non-sequence columns. + """ + sequence_columns = [] + non_sequence_columns = [] + for column in feature_columns: + if not isinstance(column, (_TPUEmbeddingColumnV2, + _TPUSharedEmbeddingColumnV2)): + raise TypeError( + 'column must be a _TPUEmbeddingColumnV2 or ' + f'_TPUSharedEmbeddingColumnV2 but got {type(column)} instead.') + if column.is_sequence_column(): + sequence_columns.append(column) + else: + non_sequence_columns.append(column) + return sequence_columns, non_sequence_columns + + +def sparse_embedding_aggregate_slice(params, + values_and_values_mask, + combiner='mean', + name='sparse_embedding_aggregate_slice'): + """Uses XLA's dynamic slice operations to perform embedding lookups. + + From third_party/cloud_tpu/models/movielens/tpu_embedding.py + + Args: + params: Tensor of embedding table. Rank 2 (table_size x embedding dim) + values_and_values_mask: is a two-tuple that contains: values - Tensor of + embedding indices. Rank 2 (batch x n_indices) values_mask - Tensor of mask + / weights. Rank 2 (batch x n_indices) + combiner: The combiner to use for the embedding lookup. Currently supports + 'sum' and 'mean'. + name: Optional name scope for created ops + + Returns: + Rank 2 tensor of aggregated (per batch element) embedding vectors. + + Raises: + ValueError: Combiner is not supported. 
+ """ + values, values_mask = values_and_values_mask # unpack the two-tuple + with ops.name_scope(name): + _, embedding_dimension = params.get_shape().as_list() + n_batch, n_indices_padded = values.get_shape().as_list() + if not n_batch: + n_batch = -1 + + emb_lookup = array_ops.reshape( + embedding_ops.embedding_lookup( + params, array_ops.reshape(values, [n_batch, n_indices_padded])), + [n_batch, n_indices_padded, embedding_dimension]) + + values_mask_broadcast = array_ops.reshape(values_mask, + [n_batch, n_indices_padded, 1]) + aggregate_emb = math_ops.reduce_sum( + emb_lookup * values_mask_broadcast, axis=1) + if combiner == 'sum': + return aggregate_emb + elif combiner == 'mean': + # In the case we have an empty row, both aggregate_emb and + # math_ops.reduce_sum(values_mask_broadcast, axis=1) will be 0. Thus, + # we can take max it with a non-zero value to prevent NaNs. Note that + # math_ops.reduce_sum(values_mask_broadcast, axis=1) will have integer + # values so 1.0 is the smallest value. + return aggregate_emb / math_ops.maximum( + math_ops.reduce_sum(values_mask_broadcast, axis=1), 1.0) + else: + raise ValueError('Dense TPU Embedding does not support combiner ' + 'other than sum and mean.') + + +def pad_sparse_embedding_lookup_indices(sparse_indices, padded_size): + """Creates statically-sized Tensors containing indices and weights. + + From third_party/cloud_tpu/models/movielens/tpu_embedding.py + + Also computes sparse_indices.values % embedding_table_size, for equivalent + functionality to sparse_column_with_integerized_feature. The returned + padded weight Tensor also doubles as a mask indicating which values in + the returned padded indices Tensor are indices versus padded zeros. + + Args: + sparse_indices: SparseTensor of embedding lookup indices. + padded_size: Number of columns of the returned Tensors. Indices which fall + out of bounds will be truncated to the padded size. + + Returns: + (sparse_indices.values padded to the specified size, + a mask the same size as the returned padded values in which 0s + indicate padded locations and 1s (or values from sparse_weights) + indicate actual values) + """ + batch_size = sparse_indices.dense_shape[0] + sparse_indices = sparse_ops.sparse_slice(sparse_indices, [0, 0], + [batch_size, padded_size]) + indices, values = sparse_indices.indices, sparse_indices.values + + padded_values = array_ops.scatter_nd( + indices, + math_ops.cast(values, dtypes.int32), + shape=(batch_size, padded_size)) + + weights = array_ops.ones_like(values, dtype=dtypes.float32) + padded_mask = array_ops.scatter_nd( + indices, weights, shape=(batch_size, padded_size)) + + return padded_values, padded_mask + + +def _check_invalid_cases(embedding_lookup_device): + """Checks for invalid embedding_lookup_device configurations.""" + if (tpu.under_tpu_inference_context() and + embedding_lookup_device == EmbeddingDevice.TPU_EMBEDDING_CORE): + raise ValueError( + 'Using embedding_lookup_device=tpu_embedding_core during inference ' + 'is not supported.') + if embedding_lookup_device == EmbeddingDevice.CPU: + if not tpu.under_tpu_inference_context(): + raise ValueError( + 'Using TPUEmbeddingColumn with embedding_lookup_device="cpu" ' + 'during training is not supported.') + + +class _TPUDeviceSpecificEmbeddingColumnV2(_TPUEmbeddingColumnV2): + """TPUEmbeddingColumn which allows serving on TensorCore.""" + + def __new__(cls, *args, **kwargs): + # For __new__, just capture the inference dense shape and call parent. 
+
+
+class _TPUDeviceSpecificEmbeddingColumnV2(_TPUEmbeddingColumnV2):
+  """TPUEmbeddingColumn which allows serving on TensorCore."""
+
+  def __new__(cls, *args, **kwargs):
+    # For __new__, just capture the inference dense shape and call parent.
+    if 'tensor_core_shape' in kwargs:
+      cls._tensor_core_shape = kwargs['tensor_core_shape']
+      del kwargs['tensor_core_shape']
+    if 'embedding_lookup_device' in kwargs:
+      cls._embedding_lookup_device = kwargs['embedding_lookup_device']
+      del kwargs['embedding_lookup_device']
+    return _TPUEmbeddingColumnV2.__new__(cls, *args, **kwargs)  # pytype: disable=wrong-keyword-args  # always-use-return-annotations
+
+  def __init__(self, *args, **kwargs):
+    # For __init__, just capture the inference dense shape and call parent.
+    if 'tensor_core_shape' in kwargs:
+      self._tensor_core_shape = kwargs['tensor_core_shape']
+      del kwargs['tensor_core_shape']
+    if 'embedding_lookup_device' in kwargs:
+      self._embedding_lookup_device = kwargs['embedding_lookup_device']
+      del kwargs['embedding_lookup_device']
+    _TPUEmbeddingColumnV2.__init__(self, *args, **kwargs)
+
+  def __deepcopy__(self, memo):
+    return _TPUDeviceSpecificEmbeddingColumnV2(
+        *(copy.deepcopy(a, memo) for a in self.__getnewargs__()),
+        tensor_core_shape=self._tensor_core_shape,
+        embedding_lookup_device=self._embedding_lookup_device)
+
+  def create_state(self, state_manager):
+    _check_invalid_cases(self._embedding_lookup_device)
+    # CPU case.
+    is_cpu = self._embedding_lookup_device == EmbeddingDevice.CPU
+    is_cpu = is_cpu or _is_running_on_cpu()
+    if is_cpu:
+      return fc_lib.EmbeddingColumn.create_state(self, state_manager)
+    # TPU_EMBEDDING_CORE case.
+    elif self._embedding_lookup_device == EmbeddingDevice.TPU_EMBEDDING_CORE:
+      return super(_TPUDeviceSpecificEmbeddingColumnV2,
+                   self).create_state(state_manager)
+
+    # TPU_TENSOR_CORE case.
+    return fc_lib.EmbeddingColumn.create_state(self, state_manager)
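+
+  # Dispatch summary (illustrative): CPU uses the stock EmbeddingColumn path;
+  # TPU_EMBEDDING_CORE defers to _TPUEmbeddingColumnV2, backed by the TPU
+  # embedding hardware; TPU_TENSOR_CORE builds a dense lookup from
+  # pad_sparse_embedding_lookup_indices and sparse_embedding_aggregate_slice.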
+
+  def get_dense_tensor(self, transformation_cache, state_manager):
+    """Returns the dense tensor, dispatching on the embedding lookup device."""
+    _check_invalid_cases(self._embedding_lookup_device)
+    # CPU case.
+    is_cpu = self._embedding_lookup_device == EmbeddingDevice.CPU
+    is_cpu = is_cpu or _is_running_on_cpu()
+    if is_cpu:
+      return super(_TPUDeviceSpecificEmbeddingColumnV2,
+                   self).get_dense_tensor(transformation_cache, state_manager)
+    # TPU_EMBEDDING_CORE case.
+    elif self._embedding_lookup_device == EmbeddingDevice.TPU_EMBEDDING_CORE:
+      return super(_TPUDeviceSpecificEmbeddingColumnV2,
+                   self).get_dense_tensor(transformation_cache, state_manager)
+
+    # TPU_TENSOR_CORE case.
+    if tpu.under_tpu_inference_context():
+      # For inference, use outside compilation to densify and pad the input
+      # tensors.
+      sparse_tensor = transformation_cache.get(self.categorical_column.name,
+                                               state_manager)
+
+      def host_computation():
+        return pad_sparse_embedding_lookup_indices(sparse_tensor,
+                                                   self._tensor_core_shape[1])
+
+      values, mask = tpu_replication.outside_compilation(host_computation)
+    else:
+      # For training, the inputs should already have been densified and
+      # padded.
+      values = transformation_cache.get(self.categorical_column.name,
+                                        state_manager)
+      mask = transformation_cache.get(
+          self.categorical_column.name + _TENSOR_CORE_MASK_KEY_SUFFIX,
+          state_manager)
+    embedding_weights = state_manager.get_variable(
+        self, name='embedding_weights')
+    return sparse_embedding_aggregate_slice(embedding_weights, (values, mask),
+                                            self.get_combiner())
+
+  def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
+    _check_invalid_cases(self._embedding_lookup_device)
+    # CPU case.
+    is_cpu = self._embedding_lookup_device == EmbeddingDevice.CPU
+    is_cpu = is_cpu or _is_running_on_cpu()
+    if is_cpu:
+      return super(_TPUDeviceSpecificEmbeddingColumnV2,
+                   self)._get_dense_tensor(inputs, weight_collections,
+                                           trainable)
+    # TPU_EMBEDDING_CORE case.
+    elif self._embedding_lookup_device == EmbeddingDevice.TPU_EMBEDDING_CORE:
+      return super(_TPUDeviceSpecificEmbeddingColumnV2,
+                   self)._get_dense_tensor(inputs, weight_collections,
+                                           trainable)
+
+    # TPU_TENSOR_CORE case.
+    if tpu.under_tpu_inference_context():
+      # For inference, use outside compilation to densify and pad the input
+      # tensors.
+      sparse_tensor = inputs.get(self.get_feature_key_name())
+
+      def host_computation():
+        return pad_sparse_embedding_lookup_indices(sparse_tensor,
+                                                   self._tensor_core_shape[1])
+
+      values, mask = tpu_replication.outside_compilation(host_computation)
+    else:
+      # For training, the inputs should already have been densified and
+      # padded.
+      values = inputs.get(self.get_feature_key_name())
+      mask = inputs.get(self.get_feature_key_name() +
+                        _TENSOR_CORE_MASK_KEY_SUFFIX)
+
+    embedding_shape = (self.categorical_column._num_buckets, self.dimension)  # pylint: disable=protected-access
+    if (weight_collections and
+        ops.GraphKeys.GLOBAL_VARIABLES not in weight_collections):
+      weight_collections.append(ops.GraphKeys.GLOBAL_VARIABLES)
+    embedding_weights = variable_scope.get_variable(
+        name='embedding_weights',
+        shape=embedding_shape,
+        dtype=dtypes.float32,
+        initializer=self.initializer,
+        trainable=self.trainable and trainable,
+        collections=weight_collections)
+    return sparse_embedding_aggregate_slice(embedding_weights, (values, mask),
+                                            self.get_combiner())
+
+
+class _TPUSharedDeviceSpecificEmbeddingColumnV2(_TPUSharedEmbeddingColumnV2):
+  """TPUSharedEmbeddingColumnV2 which allows serving on TensorCore."""
+
+  def __new__(cls, *args, **kwargs):
+    # For __new__, just capture the inference dense shape and call parent.
+    if 'tensor_core_shape' in kwargs:
+      cls._tensor_core_shape = kwargs['tensor_core_shape']
+      del kwargs['tensor_core_shape']
+    if 'embedding_lookup_device' in kwargs:
+      cls._embedding_lookup_device = kwargs['embedding_lookup_device']
+      del kwargs['embedding_lookup_device']
+
+    return _TPUSharedEmbeddingColumnV2.__new__(cls, *args, **kwargs)  # pytype: disable=wrong-keyword-args  # always-use-return-annotations
+
+  def __init__(self, *args, **kwargs):
+    # For __init__, just capture the inference dense shape and call parent.
+    if 'tensor_core_shape' in kwargs:
+      self._tensor_core_shape = kwargs['tensor_core_shape']
+      del kwargs['tensor_core_shape']
+    if 'embedding_lookup_device' in kwargs:
+      self._embedding_lookup_device = kwargs['embedding_lookup_device']
+      del kwargs['embedding_lookup_device']
+    _TPUSharedEmbeddingColumnV2.__init__(self, *args, **kwargs)
+
+  def __deepcopy__(self, memo):
+    return _TPUSharedDeviceSpecificEmbeddingColumnV2(
+        *(copy.deepcopy(a, memo) for a in self.__getnewargs__()),
+        tensor_core_shape=self._tensor_core_shape,
+        embedding_lookup_device=self._embedding_lookup_device)
+
+  def _get_dense_tensor_internal(self, transformation_cache, state_manager):
+    """Overrides _get_dense_tensor_internal to dispatch on the lookup device."""
+    _check_invalid_cases(self._embedding_lookup_device)
+    # CPU case.
+    is_cpu = self._embedding_lookup_device == EmbeddingDevice.CPU
+    is_cpu = is_cpu or _is_running_on_cpu()
+    if is_cpu:
+      return super(_TPUSharedDeviceSpecificEmbeddingColumnV2,
+                   self)._get_dense_tensor_internal(transformation_cache,
+                                                    state_manager)
+    # TPU_EMBEDDING_CORE case.
+    if self._embedding_lookup_device == EmbeddingDevice.TPU_EMBEDDING_CORE:
+      return super(_TPUSharedDeviceSpecificEmbeddingColumnV2,
+                   self)._get_dense_tensor_internal(transformation_cache,
+                                                    state_manager)
+
+    # TPU_TENSOR_CORE case.
+    if tpu.under_tpu_inference_context():
+      # For inference, use outside compilation to densify and pad the input
+      # tensors.
+      sparse_tensor = transformation_cache.get(self.categorical_column.name,
+                                               state_manager)
+
+      def host_computation():
+        return pad_sparse_embedding_lookup_indices(sparse_tensor,
+                                                   self._tensor_core_shape[1])
+
+      values, mask = tpu_replication.outside_compilation(host_computation)
+    else:
+      # For training, the inputs should already have been densified and
+      # padded.
+      values = transformation_cache.get(self.categorical_column.name,
+                                        state_manager)
+      mask = transformation_cache.get(
+          self.categorical_column.name + _TENSOR_CORE_MASK_KEY_SUFFIX,
+          state_manager)
+
+    # Do a dense embedding lookup on TensorCore.
+    embedding_weights = self.shared_embedding_column_creator.embedding_weights
+    return sparse_embedding_aggregate_slice(embedding_weights, (values, mask),
+                                            self.get_combiner())
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/functional.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/functional.py
new file mode 100644
index 0000000000000000000000000000000000000000..6afdff26fca3ecc41951a4ca3f86a961b9de42ff
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/functional.py
@@ -0,0 +1,19 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+"""Functional operations."""
+
+from tensorflow.python.tpu.ops import tpu_ops
+
+TPUPartitionedCall = tpu_ops.tpu_partitioned_call  # pylint: disable=invalid-name
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/preempted_hook.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/preempted_hook.py
new file mode 100644
index 0000000000000000000000000000000000000000..c9bedb9343e76295ddf4c4db064f65768aebdd1f
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/preempted_hook.py
@@ -0,0 +1,89 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Implementation of the SessionRunHook for preemptible Cloud TPUs."""
+
+import logging as _logging
+import os
+import threading
+import time
+
+from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
+from tensorflow.python.platform import tf_logging as logging
+from tensorflow.python.training import session_run_hook
+
+
+class CloudTPUPreemptedHook(session_run_hook.SessionRunHook):
+  """The SessionRunHook for preemptible Cloud TPUs.
+
+  This is an implementation of SessionRunHook for the pre-emptible Google Cloud
+  TPU service. It attempts to close the session if the TPU is preempted, and
+  exits the coordinator process if the session cannot be closed.
+  """
+
+  def __init__(self, cluster):
+    self._cluster = cluster
+
+  def after_create_session(self, session, coord):
+    if tpu_cluster_resolver.is_running_in_gce():
+      self._tpu_poller = _TPUPollingThread(self._cluster, session)
+      self._tpu_poller.start()
+
+  def end(self, session):
+    self._tpu_poller.stop()
+
+
+class _TPUPollingThread(threading.Thread):
+  """A thread that polls the state of a TPU node.
+
+  When the node transitions into a TERMINAL state (PREEMPTED, TERMINATED)
+  that is considered unrecoverable by the underlying infrastructure, it
+  attempts to close the session, and exits the entire process if
+  session.close() hangs.
+  """
+
+  def __init__(self, cluster, session):
+    super(_TPUPollingThread, self).__init__()
+
+    self.daemon = True
+    self._running = True
+    self._session_closed = False
+    self._cluster = cluster
+    self._session = session
+    self._interval = 30
+
+    # Some of the Google API libraries are quite chatty, so disable them.
+    for name in ['googleapiclient.discovery', 'oauth2client.client']:
+      _logging.getLogger(name).setLevel(_logging.WARNING)
+
+  def stop(self):
+    self._running = False
+    self._session_closed = True
+    self.join()
+
+  def run(self):
+    if not tpu_cluster_resolver.is_running_in_gce():
+      logging.warning(
+          'TPUPollingThread is running in a non-GCE environment, exiting...')
+      self._running = False
+      return
+
+    while self._running:
+      recoverable = self._cluster._cloud_tpu_client.recoverable()  # pylint: disable=protected-access
+      if not recoverable:
+        logging.warning(
+            'TPUPollingThread found TPU %s in state %s',
+            self._cluster._tpu, self._cluster._cloud_tpu_client.state())  # pylint: disable=protected-access
+        os._exit(1)  # pylint: disable=protected-access
+      time.sleep(self._interval)
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tensor_tracer.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tensor_tracer.py
new file mode 100644
index 0000000000000000000000000000000000000000..e8550ddeb1de8708bb7241b59a6453d18856e650
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tensor_tracer.py
@@ -0,0 +1,2314 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ======================================================================== +"""A utility to trace tensor values on TPU.""" + +import collections +import hashlib +import operator +import os +import os.path +import sys + +import numpy as np + +from tensorflow.core.framework import summary_pb2 +from tensorflow.python.eager import monitoring +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import func_graph +from tensorflow.python.framework import function +from tensorflow.python.framework import graph_io +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor as tensor_lib +from tensorflow.python.framework import tensor_util +from tensorflow.python.lib.io import file_io +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import array_ops_stack +from tensorflow.python.ops import cond +from tensorflow.python.ops import control_flow_case +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import control_flow_util +from tensorflow.python.ops import gen_math_ops +from tensorflow.python.ops import init_ops +from tensorflow.python.ops import linalg_ops +from tensorflow.python.ops import logging_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import nn_impl +from tensorflow.python.ops import state_ops +from tensorflow.python.ops import string_ops +from tensorflow.python.ops import summary_ops_v2 as summary +from tensorflow.python.ops import variable_scope +from tensorflow.python.platform import analytics +from tensorflow.python.platform import gfile +from tensorflow.python.platform import remote_utils +from tensorflow.python.platform import tf_logging as logging +from tensorflow.python.summary import summary_iterator +from tensorflow.python.tpu import tensor_tracer_flags +from tensorflow.python.tpu import tensor_tracer_report +from tensorflow.python.tpu import tpu_replication +from tensorflow.python.tpu.ops import tpu_ops +from tensorflow.python.training import training_util + +_DEVICE_TYPE_TPU = 'tpu' +_DEVICE_TYPE_CPU = 'cpu' +_TRACE_MODE_PART_TENSOR_SIZE = 3 + +_REASON_OUTSIDE_OP_RANGE = 'not-traced-outside-op-range' +_REASON_UNSAFE_OP = 'not-traced-unsafe-op' +_REASON_WHILELOOP_OP = 'not-traced-special-whileloop-op' +_REASON_CONTROLFLOW_OP = 'not-traced-control-flow-op' +_REASON_IN_CONTROL_FLOW = 'not-traced-in-control-flow' +_REASON_UNSAFE_SCALAR = 'not-traced-unsafe-scalar' +_REASON_SKIP_SCALAR = 'not-traced-scalar' +_REASON_LESS_INTERESTING_OP = 'not-traced-less-interesting-op' +_REASON_DEVICE_MISMATCH = 'not-traced-device-mismatch' +_REASON_DYNAMIC_SHAPE = 'not-traced-dynamic-shape' +_REASON_SCALAR_GET_TRACED = 'traced-scalar' +_REASON_TENSOR_GET_TRACED = 'traced-tensor' +_REASON_USER_INCLUDED = 'traced-user-included' +_REASON_USER_EXCLUDED = 'not-traced-user-excluded' +_REASON_NOT_EXECUTED = 'not-traced-not-in-exec-path' +_REASON_NON_NUMERIC_TENSOR = 'not-traced-non-numeric-tensor' +_REASON_FEEDS_WHILELOOP_OP = 'not-traced-feeds-special-whileloop-op' + +_OUTPUT_STREAM_ESCAPE = 'file://' +_TENSOR_TRACER_COLLECTION = 'tensor_tracer_variables' +TENSOR_TRACER_SUMMARY_COLLECTION = 'tensor_tracer_summary_writers' +_TRACE_FILE_NAME = 'trace.all' +_COMPACT_TRACE_FILE_PREFIX = 'compact_trace.' 
+_COMPACT_TRACE_ENTRY_INIT_VALUE = -1.0
+_TENSOR_TRACER_STORAGE = 'tensor_tracer_storage'
+_TT_SNAPSHOT = 'tensor_tracer_snapshot'
+_REPLICA_ID_TAG = '#replica-id: '
+_SKIP_REPORT_FILE = 'None'  # Do not write report proto if --report_file=None
+
+_TT_SUMMARY_NORM = tensor_tracer_flags.TT_SUMMARY_NORM
+_TT_SUMMARY_MAX = tensor_tracer_flags.TT_SUMMARY_MAX
+_TT_SUMMARY_MAX_ABS = tensor_tracer_flags.TT_SUMMARY_MAX_ABS
+_TT_SUMMARY_MIN = tensor_tracer_flags.TT_SUMMARY_MIN
+_TT_SUMMARY_MEAN = tensor_tracer_flags.TT_SUMMARY_MEAN
+_TT_SUMMARY_VAR = tensor_tracer_flags.TT_SUMMARY_VAR
+_TT_SUMMARY_SIZE = tensor_tracer_flags.TT_SUMMARY_SIZE
+_TT_SUMMARY_SPARSITY = tensor_tracer_flags.TT_SUMMARY_SPARSITY
+
+_TT_SUMMARY_TAG = 'tensor_tracer_summary'
+_TT_TENSORBOARD_PLUGIN_NAME = 'tensor_tracer'
+_TT_HOSTCALL_KEY = 'tensor_tracer_host_call'
+_TT_EVENT_FILE_SUFFIX = '.tensor_tracer'
+
+_TT_SUMMARY_MAX_QUEUE = 10
+
+tt_gauge = monitoring.BoolGauge('/tensorflow/api/tensor_tracer/v1',
+                                'tensor tracer usage', 'method')
+
+
+def _graph_summary_tag(graph):
+  """Generates and returns a summary tag name for the given graph."""
+
+  if graph is None:
+    raise RuntimeError('graph is None')
+  # The chance of collision with md5 is effectively 0.
+  hash_id = hashlib.md5()
+  hash_id.update(repr(graph).encode('utf-8'))
+  # hexdigest() returns a string.
+  return hash_id.hexdigest()
+
+
+def set_parameters(tensor_tracer_params=None):
+  """Enables tensor tracer and sets its parameters.
+
+  Example usage:
+    tensor_tracer_parameters = {'trace_dir': '/usr/tmp/trace_dir',
+                                'trace_mode': 'norm',
+                                'report_file': '/usr/tmp/trace_dir/report.all'}
+    tensor_tracer.set_parameters(tensor_tracer_parameters)
+
+  This sets up the parameters for tensor tracer. A call to tensor tracer as
+  below is necessary to enable debugging on CPUs and GPUs. On TPUs the call
+  below can be skipped, as it is hooked into tpu.rewrite.
+    tt = tensor_tracer.TensorTracer()
+    loss = tt.trace_cpu(tf.get_default_graph(), tensor_fetches=loss)
+
+  Args:
+    tensor_tracer_params: Tensor tracer parameter dictionary. Below gives
+      examples of these parameters: See tensor_tracer_report.py for all
+      parameters.
+        - enable: If set, tensor tracer will be enabled. Calling
+          enable_tensor_tracer automatically adds this parameter.
+        - trace_mode: The trace_mode to be used by tensor tracer. These
+          include:
+          - summary: Collects multiple statistics for traced tensors, and
+            writes them to a summary file that can be visualized using
+            tensorboard. This mode currently only works for TPUEstimator. It
+            can also be used for other models, but outfeed must be handled by
+            the user.
+          - norm: Collects norm of each traced tensor and writes them into a
+            text file pointed by 'trace_dir' flag. (Default mode).
+          - nan-inf: Checks the existence of NaNs and Infs in the tensor, and
+            writes a boolean value to a text file pointed by 'trace_dir' flag.
+            Note that 'norm' mode can also capture this information with more
+            numerical info.
+          - max-abs: Collects the absolute max for each traced tensor and
+            writes it into a text file pointed by 'trace_dir' flag.
+          - full-tensor: Writes the full tensor content of the traced tensors
+            into a text file pointed by 'trace_dir' flag.
+          - part-tensor: Writes a part of the tensor content of the traced
+            tensors into a text file pointed by 'trace_dir' flag.
+          - full_tensor_summary: Writes the full tensors as binary event
+            files. The outputs can be read using:
+              trace = tensor_tracer.read_tensor_tracer_event_file(
+                  event_file_path)
+
+        - report_file: Path to the metadata file that is written during graph
+          construction. If not set, metadata will be printed to stdout during
+          graph construction.
+        - trace_dir: Path where the execution traces will be written during
+          the graph execution. If not set, trace will be printed to stderr.
+        - trace_level: Tensor tracer aims to trace everything it can. This
+          introduces some overhead on graph execution and graph compilation
+          times. Using the trace_level parameter, it is possible to trace
+          operations based on their priorities. For example:
+          - trace_level=7 is the highest trace_level, in which every op is
+            traced.
+          - trace_level=6 will skip constant operations such as tf.constant.
+          - trace_level=5 will skip less important ops such as tf.identities.
+          - The default is trace_level=3, which will skip concat ops, or
+            random number generators.
+          - To reduce the graph compile time overhead, trace_level can be set
+            to 0, which will skip additions, subtractions, and multiplications
+            as well.
+        - excluded_opnames: If set, any matching op name will not be traced.
+          excluded_opnames can be set as a regular expression. E.g,
+          excluded_opnames=.* will exclude everything.
+        - excluded_optypes: If set, any matching op type will not be traced.
+          excluded_optypes can be set as a regular expression. E.g,
+          excluded_optypes=.* will exclude everything. excluded_optypes=MatMul
+          will exclude all MatMul ops from tracing.
+        - included_opnames: If set, any matching op name will be forced to be
+          traced. included_opnames can be set as a regular expression. E.g,
+          '--included_opnames=some_op --excluded_opnames=*.' will only trace
+          some_op.
+        - included_optypes: If set, any matching op type will be forced to be
+          traced. included_optypes can be set as a regular expression. E.g,
+          '--included_optypes=some_op_type --excluded_optypes=*.' will trace
+          only the ops with type 'some_op_type'
+        - flush_summaries: If summary mode is used, flush_summaries=1 will
+          flush summaries using outside compilation. Note that, if used with
+          low level APIs, flush_summaries=1 is necessary to obtain results.
+        Advanced Flags:
+        - trace_scalar: Scalar values are not traced by default. If this flag
+          is set, scalar values will also be traced.
+        - op_range: In the form of '%d:%d' that limits the tracing to the ops
+          within this limit. --op_range='5:10' will trace only the ops that
+          have topological order between 5-10.
+        - submode: 'brief' or 'detailed'. If the trace mode is not compact,
+          brief mode will print only the id of each traced tensor to save some
+          space. 'detailed' mode prints the full tensor name.
+        - use_fingerprint_subdirectory: The trace directory will be chosen
+          using the fingerprint of the trace metadata under the provided
+          trace_dir.
+  """
+  enable_flags = '--%s=1' % tensor_tracer_flags.FLAG_NAME_ENABLE
+  if tensor_tracer_params:
+    for key, value in tensor_tracer_params.items():
+      enable_flags += ' --%s=%s' % (key, value)
+  os.environ[tensor_tracer_flags.FLAGS_ENV_VAR] = enable_flags
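+
+
+# Illustrative sketch (not part of the original module): set_parameters simply
+# serializes the parameter dict into the environment variable consumed by
+# tensor_tracer_flags. The parameter values below are hypothetical.
+def _example_set_parameters():
+  set_parameters({'trace_mode': 'norm', 'trace_dir': '/tmp/tt'})
+  # Keys are appended, in order, after the implicit --enable=1 flag.
+  assert os.environ[tensor_tracer_flags.FLAGS_ENV_VAR] == (
+      '--enable=1 --trace_mode=norm --trace_dir=/tmp/tt')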
+
+
+def op_priority(op_type):
+  """Returns the priority of the op.
+
+  If the priority of the op is k, it will be traced if trace_level>=k.
+  Args:
+    op_type: String name of the operation type.
+  Returns:
+    Integer value corresponding to the priority of the op.
+ """ + if op_type in ('Const', 'Shape', 'BroadcastGradientArgs', 'Range', + 'VariableShape', 'Fill', 'OneHot', 'ShapeN'): + # Lowest priority ops, e.g., constant ops across different steps, + # They will be traced only if trace_level>=7 + return 7 + + if op_type in ('Identity', 'Cast', 'Reshape', 'ExpandDims', 'StopGradient', + 'PreventGradient', 'Squeeze', 'Gather', 'GatherNd'): + # Operations without numerical effects. + # They will be only if trace_level>=6 + return 6 + if op_type in ('ConcatV2', 'Concat', 'StridedSlice', 'Slice', 'Pack', 'Tile', + 'CollectivePermute', 'SplitV', 'DynamicPartition'): + # Operations that merge or slice an input, will be traced if trace_level>=5 + return 5 + if op_type in ('Pad', 'RandomUniformInt', 'GreaterEqual'): + # Operations less likely to provide useful information, + # will be traced if trace_level>=4 + return 4 + if op_type in ('Sum', 'AddV2', 'Add', 'AddN', 'BiasAdd', 'CrossReplicaSum'): + # Add operations that are less likely create any issues, will be traced + # if trace_level>=3 (default=3) + return 3 + if op_type in ('Neg', 'Sub'): + # Sub operations that are less likely create any issues, will be traced + # trace_level>=2 + return 2 + if op_type in ('Mul', 'Square', 'MatMul', 'RandomUniform', 'Select', + 'Maximum', 'Mean', 'Variance', 'Exp', 'Rsqrt'): + # Multiplication and some other operations, will be traced if trace_level>=1 + return 1 + + # Unclassified op_types default to being traced at level 2 and above. + return 2 + + +def read_tensor_tracer_event_file(event_file): + """Reads the event file written by tensor tracer. + + This can be used to read the full tensors written into binary event files by + by TensorTracer with trace_mode=full_tensor_summary. + + Example usage: + result_dict_list = tensor_tracer.read_tensor_tracer_event_file( + event_file_path) + for result_dict in result_dict_list: + for step, tensor_dict in result_dict.items(): + for tensor_name, full_tensor_content in tensor_dict.items(): + logging.info(tensor_name, full_tensor_content) + + Args: + event_file: Path to the event file that contains only tensor tracer events. + Returns: + A list of event dictionaries, each of which with the form: + {step_number: {tensor_name: tensor_content}}. This is a list instead of + a single event dictionary because it is possible that an event file may + have multiple event traces, each of them covering the same step ranges. + Raises: + ValueError: If an unexpected trace is found. + """ + + # Keeps track of how many times that a step number shows up in these events. + step_occurrence_count = collections.defaultdict(int) + + # List of step occurrences. + step_occurrence_list = [] + + for trace_event in summary_iterator.summary_iterator(event_file): + # First event is an event with file_version: "brain.Event:2" + if not trace_event.HasField('summary'): + continue + if len(trace_event.summary.value) != 1: + raise ValueError('Single step contains %d summary values,' + ' expected 1.' % len(trace_event.summary.value)) + step = trace_event.step + step_occurrence_count[step] += 1 # a new occurrence for this step. + + occurrence_idx = step_occurrence_count[step] - 1 + occurrence_size = len(step_occurrence_list) + + if occurrence_idx == occurrence_size: + # This particular occurrence isn't yet recorded on step_occurrence_list. + # So append this new occurrence to the end of step_occurrence_list. 
+
+
+def read_tensor_tracer_event_file(event_file):
+  """Reads the event file written by tensor tracer.
+
+  This can be used to read the full tensors written into binary event files
+  by TensorTracer with trace_mode=full_tensor_summary.
+
+  Example usage:
+    result_dict_list = tensor_tracer.read_tensor_tracer_event_file(
+        event_file_path)
+    for result_dict in result_dict_list:
+      for step, tensor_dict in result_dict.items():
+        for tensor_name, full_tensor_content in tensor_dict.items():
+          logging.info(tensor_name, full_tensor_content)
+
+  Args:
+    event_file: Path to the event file that contains only tensor tracer
+      events.
+  Returns:
+    A list of event dictionaries, each of which has the form:
+    {step_number: {tensor_name: tensor_content}}. This is a list instead of
+    a single event dictionary because it is possible that an event file may
+    have multiple event traces, each of them covering the same step ranges.
+  Raises:
+    ValueError: If an unexpected trace is found.
+  """
+
+  # Keeps track of how many times a step number shows up in these events.
+  step_occurrence_count = collections.defaultdict(int)
+
+  # List of step occurrences.
+  step_occurrence_list = []
+
+  for trace_event in summary_iterator.summary_iterator(event_file):
+    # First event is an event with file_version: "brain.Event:2"
+    if not trace_event.HasField('summary'):
+      continue
+    if len(trace_event.summary.value) != 1:
+      raise ValueError('Single step contains %d summary values,'
+                       ' expected 1.' % len(trace_event.summary.value))
+    step = trace_event.step
+    step_occurrence_count[step] += 1  # a new occurrence for this step.
+
+    occurrence_idx = step_occurrence_count[step] - 1
+    occurrence_size = len(step_occurrence_list)
+
+    if occurrence_idx == occurrence_size:
+      # This particular occurrence isn't yet recorded on step_occurrence_list.
+      # So append this new occurrence to the end of step_occurrence_list.
+      new_occurrence = collections.defaultdict(dict)
+      step_occurrence_list.append(new_occurrence)
+    else:
+      # This particular occurrence must be already recorded on
+      # step_occurrence_list (i.e. occurrence_idx < occurrence_size).
+      if occurrence_idx > occurrence_size:
+        raise ValueError('Unexpected: occurrence_idx (%d) > '
+                         'occurrence_size (%d)' % (occurrence_idx,
+                                                   occurrence_size))
+    tensor_value = trace_event.summary.value[0]
+    tensor_name = tensor_value.tag
+
+    real_shape = [d.size for d in tensor_value.tensor.tensor_shape.dim]
+    tensor_content = np.frombuffer(
+        tensor_value.tensor.tensor_content,
+        dtypes.DType(tensor_value.tensor.dtype).as_numpy_dtype()
+    ).reshape(real_shape)
+    step_occurrence_list[occurrence_idx][step][tensor_name] = tensor_content
+  return step_occurrence_list
+
+
+def trace_tensor(tensor, tracepoint_name=None):
+  """Programmatic interface to trace a tensor with Tensor Tracer.
+
+  Tensor Tracer, by default, traces all tensors in the execution. This
+  function can be used to limit traced tensors. If this function is called
+  for a subset of the tensors, only those will be traced.
+
+  For example, Tensor Tracer will only trace c below.
+    c = tf.MatMul(a, b)
+    tensor_tracer.trace_tensor(c)
+    d = tf.add(c, 1)
+  Args:
+    tensor: the tensor object for which the tracing is requested.
+    tracepoint_name: an optional tensor tracepoint name string. A tracepoint
+      name is a Tensor Tracer internal name for the tensor. It is useful when
+      comparing equivalent traces from different models that have different
+      tensor namings. Equivalent tensors (with different names) can be mapped
+      to each other by assigning a common tracepoint_name.
+
+  Returns:
+    The provided tensor.
+  """
+  if tracepoint_name is None:
+    tracepoint_name = tensor.name
+  tensor.graph.get_collection(_TENSOR_TRACER_COLLECTION)
+  tensor.graph.add_to_collection(_TENSOR_TRACER_COLLECTION,
+                                 (tensor, tracepoint_name))
+  return tensor
+
+
+def keras_layer_tracepoint(layer, checkpoint_name):
+  """An interface for adding the tensor outputs of a keras layer.
+
+  Encapsulates trace_tensor.
+
+  Args:
+    layer: A keras layer.
+    checkpoint_name: a string name for the checkpoint. This name has to be a
+      unique name if used within model comparison. The tensors that have the
+      same checkpoint identifier are compared in model comparison.
+
+  Returns:
+    The provided layer.
+  """
+  try:
+    outputs = layer.output
+    if tensor_util.is_tf_type(outputs):
+      trace_tensor(outputs, '%s' % (checkpoint_name))
+    else:
+      idx = 0
+      for output_tensor in outputs:
+        if tensor_util.is_tf_type(output_tensor):
+          trace_tensor(output_tensor, '%s_%d' % (checkpoint_name, idx))
+        idx += 1
+  except AttributeError:
+    pass
+  except RuntimeError:
+    pass
+  return layer
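+
+
+# Illustrative sketch (not part of the original module): limiting tracing to
+# a single tensor via the programmatic tracepoint interface above. The inputs
+# `a`, `b` and the tracepoint name are hypothetical; assumes TF1-style graph
+# mode.
+def _example_trace_single_tensor(a, b):
+  import tensorflow.compat.v1 as tf
+  c = tf.matmul(a, b)
+  trace_tensor(c, tracepoint_name='logits_matmul')  # only c will be traced
+  d = tf.add(c, 1)
+  return d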
+
+
+class TensorTracer:
+  """A software construct for tracing tensor values in a TF graph.
+
+  This utility is disabled by default. It is hooked into tpu.rewrite, so it
+  can easily be enabled on TPUs by setting the TENSOR_TRACER_FLAGS env
+  variable as below without a code change.
+    export TENSOR_TRACER_FLAGS="--enable=1"
+
+  Below is the usage example to enable it on CPUs or GPUs, or for more
+  advanced use cases on TPUs.
+
+    a = x + 1
+    b = a * 2
+    rs = tf.reduce_sum(b)
+    tensor_tracer.set_parameters({'trace_dir': 'path/to/trace_dir',
+                                  'report_file': 'path/to/report/file'})
+    tt = tensor_tracer.TensorTracer()
+    if on_tpu:
+      rs = tt.trace_tpu(tf.get_default_graph(),
+                        tensor_fetches=rs)
+    else:
+      rs = tt.trace_cpu(tf.get_default_graph(),
+                        tensor_fetches=rs)
+    session.run(rs)
+
+  If it is enabled, it will trace the output tensor values of
+  selected Ops in the graph. It has two outputs: (1) the traces and (2)
+  a report. The traces are dumped to a specified directory during the graph
+  execution, while the report is dumped during the graph construction.
+  By passing options via the env variable, users can change:
+    (1) the trace mode (e.g., detecting NaN/Inf, printing partial or
+        full tensor values)
+    (2) which Ops are traced (via op.name or op.type)
+    (3) output trace file path.
+
+  """
+  # The set of graphs that are rewritten by tensor tracer.
+  _traced_graphs = set()
+
+  @staticmethod
+  def is_enabled():
+    """Returns True if TensorTracer is enabled."""
+    try:
+      enable = tensor_tracer_flags.TTParameters().is_enabled()
+      # Add metrics to determine API usage.
+      if enable: tt_gauge.get_cell('is_enabled').set(True)
+      return enable
+    except (ValueError, RuntimeError) as e:
+      logging.warning(
+          'Tensor Tracer V1 flags processing error encountered in is_enabled '
+          'check. %s', e)
+      # TODO(b/210212559): Find a more robust fix.
+      # Should only produce exception if Tensor Tracer is enabled.
+      return True
+
+  @staticmethod
+  def check_device_type(device_type):
+    """Checks if the given device type is valid."""
+
+    if device_type not in (_DEVICE_TYPE_TPU, _DEVICE_TYPE_CPU):
+      raise ValueError('Invalid device_type "%s"' % device_type)
+
+  @staticmethod
+  def check_trace_mode(device_type, trace_mode):
+    """Checks if the given trace mode works on the given device type.
+
+    Args:
+      device_type: Device type, TPU, GPU, CPU.
+      trace_mode: Tensor tracer trace mode.
+    Raises:
+      ValueError: If the given trace mode is not supported for the device.
+    """
+    if trace_mode == tensor_tracer_flags.TRACE_MODE_FULL_TENSOR_SUMMARY:
+      if device_type != _DEVICE_TYPE_TPU:
+        raise ValueError('Device_type "%s" is not yet supported for '
+                         'trace mode "%s"' % (device_type, trace_mode))
+
+  @staticmethod
+  def loop_cond_op(op):
+    return op.type in ('LoopCond', 'RefLoopCond')
+
+  @staticmethod
+  def while_loop_op(op):
+    """Returns true if op is one of the special ops in a while loop.
+
+    Args:
+      op: A tf.Operation.
+
+    Returns:
+      True if the given op is one of [Switch, Merge, Enter, Exit,
+      NextIteration, LoopCond], which are all building blocks for TF while
+      loops.
+    """
+    return (control_flow_util.IsLoopSwitch(op) or
+            control_flow_util.IsLoopMerge(op) or
+            control_flow_util.IsLoopEnter(op) or
+            control_flow_util.IsLoopExit(op) or
+            TensorTracer.loop_cond_op(op) or
+            op.type in ('RefNextIteration', 'NextIteration'))
+
+  @staticmethod
+  def control_flow_op(op):
+    """Returns true if op is a control-flow op.
+
+    Args:
+      op: A tf.Operation.
+
+    Returns:
+      True if the given op is a Switch or Merge op, which are the building
+      blocks of TF conditional branches.
+    """
+    return (control_flow_util.IsSwitch(op) or
+            control_flow_util.IsMerge(op))
+
+  @staticmethod
+  def unsafe_op(op):
+    """Returns True if this op is not safe to be traced."""
+
+    # Reasons for not including the following op types:
+    # Assign: causes incorrect results with CPU tracing.
+    if op.type == 'Assign':
+      return True
+    return False
+
+  @staticmethod
+  def device_mismatch(device_type, op):
+    if device_type == _DEVICE_TYPE_TPU:
+      # pylint: disable=protected-access
+      return tpu_replication._TPU_REPLICATE_ATTR not in op.node_def.attr
+      # pylint: enable=protected-access
+    return False
+
+  @staticmethod
+  def unsafe_scalar_trace(op):
+    """Returns True if the scalar output tensor from Op is not safe to trace."""
+
+    # Tracing the following causes a cycle in the graph on TPU.
+    if op.type in ('LoopCond', 'Enter', 'Merge', 'Const',
+                   'Switch', 'Less', 'ReadVariableOp'):
+      return True
+    # Tracing the following will cause casting issues
+    # with the norm tracing mode or other compilation issues on CPU.
+    if op.type in ('VarHandleOp', 'IteratorToStringHandle',
+                   'IteratorGetNext', 'OneShotIterator',
+                   'IteratorV2', 'MakeIterator',
+                   'BatchDatasetV2', 'MapDataset',
+                   'FixedLengthRecordDataset', 'TakeDataset', 'ZipDataset',
+                   'Placeholder', 'PlaceholderWithDefault', 'StridedSlice'):
+      return True
+    return False
+
+  def _is_interesting_op(self, op):
+    """Returns True if the given op is an interesting one to be traced."""
+    return op_priority(op.type) <= self._parameters.trace_level
+
+  @staticmethod
+  def reason(op_idx, details):
+    """Returns reason why the Op at op_idx is traced or not."""
+
+    return '%d %s' % (op_idx, details)
+
+  def __init__(self):
+    """Initializes a TensorTracer.
+
+    Sets the various member fields from the flags (if given) or the defaults.
+    """
+    self._replica_id = None
+    self._tt_config = tensor_tracer_report.TensorTracerConfig()
+    self._parameters = tensor_tracer_flags.TTParameters()
+    self._host_call_fn = {}
+    # _cache_variables is a dict (key = graph, value = dicts
+    # (key = name, value = tensors))
+    self._cache_variables = {}
+    self._history_value_cache = {}
+
+    self._traced_op_names = set()
+    self._report_proto = None
+    # _temp_cache_var is a dict (key = graph, value = [])
+    self._temp_cache_var = {}
+    self._report_proto_path = ''
+    self._outmost_context = None
+
+  def report_proto(self):
+    """Getter for tensor_tracer.proto object for summary and full_tensor_summary modes.
+
+    Returns:
+      A tensor_tracer.proto object.
+    Raises:
+      ValueError if called before tracing happens, or when trace mode is not
+      summary or full_tensor_summary.
+    """
+    if self._report_proto:
+      return self._report_proto
+    else:
+      raise ValueError('Call to report_proto must be done after tracing. '
+                       'Report proto only exists for '
+                       'trace_mode=[summary|full_tensor_summary]')
+
+  def report_proto_path(self):
+    """Getter for path where tensor_tracer.proto object should be written.
+
+    Returns:
+      A string path.
+    """
+    return self._report_proto_path
+
+  def _escape_namescopes(self, variable_name):
+    return variable_name.replace('/', '_').replace(':', '_')
+
+  def _cache_variable_for_graph(self, graph):
+    if graph not in self._cache_variables:
+      self._cache_variables[graph] = {}
+    return self._cache_variables[graph]
+
+  def _create_or_get_tensor_history_values_cache(self,
+                                                 cache_name,
+                                                 graph,
+                                                 shape=None,
+                                                 dtype=dtypes.float32):
+    """Creates a variable as the cache to store historic intermediate tensor values.
+
+    Args:
+      cache_name: Name to be given to the cache (an instance of tf.variable).
+      graph: TensorFlow graph.
+      shape: A list of dimensions.
+      dtype: Data type of created cache.
+    Returns:
+      A ref to a newly created or existing cache with the given dimensions.
+    Raises:
+      ValueError:
+        (1) If graph is None, or
+        (2) shape is None when a new cache needs to be created.
+    """
+    if graph is None:
+      raise ValueError('Invalid graph.')
+
+    if graph not in self._history_value_cache:
+      self._history_value_cache[graph] = {}
+
+    if cache_name not in self._history_value_cache[graph]:
+      if shape is None:
+        raise ValueError('shape must be provided at cache creation.')
+      if dtype.is_integer:
+        init_val = int(_COMPACT_TRACE_ENTRY_INIT_VALUE)
+      else:
+        init_val = _COMPACT_TRACE_ENTRY_INIT_VALUE
+
+      # Create in proper graph and base name_scope.
+      with graph.as_default() as g, g.name_scope(None):
+        self._history_value_cache[graph][
+            cache_name] = variable_scope.get_variable(
+                'tt_history' + '_' + self._escape_namescopes(cache_name),
+                shape=shape,
+                dtype=dtype,
+                initializer=init_ops.constant_initializer(init_val),
+                trainable=False,
+                use_resource=True,
+                collections=[
+                    _TENSOR_TRACER_STORAGE, ops.GraphKeys.LOCAL_VARIABLES
+                ])
+
+    return self._history_value_cache[graph][cache_name]
+
+  def _create_or_get_tensor_values_cache(self, cache_name, graph,
+                                         shape=None, dtype=dtypes.float32):
+    """Creates a variable as the cache to store intermediate tensor values.
+
+    Args:
+      cache_name: Name to be given to the cache (an instance of tf.variable).
+      graph: TensorFlow graph.
+      shape: A list of dimensions.
+      dtype: Data type of created cache.
+    Returns:
+      A ref to a newly created or existing cache with the given dimensions.
+    Raises:
+      ValueError:
+        (1) If graph is None, or
+        (2) shape is None when a new cache needs to be created.
+    """
+    if graph is None:
+      raise ValueError('Invalid graph.')
+
+    graph_cache_var = self._cache_variable_for_graph(graph)
+
+    if cache_name not in graph_cache_var:
+      if shape is None:
+        raise ValueError('shape must be provided at cache creation.')
+      if dtype.is_integer:
+        init_val = int(_COMPACT_TRACE_ENTRY_INIT_VALUE)
+      else:
+        init_val = _COMPACT_TRACE_ENTRY_INIT_VALUE
+
+      # Create in proper graph and base name_scope.
+      with graph.as_default() as g, g.name_scope(None):
+        graph_cache_var[cache_name] = variable_scope.get_variable(
+            _TT_SNAPSHOT + '_' + self._escape_namescopes(cache_name),
+            shape=shape, dtype=dtype,
+            initializer=init_ops.constant_initializer(init_val),
+            trainable=False,
+            use_resource=True,
+            collections=[_TENSOR_TRACER_STORAGE, ops.GraphKeys.LOCAL_VARIABLES])
+    return graph_cache_var[cache_name]
+
+  def _add_replica_id_to_graph(self):
+    """Adds nodes for computing the replica ID to the graph."""
+
+    if self._tt_config.num_replicas:
+      with ops.control_dependencies(None):
+        # Uses None as dependency to run outside of TPU graph rewrites.
+        self._replica_id = tpu_ops.tpu_replicated_input(
+            list(range(self._tt_config.num_replicas)),
+            name='tt_replica_id')
+    else:
+      self._replica_id = 'unknown'
+
+  def _inside_op_range(self, idx):
+    """Returns True if the given index is inside the selected op range."""
+
+    if idx < self._parameters.op_range[0]:
+      return False
+    return (self._parameters.op_range[1] < 0 or
+            idx <= self._parameters.op_range[1])
+
+  def _is_user_included_op(self, op):
+    """Checks whether the op is included in the tensor tracer flags.
+
+    Args:
+      op: tf Operation
+    Returns:
+      True, if the op is included.
+      An op is included if:
+      - Its op name is given in included_opnames
+      - Its op type is given in included_optypes
+      - The op is at most _trace_ops_before_included hops before an included op
+      - The op is at most _trace_ops_after_included hops after an included op
+    """
+    for opname_re in self._parameters.included_opname_re_list:
+      if opname_re.match(op.name):
+        return True
+
+    for optype_re in self._parameters.included_optype_re_list:
+      if optype_re.match(op.type):
+        return True
+    return False
+
+  def _is_user_excluded_op(self, op):
+    for opname_re in self._parameters.excluded_opname_re_list:
+      if opname_re.match(op.name):
+        return True
+    for optype_re in self._parameters.excluded_optype_re_list:
+      if optype_re.match(op.type):
+        return True
+    return False
+
+  def _signature_types(self):
+    """Returns a dictionary holding the order of signatures in the cache for the selected trace mode."""
+    if self._parameters.trace_mode in set([
+        tensor_tracer_flags.TRACE_MODE_NAN_INF,
+        tensor_tracer_flags.TRACE_MODE_NORM,
+        tensor_tracer_flags.TRACE_MODE_HISTORY,
+        tensor_tracer_flags.TRACE_MODE_MAX_ABS]):
+      return {self._parameters.trace_mode: 0}
+    if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_SUMMARY:
+      return self._parameters.summary_signatures
+    return {}
+
+  def _num_signature_dimensions(self):
+    return len(self._signature_types())
+
+  def _use_temp_cache(self):
+    """Returns true if the intermediate values should be stacked instead of being stored in a tf.Variable.
+
+    Returns:
+      A boolean, denoting whether to use a temporary cache or not.
+    """
+    # If full tensors need to be stored in tf.Variables, then do not use temp
+    # variables to store them.
+    if self._use_tensor_buffer():
+      return False
+    if self._use_tensor_values_cache():
+      return self._parameters.use_temp_cache_var
+    else:
+      # Temporary caches only replace tf.Variable caches. If no cache is
+      # used, return False.
+      return False
+
+  def _use_tensor_values_cache(self):
+    """Returns True if intermediate tensors should be first saved to a cache."""
+    return self._parameters.use_compact_trace
+
+  def _use_tensor_buffer(self):
+    """Returns true if the whole tensor needs to be cached/buffered in memory."""
+    return (self._parameters.trace_mode ==
+            tensor_tracer_flags.TRACE_MODE_FULL_TENSOR_SUMMARY)
+
+  def _merge_tensor_signatures(self, signatures):
+    """Returns a tensor that merges the given signatures.
+
+    Args:
+      signatures: A dictionary of the signature updates from signature name to
+        a tensor of dimension [1].
+    Returns:
+      A tensor that concats the signature values in a predefined order.
+    Raises:
+      ValueError: Unable to merge signatures.
+    """
+    sorted_update = []
+    if self._num_signature_dimensions() > 1:
+      signature_indices = self._signature_types()
+      for _, val in sorted(signatures.items(),
+                           key=lambda item: signature_indices[item[0]]):
+        sorted_update.append(val)
+      updates = array_ops_stack.stack(
+          sorted_update, axis=0, name='merge_single_op_signatures')
+    elif self._num_signature_dimensions() == 1:
+      # Avoid stack operation if there is only a single signature.
+      (_, val), = signatures.items()
+      updates = val
+    else:
+      raise ValueError('Cannot merge 0 signatures. Check the value passed for '
+                       'flag --signatures.')
+    return updates
+
+  def _save_tensor_value_to_tmp_cache(self, cache_idx, updates, graph):
+    """Saves the given updates to an entry in the temporary cache.
+
+    Args:
+      cache_idx: The cache index of the tensor within the cache.
+      updates: A dictionary of the signature updates from signature name to
+        a tensor of dimension [1].
+      graph: A TensorFlow graph.
+    Raises:
+      RuntimeError:
+        (1) graph is not already in self._temp_cache_var, or
+        (2) cache_idx is out of range.
+    """
+    updates = self._merge_tensor_signatures(updates)
+    updates = array_ops.reshape(updates,
+                                [self._num_signature_dimensions()])
+    if graph not in self._temp_cache_var:
+      raise RuntimeError('graph is not in self._temp_cache_var')
+    if cache_idx >= len(self._temp_cache_var[graph]):
+      raise RuntimeError('cache_idx (%d) is out of range (%d)' % (
+          cache_idx, len(self._temp_cache_var[graph])))
+    self._temp_cache_var[graph][cache_idx] = updates
+
+  def _save_tensor_value_to_cache_op(self, cache_idx, updates, graph):
+    """Returns an op that will save the given updates to an entry in the cache.
+
+    Args:
+      cache_idx: The cache index of the tensor within the cache.
+      updates: A dictionary of the signature updates.
+      graph: A TensorFlow graph.
+    Returns:
+      Cache update operation.
+    """
+    # state_ops.scatter_update allows updates only along the first dimension.
+    # Make a compact array by concatenating different signatures, and update
+    # them all together.
+    updates = self._merge_tensor_signatures(updates)
+    updates = array_ops.reshape(updates,
+                                [1, self._num_signature_dimensions()])
+    indices = constant_op.constant([cache_idx])
+    cache = self._create_or_get_tensor_values_cache(_TT_SUMMARY_TAG, graph)
+    return state_ops.scatter_update(cache, indices, updates).op
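+
+  # Illustrative sketch (not part of the original module): the compact cache
+  # is a [num_traced_tensors, num_signatures] variable, and the update above
+  # writes one row per traced tensor. With 3 traced tensors and the
+  # signatures {'max': 0, 'norm': 1}, tracing tensor #1 amounts to roughly:
+  #
+  #   cache = tf.compat.v1.get_variable('tt_cache', shape=[3, 2])
+  #   updates = tf.reshape(tf.stack([max_sig, norm_sig]), [1, 2])
+  #   update_op = tf.compat.v1.scatter_update(cache, [1], updates)
+  #
+  # where max_sig and norm_sig are the scalar signatures of the traced tensor.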
+
+  def _snapshot_tensor(self, tensor):
+    """Creates a new tf.Variable and a new tf.Operation that assigns the value of the tensor to this variable.
+
+    Args:
+      tensor: tensor whose values will be stored in a new tf.Variable.
+    Returns:
+      An assignment operation.
+    """
+
+    snapshot_variable = self._create_or_get_tensor_values_cache(
+        tensor.name, tensor.op.graph,
+        tensor.shape.as_list(), tensor.dtype)
+    return state_ops.assign(snapshot_variable, tensor).op
+
+  def _preprocess_traced_tensor(self, tensor):
+    """Computes NAN/Norm/Max on TPUs before sending to CPU.
+
+    Args:
+      tensor: The tensor to be traced.
+    Returns:
+      A tensor that should be input to the trace_function.
+    Raises:
+      RuntimeError: If the signature is invalid.
+    """
+
+    def _detect_nan_inf(tensor):
+      """Trace function for detecting any NaN/Inf in the tensor."""
+
+      if tensor.dtype.is_floating:
+        mask = math_ops.reduce_any(
+            gen_math_ops.logical_or(
+                gen_math_ops.is_nan(tensor), gen_math_ops.is_inf(tensor)))
+        output_tensor = cond.cond(
+            mask,
+            lambda: constant_op.constant([1.0]),
+            lambda: constant_op.constant([0.0]))
+      else:
+        output_tensor = constant_op.constant([0.0])
+      return output_tensor
+
+    def _compute_signature(tensor, tf_op, cast_to_f32=True):
+      if cast_to_f32:
+        tensor = math_ops.cast(tensor, dtypes.float32)
+      output_tensor = tf_op(tensor)
+      # The return type should be scalar. Set it if it does not have the
+      # information.
+      if not output_tensor.get_shape().is_fully_defined():
+        output_tensor = array_ops.reshape(output_tensor, [])
+      return output_tensor
+
+    def _show_size(tensor):
+      # In order to check the size of a tensor.
+      # Not all sizes are known at compile time; also, different replicas
+      # sometimes get different sizes of tensors.
+      # Collect it here to be used in merging replica data.
+      tsize = _compute_signature(tensor, array_ops.size, cast_to_f32=False)
+      # Cast to float32, so that it can be placed into the same cache with
+      # other signatures.
+      return math_ops.cast(tsize, dtypes.float32)
+
+    def _show_max(tensor, cast_to_f32=True):
+      # returns -inf for empty tensor
+      return _compute_signature(tensor, math_ops.reduce_max, cast_to_f32)
+
+    def _show_min(tensor, cast_to_f32=True):
+      # returns inf for empty tensor
+      return _compute_signature(tensor, math_ops.reduce_min, cast_to_f32)
+
+    def _show_norm(tensor, cast_to_f32=True):
+      # returns 0 for empty tensor
+      return _compute_signature(tensor, linalg_ops.norm, cast_to_f32)
+
+    def _show_sparsity(tensor, cast_to_f32=True, tolerance=1e-06):
+      # returns nan for empty tensor and treats nans as non-zero numbers
+      def sparsity_fn(tensor):
+        non_zeros = math_ops.greater_equal(math_ops.abs(tensor), tolerance)
+        nans = math_ops.is_nan(tensor)
+        return nn_impl.zero_fraction(math_ops.logical_or(non_zeros, nans))
+
+      return _compute_signature(tensor, sparsity_fn, cast_to_f32)
+
+    def _show_mean_and_variance(tensor, cast_to_f32=True):
+      """Returns the mean and variance of the given tensor."""
+      if cast_to_f32:
+        tensor = math_ops.cast(tensor, dtypes.float32)
+      # returns nan for empty tensor
+      mean, var = nn_impl.moments(array_ops.reshape(tensor, [-1]), axes=[0])
+      # The result has to be a scalar. Set it if it does not have the
+      # information.
+      if not mean.get_shape().is_fully_defined():
+        mean = array_ops.reshape(mean, [])
+      if not var.get_shape().is_fully_defined():
+        var = array_ops.reshape(var, [])
+      return mean, var
+
+    def _show_max_abs(tensor, cast_to_f32=True):
+      return _compute_signature(
+          tensor, lambda t: math_ops.reduce_max(math_ops.abs(t)), cast_to_f32)
+
+    if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_NAN_INF:
+      return {self._parameters.trace_mode: _detect_nan_inf(tensor)}
+    if (self._parameters.trace_mode ==
+        tensor_tracer_flags.TRACE_MODE_PART_TENSOR):
+      return {self._parameters.trace_mode: tensor}
+    if (self._parameters.trace_mode in (
+        tensor_tracer_flags.TRACE_MODE_FULL_TENSOR,
+        tensor_tracer_flags.TRACE_MODE_FULL_TENSOR_SUMMARY)):
+      return {self._parameters.trace_mode: tensor}
+    if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_NORM:
+      return {self._parameters.trace_mode: array_ops.reshape(
+          _show_norm(tensor), [1])}
+    if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_HISTORY:
+      return {self._parameters.trace_mode: array_ops.reshape(
+          _show_norm(tensor), [1])}
+    if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_MAX_ABS:
+      return {self._parameters.trace_mode: _show_max_abs(tensor)}
+
+    if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_SUMMARY:
+      tensor = math_ops.cast(tensor, dtypes.float32)
+      result_dict = {}
+      # Call mean and variance computation here to avoid adding the same
+      # nodes twice.
+      if (_TT_SUMMARY_MEAN in self._signature_types() or
+          _TT_SUMMARY_VAR in self._signature_types()):
+        mean, variance = _show_mean_and_variance(tensor, cast_to_f32=False)
+
+      for signature_name, _ in sorted(self._signature_types().items(),
+                                      key=lambda x: x[1]):
+        if signature_name == _TT_SUMMARY_NORM:
+          signature_result_tensor = _show_norm(tensor, cast_to_f32=False)
+        elif signature_name == _TT_SUMMARY_MAX:
+          signature_result_tensor = _show_max(tensor, cast_to_f32=False)
+        elif signature_name == _TT_SUMMARY_MAX_ABS:
+          signature_result_tensor = _show_max_abs(tensor, cast_to_f32=False)
+        elif signature_name == _TT_SUMMARY_MIN:
+          signature_result_tensor = _show_min(tensor, cast_to_f32=False)
+        elif signature_name == _TT_SUMMARY_SPARSITY:
+          signature_result_tensor = _show_sparsity(tensor)
+        elif signature_name == _TT_SUMMARY_SIZE:
+          signature_result_tensor = _show_size(tensor)
+        elif signature_name == _TT_SUMMARY_MEAN:
+          signature_result_tensor = mean
+        elif signature_name == _TT_SUMMARY_VAR:
+          signature_result_tensor = variance
+        else:
+          raise ValueError('Unknown signature type: %s.' % signature_name)
+
+        result_dict[signature_name] = signature_result_tensor
+      return result_dict
+
+    raise RuntimeError(
+        'Unsupported signature for trace mode %s.'
+        % self._parameters.trace_mode)
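+
+  # Illustrative sketch (not part of the original module): what the
+  # per-tensor signatures above compute, expressed in NumPy for a concrete
+  # value.
+  #
+  #   import numpy as np
+  #   t = np.array([[0.0, 3.0], [-4.0, 12.0]], dtype=np.float32)
+  #   np.isnan(t).any() or np.isinf(t).any()  # nan-inf signature -> False
+  #   np.linalg.norm(t.reshape(-1))           # norm signature -> 13.0
+  #   np.abs(t).max()                         # max-abs signature -> 12.0
+  #
+  # Each signature is reduced to a scalar on the device, so only a handful of
+  # values per traced tensor leave the TPU.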
+ """ + + if self._parameters.is_brief_mode(): + if tensor_name not in tensor_trace_order.tensorname_to_cache_idx: + raise ValueError( + 'Tensor %s with name %s is not in the tensorname_to_cache_idx' % + (tensor, tensor_name)) + msg = '%d' % tensor_trace_order.tensorname_to_cache_idx[tensor_name] + else: + msg = '"%s"' % tensor_name + + if self._parameters.trace_dir: + output_path = os.path.join( + self._parameters.trace_dir, + _TRACE_FILE_NAME + self._get_outfile_suffix()) + output_stream = _OUTPUT_STREAM_ESCAPE + output_path + else: + output_stream = sys.stderr + return logging_ops.print_v2(msg, array_ops.shape(output_tensor), + '@', self._replica_id, + '\n', output_tensor, '\n', + summarize=num_elements, + output_stream=output_stream) + + def _show_part_tensor(tensor): + """Trace function for printing part of the tensor.""" + + return _print_tensor(tensor_name, _TRACE_MODE_PART_TENSOR_SIZE, + tensor, tensor) + + def _show_full_tensor(tensor): + """Trace function for printing the entire tensor.""" + + return _print_tensor(tensor_name, -1, tensor, tensor) + + if (self._parameters.trace_mode == + tensor_tracer_flags.TRACE_MODE_PART_TENSOR): + return _show_part_tensor + # The input tensor has a shape of "[1]" for TRACE_MODE_NAN_INF, + # TRACE_MODE_NORM, and TRACE_MODE_MAX_ABS, as related computations are + # performed within TPUs and only their results are transferred to CPU. + # Simply, print the full tensor for these trace modes. + if self._parameters.trace_mode in ( + tensor_tracer_flags.TRACE_MODE_NAN_INF, + tensor_tracer_flags.TRACE_MODE_NORM, + tensor_tracer_flags.TRACE_MODE_FULL_TENSOR, + tensor_tracer_flags.TRACE_MODE_MAX_ABS, + tensor_tracer_flags.TRACE_MODE_SUMMARY, + tensor_tracer_flags.TRACE_MODE_HISTORY + ): + return _show_full_tensor + + raise RuntimeError('Full tensor support is not available with trace mode %s' + %self._parameters.trace_mode) + + def _is_in_control_flow(self, op): + """Returns true if the given op is inside a tf.cond or in tf.while_loop. + + Args: + op: A tensorflow op that should be checked whether in control flow or not. + Returns: + A boolean value whether the op is in control flow or not. + """ + return control_flow_util.IsInCond(op) + + def _is_in_outmost_while_loop(self, op): + """Returns true if the op is at the same level with the training loop. + + Returns false if the op is in an inner while loop or if it is outside of the + training loop. + Args: + op: tf.Operation + + Returns: + A boolean. + """ + ctxt = self._get_op_control_flow_context(op) + outer_while_context = control_flow_util.GetContainingWhileContext(ctxt) + return outer_while_context == control_flow_util.GetContainingWhileContext( + self._outmost_context) + + def _should_trace_in_control_flow(self): + """Returns false incase it is not safe to trace ops in tf.cond or tf.while_loop.""" + # As different from the other trace modes, TRACE_MODE_OPTIONAL_SUMMARY + # forces the execution of the traced tensors. We should not trace the ops + # that may not be executed due to control flow. + if self._use_temp_cache(): + return False + elif self._tt_config.device_type == _DEVICE_TYPE_TPU: + # On TPUs do not trace in control flow unless we use caches to store + # intermediate values as calling outside compilation within an inner loop + # causes errors. + return self._use_tensor_values_cache() or self._use_tensor_buffer() + return True + + def _skip_op(self, op_id, op, ops_in_exec_path, report_handler): + """Returns True if we should not trace Op. + + Args: + op_id: Topological index of the op. 
+
+  def _skip_op(self, op_id, op, ops_in_exec_path, report_handler):
+    """Returns True if we should not trace Op.
+
+    Args:
+      op_id: Topological index of the op.
+      op: tf.Operation
+      ops_in_exec_path: Set of operations that are in the execution path.
+      report_handler: An instance of tensor_tracer_report.TTReportHandle.
+    Returns:
+      True if the op should not be traced, false otherwise.
+    """
+    if TensorTracer.while_loop_op(op):
+      report_handler.instrument_op(
+          op, TensorTracer.reason(op_id, _REASON_WHILELOOP_OP))
+      return True
+    if TensorTracer.control_flow_op(op):
+      report_handler.instrument_op(
+          op, TensorTracer.reason(op_id, _REASON_CONTROLFLOW_OP))
+      return True
+    if TensorTracer.unsafe_op(op):
+      report_handler.instrument_op(
+          op, TensorTracer.reason(op_id, _REASON_UNSAFE_OP))
+      return True
+    if TensorTracer.device_mismatch(self._tt_config.device_type, op):
+      report_handler.instrument_op(
+          op, TensorTracer.reason(op_id, _REASON_DEVICE_MISMATCH))
+      return True
+    if op not in ops_in_exec_path:
+      report_handler.instrument_op(
+          op, TensorTracer.reason(op_id, _REASON_NOT_EXECUTED))
+      return True
+    # TensorTracer will not trace the operations that are in an inner while
+    # loop or tf.cond when a temporary cache is used. A temporary cache adds
+    # direct data dependencies to traced operations, and needs a static number
+    # of traced operations. For these cases:
+    # - We do not know the number of slots required when there are inner while
+    #   loops. TensorTracer can only trace the result of a while loop.
+    # - We do not know ahead of time which branch of the tf.cond
+    #   will be taken, so we avoid introducing data dependencies for the
+    #   operations inside a tf.cond.
+    # - We also cannot have a data dependency to an operation in a different
+    #   while context.
+    if self._is_in_control_flow(op) or not self._is_in_outmost_while_loop(op):
+      if not self._should_trace_in_control_flow():
+        report_handler.instrument_op(
+            op, TensorTracer.reason(op_id, _REASON_IN_CONTROL_FLOW))
+        return True
+    if self._is_user_included_op(op):
+      report_handler.instrument_op(
+          op, TensorTracer.reason(op_id, _REASON_USER_INCLUDED))
+      if tensor_tracer_flags.TT_CHECK_FILTER.value:
+        logging.info('USER_INCLUDED op %s', op.name)
+      return False
+
+    if not self._inside_op_range(op_id):
+      report_handler.instrument_op(
+          op, TensorTracer.reason(op_id, _REASON_OUTSIDE_OP_RANGE))
+      return True
+    if not self._is_interesting_op(op):
+      report_handler.instrument_op(
+          op, TensorTracer.reason(op_id, _REASON_LESS_INTERESTING_OP))
+      return True
+    if self._is_user_excluded_op(op):
+      report_handler.instrument_op(
+          op, TensorTracer.reason(op_id, _REASON_USER_EXCLUDED))
+      if tensor_tracer_flags.TT_CHECK_FILTER.value:
+        logging.info('USER_EXCLUDED op %s', op.name)
+      return True
+    return False
+
+  def _skip_tensor(self, op_id, out_tensor, report_handler):
+    """Returns True if we should not trace out_tensor.
+
+    Args:
+      op_id: Topological index of the op producing the tensor.
+      out_tensor: tf.Tensor
+      report_handler: An instance of tensor_tracer_report.TTReportHandle.
+    Returns:
+      True if the tensor should not be traced, false otherwise.
+    """
+
+    # Skips a tensor if the tensor has a non-numeric type.
+    #   Note: we cannot use check_ops.is_numeric_tensor(out_tensor)
+    #         because it also excludes tensors with dtypes bool and
+    #         float32_ref, which we actually want to trace.
+    non_numeric_tensor_types = set([dtypes.variant, dtypes.resource,
+                                    dtypes.string])
+    if out_tensor.dtype in non_numeric_tensor_types:
+      report_handler.instrument_tensor(
+          out_tensor, TensorTracer.reason(op_id, _REASON_NON_NUMERIC_TENSOR))
+      return True
+    # Skip a tensor if it feeds a special while loop op.
+    if [consumer for consumer in out_tensor.consumers() if
+        TensorTracer.while_loop_op(consumer)]:
+      report_handler.instrument_tensor(
+          out_tensor, TensorTracer.reason(op_id, _REASON_FEEDS_WHILELOOP_OP))
+      return True
+    if self._is_user_included_op(out_tensor.op):
+      report_handler.instrument_tensor(
+          out_tensor, TensorTracer.reason(op_id, _REASON_USER_INCLUDED))
+      if tensor_tracer_flags.TT_CHECK_FILTER.value:
+        logging.info('USER_INCLUDED tensor %s', out_tensor.name)
+      return False
+    if self._is_user_excluded_op(out_tensor.op):
+      report_handler.instrument_tensor(
+          out_tensor, TensorTracer.reason(op_id, _REASON_USER_EXCLUDED))
+      if tensor_tracer_flags.TT_CHECK_FILTER.value:
+        logging.info('USER_EXCLUDED tensor %s', out_tensor.name)
+      return True
+    if not out_tensor.get_shape().is_fully_defined():
+      # If trace mode is nan-inf, norm or max, then the tensor will be reduced
+      # to a scalar before the outside compilation call.
+      if self._parameters.trace_mode in (
+          tensor_tracer_flags.TRACE_MODE_NAN_INF,
+          tensor_tracer_flags.TRACE_MODE_NORM,
+          tensor_tracer_flags.TRACE_MODE_HISTORY,
+          tensor_tracer_flags.TRACE_MODE_MAX_ABS,
+          tensor_tracer_flags.TRACE_MODE_SUMMARY
+      ):
+        report_handler.instrument_tensor(
+            out_tensor, TensorTracer.reason(op_id, _REASON_TENSOR_GET_TRACED))
+        return False
+      else:
+        report_handler.instrument_tensor(
+            out_tensor, TensorTracer.reason(op_id, _REASON_DYNAMIC_SHAPE))
+        return True
+    rank = len(out_tensor.shape)
+    if rank < 1:
+      # scalar
+      if self._parameters.trace_scalar_ops:
+        if TensorTracer.unsafe_scalar_trace(out_tensor.op):
+          report_handler.instrument_tensor(
+              out_tensor, TensorTracer.reason(op_id, _REASON_UNSAFE_SCALAR))
+          return True
+        else:
+          report_handler.instrument_tensor(
+              out_tensor, TensorTracer.reason(op_id,
+                                              _REASON_SCALAR_GET_TRACED))
+          return False
+      else:
+        report_handler.instrument_tensor(
+            out_tensor, TensorTracer.reason(op_id, _REASON_SKIP_SCALAR))
+        return True
+    else:
+      # tensor
+      report_handler.instrument_tensor(
+          out_tensor, TensorTracer.reason(op_id, _REASON_TENSOR_GET_TRACED))
+      return False
+
+  def _filter_execution_path_operations(self, operations, fetches):
+    """Returns the set of ops in the execution path to compute given fetches."""
+
+    # If no fetch provided, then return all operations.
+    if fetches is None:
+      return set(operations)
+    # Convert to list, if a single element is provided.
+    if not isinstance(fetches, (list, tuple)):
+      fetches = [fetches]
+    # If a tensor is given as fetch, convert it to op.
+    op_fetches = []
+    for fetch in fetches:
+      if isinstance(fetch, ops.Operation):
+        op_fetches.append(fetch)
+      elif isinstance(fetch, tensor_lib.Tensor):
+        op_fetches.append(fetch.op)
+      else:
+        raise RuntimeError('Given fetch:%s is neither a tensor nor an op.'
+                           % fetch)
+
+    execution_path_operations = set(op_fetches)
+    traverse_stack = list(op_fetches)
+    while True:
+      if not traverse_stack:
+        break
+      head_op = traverse_stack.pop()
+      input_ops = [tensor_input.op for tensor_input in head_op.inputs]
+      input_ops.extend(head_op.control_inputs)
+
+      for input_op in input_ops:
+        if input_op not in execution_path_operations:
+          # Filter out loop condition operations; tracing them causes a cycle.
+          # Trace only the loop-body.
+          if TensorTracer.loop_cond_op(input_op):
+            continue
+          execution_path_operations.add(input_op)
+          traverse_stack.append(input_op)
+    return execution_path_operations
+
+  def _determine_and_instrument_traced_tensors(self, graph_order,
+                                               ops_in_exec_path,
+                                               tensor_trace_points,
+                                               report_handler):
+    """Determines the tensors to trace and instruments the trace details.
+
+    Args:
+      graph_order: graph_order tuple containing graph (tf.graph), operations
+        (list of operations), op_to_idx (op id mapping), tensors (list of
+        tensors), tensor_to_idx (tensor id mapping), contains_cycle (whether
+        there is a cycle in the graph), topological_order_or_cycle (list of
+        ops in topological order or list of ops creating a cycle).
+      ops_in_exec_path: Set of ops in the execution path.
+      tensor_trace_points: Collection of programmatic tensor trace points.
+      report_handler: An instance of tensor_tracer_report.TTReportHandle.
+    Returns:
+      List of tensors to be traced.
+    """
+
+    traced_tensors = []
+    checkpoint_operations = set([tensor.op
+                                 for (tensor, _) in tensor_trace_points])
+    for op_id, op in enumerate(graph_order.operations):
+      if checkpoint_operations and op not in checkpoint_operations:
+        continue
+      if self._skip_op(op_id, op, ops_in_exec_path, report_handler):
+        continue
+      for i in range(len(op.outputs)):
+        out_tensor = op.outputs[i]
+        if not self._skip_tensor(op_id, out_tensor, report_handler):
+          traced_tensors.append(out_tensor)
+    return traced_tensors
+
+  def _check_trace_files(self):
+    """Checks if any requirements for trace files are satisfied."""
+
+    if not self._parameters.trace_dir:
+      # Traces will be written to stderr. No need to check trace files.
+      return
+    if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_SUMMARY:
+      # Output files are handled by tf.summary operations, no need to
+      # precreate them.
+      return
+    if not gfile.Exists(self._parameters.trace_dir):
+      file_io.recursive_create_dir(self._parameters.trace_dir)
+      if not gfile.Exists(self._parameters.trace_dir):
+        raise RuntimeError('Failed to create trace directory at %s' %
+                           self._parameters.trace_dir)
+
+  def _create_temp_cache(self, num_traced_tensors, num_signatures, graph):
+    """Creates a temporary cache with the given dimensions.
+
+    Fills the self._temp_cache_var with num_traced_tensors tf.constant() ops
+    that have shape of [num_signatures].
+    Args:
+      num_traced_tensors: Int, denoting the total number of traced tensors.
+      num_signatures: Int, denoting the number of statistics collected per
+        tensor.
+      graph: TensorFlow graph.
+    """
+    init_value = constant_op.constant(_COMPACT_TRACE_ENTRY_INIT_VALUE,
+                                      dtype=dtypes.float32,
+                                      shape=[num_signatures])
+    self._temp_cache_var[graph] = [
+        init_value for _ in range(num_traced_tensors)]
+
+  def _determine_trace_and_create_report(self, graph, ops_in_exec_path,
+                                         graph_summary_tag):
+    """Performs the work needed prior to TPU or CPU tracing.
+
+    Args:
+      graph: tf.graph
+      ops_in_exec_path: Set of operations in the execution path.
+      graph_summary_tag: the summary tag name for the given graph.
+    Returns:
+      An instance of tensor_tracer_report.TensorTraceOrder, containing list of
+      tensors to be traced with their topological order information.
+    Raises:
+      RuntimeError: If opname filtering is incorrectly set.
+    """
+
+    self._check_trace_files()
+
+    graph_order = tensor_tracer_report.sort_tensors_and_ops(graph)
+    tensor_trace_points = graph.get_collection(_TENSOR_TRACER_COLLECTION)
+
+    report_handler = tensor_tracer_report.TTReportHandle()
+    traced_tensors = self._determine_and_instrument_traced_tensors(
+        graph_order, ops_in_exec_path, tensor_trace_points, report_handler)
+    logging.info('TensorTracer is tracing %d tensors.', len(traced_tensors))
+    if traced_tensors and tensor_tracer_flags.TT_CHECK_FILTER.value:
+      raise RuntimeError('Verify ops being traced by tensor tracer.')
+
+    tensor_trace_order = tensor_tracer_report.TensorTraceOrder(graph_order,
+                                                               traced_tensors)
+    num_signatures = self._num_signature_dimensions()
+    # Create a cache variable if compact_tracing is used.
+    if num_signatures and self._use_tensor_values_cache():
+      if self._use_temp_cache():
+        self._create_temp_cache(len(traced_tensors), num_signatures, graph)
+      else:
+        self._create_or_get_tensor_values_cache(
+            _TT_SUMMARY_TAG, graph, [len(traced_tensors), num_signatures])
+        if (self._parameters.trace_mode ==
+            tensor_tracer_flags.TRACE_MODE_HISTORY):
+          self._create_or_get_tensor_history_values_cache(
+              _TT_SUMMARY_TAG, graph, [len(traced_tensors), num_signatures])
+    if self._parameters.trace_mode in (
+        tensor_tracer_flags.TRACE_MODE_SUMMARY,
+        tensor_tracer_flags.TRACE_MODE_FULL_TENSOR_SUMMARY):
+      self._report_proto = report_handler.create_report_proto(
+          self._tt_config, self._parameters, tensor_trace_order,
+          tensor_trace_points, self._signature_types())
+      if self._parameters.use_fingerprint_subdir:
+        self._parameters.trace_dir = os.path.join(
+            self._parameters.trace_dir, self._report_proto.fingerprint)
+        logging.info('TensorTracer updating trace_dir to %s',
+                     self._parameters.trace_dir)
+      self._report_proto_path = report_handler.report_proto_path(
+          self._parameters.trace_dir, graph_summary_tag)
+
+      if self._parameters.report_file_path != _SKIP_REPORT_FILE:
+        report_handler.write_report_proto(self._report_proto_path,
+                                          self._report_proto,
+                                          self._parameters)
+    else:
+      if (self._parameters.trace_mode !=
+          tensor_tracer_flags.TRACE_MODE_HISTORY):
+        report_handler.create_report(self._tt_config, self._parameters,
+                                     tensor_trace_order, tensor_trace_points)
+    return tensor_trace_order
+
+  def _create_host_call(self):
+    return self._parameters.trace_mode in (
+        tensor_tracer_flags.TRACE_MODE_SUMMARY,
+        tensor_tracer_flags.TRACE_MODE_FULL_TENSOR_SUMMARY)
+
+  def _inspect_summary_cache(self, cache, replica_id, step_num, output_stream,
+                             tensor_trace_order):
+    """Generates a print operation to print trace inspection.
+
+    Args:
+      cache: Tensor storing the trace results for the step.
+      replica_id: Tensor storing the replica id of the running core.
+      step_num: Step number.
+      output_stream: Where to print the outputs, e.g., file path, or
+        sys.stderr.
+      tensor_trace_order: TensorTraceOrder object holding tensorname to id
+        map.
+
+    Returns:
+      The Op to flush the cache to file.
+    """
+    def _inspect_tensor(tensor):
+      """Returns the text to be printed for inspection output."""
+      if (self._parameters.trace_mode ==
+          tensor_tracer_flags.TRACE_MODE_NAN_INF):
+        return cond.cond(
+            math_ops.greater(tensor, 0.0),
+            lambda: 'has NaNs/Infs!',
+            lambda: 'has no NaNs or Infs.')
+      else:
+        return tensor
+
+    # Check if there are graph operations being profiled.
+    if not tensor_trace_order.traced_tensors:
+      logging.warn('Inspect mode has no tensors in the cache to check.')
+      return control_flow_ops.no_op()
+
+    # Check if the cache includes any nan or inf.
+    if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_NAN_INF:
+      # Cache has 1s or 0s if the mode is NaN_INF.
+      step_has_nan_or_inf = math_ops.greater(math_ops.reduce_sum(cache), 0.0)
+    else:
+      # Cache has the actual numerics for other modes.
+      step_has_nan_or_inf = math_ops.reduce_any(
+          gen_math_ops.logical_or(
+              gen_math_ops.is_nan(cache), gen_math_ops.is_inf(cache)))
+
+    # Summarizing message for each step.
+    step_error_message = cond.cond(
+        step_has_nan_or_inf,
+        lambda: 'NaNs or Infs in the step!',
+        lambda: 'No numerical issues have been found for the step.')
+
+    # No need to print core numbers if the cache is merged already.
+    if self._parameters.collect_summary_per_core:
+      stats = ['\n\n', 'core:', replica_id, ',', 'step:', step_num, '-->',
+               step_error_message,
+               'Printing tensors for mode:%s...' % self._parameters.trace_mode]
+    else:
+      stats = ['\n\n', 'step:', step_num, '-->', step_error_message,
+               'Printing tensors for mode:%s...' % self._parameters.trace_mode]
+
+    for tensor_name, cache_idx in sorted(
+        tensor_trace_order.tensorname_to_cache_idx.items(),
+        key=lambda item: item[1]):
+      if self._parameters.collect_summary_per_core:
+        stats.extend([
+            '\n', 'core:', replica_id, ',', 'step:', step_num, ',',
+            tensor_name, '-->', _inspect_tensor(cache[cache_idx, 0])])
+      else:
+        stats.extend([
+            '\n', 'step:', step_num, ',',
+            tensor_name, '-->', _inspect_tensor(cache[cache_idx, 0])])
+    return logging_ops.print_v2(*stats, summarize=-1,
+                                output_stream=output_stream)
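+
+  # Illustrative sketch (not part of the original module): in nan-inf inspect
+  # mode the op built above prints, per step, lines roughly like
+  #
+  #   step: 1200 --> NaNs or Infs in the step! Printing tensors for
+  #     mode:nan-inf...
+  #   step: 1200 , dense/MatMul:0 --> has no NaNs or Infs.
+  #   step: 1200 , dense/LogSoftmax:0 --> has NaNs/Infs!
+  #
+  # with a leading 'core:<id>' field when collect_summary_per_core is set.
+  # The tensor names here are hypothetical.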
+
+  def _inspect_history_cache(self, cache, replica_id, step_num,
+                             tensor_trace_order):
+    """Generates a conditional print operation to log differences in tensor values.
+
+    Args:
+      cache: Tensor storing the trace results for the step.
+      replica_id: Tensor storing the replica id of the running core.
+      step_num: Step number.
+      tensor_trace_order: TensorTraceOrder object holding tensorname to id
+        map.
+
+    Returns:
+      The Op to flush the cache to file.
+    """
+    # Check if there are graph operations being profiled.
+    if not tensor_trace_order.traced_tensors:
+      logging.warn('TT history mode has no tensors in the cache to check.')
+      return control_flow_ops.no_op()
+
+    stats = ['\n\n', 'core:', replica_id, ',', 'step:', step_num]
+    diffs = []
+    for tensor_name, cache_idx in sorted(
+        tensor_trace_order.tensorname_to_cache_idx.items(),
+        key=lambda item: item[1]):
+
+      tensor_to_write = cache[cache_idx, 0]
+      snapshot_variable = self._create_or_get_tensor_history_values_cache(
+          tensor_to_write.name, tensor_to_write.op.graph,
+          tensor_to_write.shape.as_list(), tensor_to_write.dtype)
+
+      with ops.control_dependencies([snapshot_variable]):
+        old_value = state_ops.assign_add(snapshot_variable, 0.0)
+
+      with ops.control_dependencies([old_value]):
+        new_value = math_ops.cast(tensor_to_write, dtypes.float32)
+        delta = math_ops.abs(math_ops.subtract(old_value, new_value))
+        updated = state_ops.assign(snapshot_variable, new_value)
+        diffs.append(delta)
+      with ops.control_dependencies([updated]):
+        new_value_from_var = state_ops.assign_add(snapshot_variable, 0.0)
+
+      stats.extend([
+          '\n', 'core:', replica_id, ',', 'step:', step_num, ',',
+          tensor_name, '-->', old_value, new_value_from_var, delta])
+
+    diff_stack = array_ops_stack.stack(diffs)
+    step_max = math_ops.reduce_max(diff_stack)
+
+    return cond.cond(
+        math_ops.greater(step_max, tensor_tracer_flags.DELTA_THRESHOLD.value),
+        lambda: logging_ops.print_v2(*stats, summarize=-1),
+        lambda: control_flow_ops.no_op())  # pylint: disable=unnecessary-lambda
+
+  def _get_outfile_suffix(self):
+    if remote_utils.is_remote_path(self._parameters.trace_dir):
+      return remote_utils.get_appendable_file_encoding()
+    else:
+      return ''
+
+  def _generate_flush_cache_op(self, num_replicas, on_tpu,
+                               tensor_trace_order, graph):
+    """Generates an Op that will flush the cache to file.
+
+    Args:
+      num_replicas: total number of replicas.
+      on_tpu: if the graph is executed on TPU.
+      tensor_trace_order: TensorTraceOrder object holding tensorname to id
+        map.
+      graph: TensorFlow graph.
+
+    Returns:
+      The Op to flush the cache to file.
+    """
+
+    def _flush_fun(cache, replica_id, step_num):
+      """Flushes the cache to a file corresponding to replica_id."""
+
+      def _f(file_index):
+        """Generates a func that flushes the cache to a file."""
+        def _print_cache():
+          """Flushes the cache to a file."""
+          replica_str = ('%d' % file_index)
+          if self._parameters.trace_dir:
+            output_path = (os.path.join(self._parameters.trace_dir,
+                                        _COMPACT_TRACE_FILE_PREFIX)
+                           + replica_str + self._get_outfile_suffix())
+            output_stream = _OUTPUT_STREAM_ESCAPE + output_path
+          else:
+            output_stream = sys.stderr
+
+          new_step_line = _REPLICA_ID_TAG + replica_str
+          print_ops = []
+          if self._parameters.inspect_trace:
+            if self._num_signature_dimensions() > 1:
+              raise ValueError('Inspecting multiple signatures is not '
+                               'supported.')
+            if (self._parameters.trace_mode ==
+                tensor_tracer_flags.TRACE_MODE_HISTORY):
+              print_ops.append(
+                  self._inspect_history_cache(
+                      cache=cache,
+                      replica_id=replica_id,
+                      step_num=step_num,
+                      tensor_trace_order=tensor_trace_order))
+            else:
+              print_ops.append(
+                  self._inspect_summary_cache(
+                      cache=cache,
+                      replica_id=replica_id,
+                      step_num=step_num,
+                      output_stream=output_stream,
+                      tensor_trace_order=tensor_trace_order))
+          else:
+            for i in range(self._num_signature_dimensions()):
+              print_ops.append(logging_ops.print_v2(
+                  new_step_line, '\n',
+                  cache[:, i], '\n',
+                  summarize=-1,
+                  output_stream=output_stream))
+          with ops.control_dependencies(print_ops):
+            return constant_op.constant(0).op
+        return _print_cache
+
+      def _eq(file_index):
+        return math_ops.equal(replica_id, file_index)
+
+      flush_op_cases = {}
+      flush_op_cases[_eq(0)] = _f(0)
+      for i in range(1, num_replicas):
+        if on_tpu and not self._parameters.collect_summary_per_core:
+          # If this is the case, the cache is already merged for all cores.
+          # Only the first core flushes the cache.
+          flush_op_cases[_eq(i)] = control_flow_ops.no_op
+        else:
+          flush_op_cases[_eq(i)] = _f(i)
+      # Each replica needs to determine where to write its output.
+      # To do this, we check if replica_id is 0, then 1, ..., and then
+      # num_replicas - 1 statically; and return the corresponding static file
+      # name. We cannot simply set the file name in python, as replica_id is
+      # only known during tf runtime, and we cannot create dynamic filenames.
+      return control_flow_case.case(flush_op_cases, exclusive=True)
+
+    cache = self._create_or_get_tensor_values_cache(_TT_SUMMARY_TAG, graph)
+    if self._use_temp_cache():
+      cache_val = cache
+    else:
+      cache_val = cache.value()
+
+    if on_tpu:
+      # If we do not need to collect traces for all cores, merge and aggregate
+      # the per core trace.
+      if not self._parameters.collect_summary_per_core:
+        cache_val = self.merge_caches_on_tpu(cache_val)
+        cache_val = self.aggregate_global_cache(cache_val)[0]
+
+      flush_op = tpu_replication.outside_compilation(
+          _flush_fun, cache_val, self._replica_id,
+          array_ops.identity(training_util.get_or_create_global_step()))
+    else:
+      global_step = training_util.get_or_create_global_step()
+      flush_op = _flush_fun(cache_val, self._replica_id, global_step)
+
+    if self._use_temp_cache():
+      with ops.control_dependencies([flush_op]):
+        return constant_op.constant(0).op
+    else:
+      # Re-initialize the local cache variable.
+      with ops.control_dependencies([flush_op]):
+        reset_value = constant_op.constant(_COMPACT_TRACE_ENTRY_INIT_VALUE,
+                                           dtype=cache.dtype,
+                                           shape=cache.shape)
+        assign_op = state_ops.assign(cache, reset_value).op
+        with ops.control_dependencies([assign_op]):
+          return constant_op.constant(0).op
+
+  def _flush_tensor_values_cache(self, tensor_fetches, op_fetches, on_tpu,
+                                 tensor_trace_order, graph):
+    """Flushes the intermediate tensor values in the graph to the cache.
+
+    Args:
+      tensor_fetches: list of tensor results returned by the model_fn.
+      op_fetches: list of ops that are returned by the model_fn, e.g.,
+        train_op.
+      on_tpu: if the graph is executed on TPU.
+      tensor_trace_order: TensorTraceOrder object holding tensorname to id
+        map.
+      graph: TensorFlow graph.
+
+    Returns:
+      An identical copy of tensor_fetches.
+    """
+    # Add a dependency to op and tensor fetches to make sure that all tracing
+    # ops are executed before flushing trace results.
+    if not tensor_trace_order.traced_tensors:
+      logging.warn('No tensor values being traced. No flush cache op added.')
+      return tensor_fetches
+    with ops.control_dependencies(op_fetches +
+                                  [tensor.op for tensor in tensor_fetches]):
+      flush_cache_op = self._generate_flush_cache_op(
+          self._tt_config.num_replicas, on_tpu, tensor_trace_order, graph)
+      return control_flow_ops.tuple(tensor_fetches,
+                                    control_inputs=[flush_cache_op])
+
+  def _process_tensor_fetches(self, tensor_fetches):
+    """Checks that tensor_fetches is not empty and has valid tensors."""
+    # If none or empty list.
+    if tensor_fetches is None:
+      raise RuntimeError('tensor_fetches provided to tensor_tracer cannot be '
+                         'None.')
+    if not isinstance(tensor_fetches, (list, tuple)):
+      tensor_fetches = [tensor_fetches]
+    elif not tensor_fetches:
+      raise RuntimeError('tensor_fetches provided to tensor_tracer cannot be '
+                         'an empty list.')
+    fetches = []
+    for fetch in tensor_fetches:
+      if isinstance(fetch, tensor_lib.Tensor):
+        fetches.append(fetch)
+      else:
+        raise RuntimeError('Given tensor_fetch:%s is not a tensor.' % fetch)
+    return fetches
+
+  def _process_op_fetches(self, op_fetches):
+    """Checks that op_fetches have valid ops."""
+    if op_fetches is None:
+      return []
+
+    if not isinstance(op_fetches, (list, tuple)):
+      op_fetches = [op_fetches]
+
+    fetches = []
+    for fetch in op_fetches:
+      if isinstance(fetch, ops.Operation):
+        fetches.append(fetch)
+      elif isinstance(fetch, tensor_lib.Tensor):
+        fetches.append(fetch.op)
+      else:
+        logging.warning('Ignoring the given op_fetch:%s, which is not an op.'
+                        % fetch)
+    return fetches
+
+  def _convert_fetches_to_input_format(self, input_fetches, current_fetches):
+    """Changes current_fetches' format, so that it matches input_fetches."""
+    if isinstance(input_fetches, tensor_lib.Tensor):
+      if len(current_fetches) != 1:
+        raise RuntimeError('Tensor tracer input/output fetches do not match.')
+      return current_fetches[0]
+    else:
+      if len(current_fetches) != len(input_fetches):
+        raise RuntimeError('Tensor tracer input/output fetches do not match.')
+      elif isinstance(input_fetches, tuple):
+        return tuple(current_fetches)
+      else:
+        return current_fetches
+
+  def _get_op_control_flow_context(self, op):
+    """Returns the control flow context of the given op.
+
+    Args:
+      op: tf.Operation for which the control flow context is requested.
+    Returns:
+      op_control_flow_context: the control flow context of the given op. If
+        the operation type is LoopExit, returns the outer control flow
+        context.
+    """
+    # pylint: disable=protected-access
+    op_control_flow_context = op._control_flow_context
+    # pylint: enable=protected-access
+    if control_flow_util.IsLoopExit(op):
+      op_control_flow_context = op_control_flow_context.outer_context
+    return op_control_flow_context
+
+  def merge_caches_on_tpu(self, local_tpu_cache_tensor):
+    """Merges the given caches on tpu.
+
+    Args:
+      local_tpu_cache_tensor: A local tensor that needs to be merged
+        by concatenating data from the other tpu cores.
+    Returns:
+      A merged tf.Tensor.
+    """
+    x = array_ops.broadcast_to(
+        local_tpu_cache_tensor,
+        shape=[self._tt_config.num_replicas] +
+        local_tpu_cache_tensor.shape.as_list())
+
+    if tensor_tracer_flags.TT_SINGLE_CORE_SUMMARIES.value:
+      return x
+
+    return tpu_ops.all_to_all(
+        x, concat_dimension=0, split_dimension=0,
+        split_count=self._tt_config.num_replicas,
+        group_assignment=[list(range(self._tt_config.num_replicas))])
+
+  def aggregate_global_cache(self, global_tt_summary_cache):
+    """Aggregates the given caches from all tpu cores.
+
+    Args:
+      global_tt_summary_cache: The global tensor tracer summary cache tensor
+        with shape (num_cores, num_traced_tensors, num_traced_signatures).
+        The first dimension corresponds to core_id, where
+        global_tt_summary_cache[i] corresponds to the local cache from core-i.
+    Returns:
+      An aggregated tf.Tensor.
+    Raises:
+      RuntimeError: if there is no aggregate function defined for a signature.
+    """
+
+    # Merge only the statistics tensor; if it is any other tensor, we simply
+    # concatenate them.
+    agg_fn_map = self._parameters.get_signature_to_agg_fn_map()
+    signature_idx_map = self._signature_types()
+    aggregation_result = []
+    for signature, idx in sorted(signature_idx_map.items(),
+                                 key=operator.itemgetter(1)):
+      if signature not in agg_fn_map:
+        raise RuntimeError('No aggregation function is defined for '
+                           'signature %s.' % signature)
+      # The dimensions of the statistics tensor is
+      # num_cores x num_traced_tensors x num_signatures
+      # value[:,:,idx] will return the portion of the tensor related
+      # to signature.
+      signature_tensor = global_tt_summary_cache[:, :, idx]
+      # Merge it along the first (core) axis.
+      agg_fn = agg_fn_map[signature]
+      agg_tensor = agg_fn(signature_tensor, axis=0)
+      aggregation_result.append(agg_tensor)
+    # Merge results corresponding to different signatures.
+
+    merged_signatures = array_ops_stack.stack(aggregation_result)
+    # merged_signatures has dimensions
+    # num_signatures x num_traced_tensors; transpose it so that it
+    # will match the original structure,
+    # num_traced_tensors x num_signatures.
+    transposed_signatures = array_ops.transpose(merged_signatures)
+    # Expand by 1 more dimension so that it will match the expected
+    # structure num_cores x num_traced_tensors x num_signatures.
+    return array_ops.expand_dims(transposed_signatures, axis=0)
+ + Args: + step: Step tensor with dimension [num_cores]. + event_file_suffix: Event filename suffix tensor. + **kwargs: The dictionary of tensors that needs to be written as + summaries. Key and value pairs within kwargs correspond to the tag + name, and tensor content that will be written using summary.write. + The trace_modes that use this function are: + - summary: In summary mode, kwargs includes a single (tag, content) + pair which are, _TT_SUMMARY_TAG and a tf.float32 signature_cache + variable. The dimension of the signature_cache is: + num_cores x num_traced_tensors x num_signatures. + - full_tensor_summary: kwargs will include all traced tensors. Tag + and content correspond to the name of the tensor, and its actual + content. + Returns: + A tf.Operation that needs to be executed for the host call dependencies. + """ + file_suffix = _TT_EVENT_FILE_SUFFIX + if event_file_suffix is not None: + file_suffix = string_ops.string_join([file_suffix, event_file_suffix], + separator='.') + # TODO(deveci): Parametrize max_queue, so that flushing op can be called + # less frequently. + # Setting max_queue to 100 appears to be safe even when the number of + # iterations are much lower, as the destructor of the writer flushes it. + summary_write_ops = [] + summary_writer = summary.create_file_writer_v2( + self._parameters.trace_dir, + filename_suffix=file_suffix, + max_queue=_TT_SUMMARY_MAX_QUEUE) + graph.add_to_collection( + TENSOR_TRACER_SUMMARY_COLLECTION, summary_writer) + + step_value = step[0] + dt = step_value.dtype + + # The step parameter to a summary write call must be 64-bit. + if dt.__ne__(dtypes.int64) and dt.__ne__( + dtypes.uint64) and dt.__ne__(dtypes.float64): + step_value = math_ops.cast(step_value, dtypes.int64) + + with summary_writer.as_default(): + summary_metadata = summary_pb2.SummaryMetadata( + plugin_data=summary_pb2.SummaryMetadata.PluginData( + plugin_name=_TT_TENSORBOARD_PLUGIN_NAME)) + for key, value in kwargs.items(): + # Check whether we need to compute aggregated statistics that merge + # all cores statistics. + if not self._parameters.collect_summary_per_core: + # Merge only statistics tensor, if it is any other tensor we simply, + # concatenate them. + # Also, if there is only a single core (first dim. is 0), then skip + # aggregation. + if key == _TT_SUMMARY_TAG and value.shape.as_list()[0] != 1: + value = self.aggregate_global_cache(value) + with ops.control_dependencies([summary_writer.init()]): + summary_write_ops.append(summary.write( + _TT_SUMMARY_TAG + '/' + key + '.' + graph_summary_tag, + value, metadata=summary_metadata, + step=step_value)) + return control_flow_ops.group(summary_write_ops) + + global_step = training_util.get_or_create_global_step() + step = array_ops.reshape(global_step, [1]) + self._host_call_fn = {} + + host_call_deps = op_fetches + [tensor.op for tensor in processed_t_fetches] + + caches_to_write = {} + with ops.control_dependencies(host_call_deps): + all_caches = self._cache_variable_for_graph(graph) + for cache_name, cache_variable in all_caches.items(): + # Increase the cache rank by 1, so that when host call concatenates + # tensors from different replicas, we can identify them with [core_id]. + new_cache_shape = [1] + new_cache_shape.extend(cache_variable.shape.as_list()) + cache = array_ops.reshape(cache_variable, new_cache_shape) + caches_to_write[cache_name] = cache + # Add step to parameter dictionary. 
+ caches_to_write['step'] = step + # Other options without adding step to parameter dictionary are + # * host_call_fn = (_write_cache(step, caches_to_write)) : fails as it + # considers caches_to_write as a single parameter, rather than keyword + # parameters. + # * host_call_fn = (_write_cache(step, **caches_to_write)) : fails with + # a syntax error. + self._host_call_fn[_TT_HOSTCALL_KEY] = (_write_cache, caches_to_write) + + def host_call_deps_and_fn(self): + return self._host_call_fn + + def get_traced_op_names(self): + """Returns the set of traced op names.""" + return self._traced_op_names + + def _trace_execution(self, graph, + tensor_fetches, + op_fetches=None, + on_tpu=True): + """Common tracing function for both CPUs and TPUs. + + The caller function should set device_type, num_replicas, + num_replicas_per_host, num_hosts and replica_id before calling + _trace_execution. + + + Args: + graph: the graph of Ops executed on the TPU. + tensor_fetches: a (list, tuple, or a single object) of tensor fetches + returned by model_fn given to session.run. Function must be provided + with at least one tensor to fetch. + op_fetches: A list of op fetches returned by model_fn given to + session.run. op_fetches and tensor_fetches are used to determine the + nodes that will be executed. Can be None. + on_tpu: True if executing on TPU. + + Returns: + tensor_fetches: an exact copy of tensor_fetches that has additional + dependencies. + Raises: + RuntimeError: If tensor_fetches is None or empty. + """ + def _cast_unsupported_dtypes(tensor): + """Casts tensor to a supported type.""" + + if tensor.dtype == dtypes.int64: + # outside-compilation doesn't support int64 input yet. + return math_ops.cast(tensor, dtypes.int32) + if tensor.dtype in (dtypes.bfloat16, dtypes.float16): + # Since the host can't handle bf16, convert tensor to f32. + return math_ops.cast(tensor, dtypes.float32) + return tensor + + trace_mode = self._parameters.trace_mode + device_type = self._tt_config.device_type + # pylint: disable=protected-access + self._outmost_context = graph._get_control_flow_context() + # pylint: enable=protected-access + + analytics.track_usage('tensor_tracer', [trace_mode, device_type]) + TensorTracer.check_device_type(device_type) + TensorTracer.check_trace_mode(device_type, trace_mode) + # Check tensor_fetches and op_fetches, and convert them to lists. + processed_t_fetches = self._process_tensor_fetches(tensor_fetches) + op_fetches = self._process_op_fetches(op_fetches) + all_fetches = op_fetches + [tensor.op for tensor in processed_t_fetches] + + # Filter out the operations that won't be executed. + # if fetches=None, then ops_in_exec_path = set(operations) + exec_op_set = self._filter_execution_path_operations(graph.get_operations(), + all_fetches) + graph_summary_tag = _graph_summary_tag(graph) + + # Write report file, and determine the traced tensors. + tensor_trace_order = self._determine_trace_and_create_report( + graph, exec_op_set, graph_summary_tag) + + tensor_fetch_set = set(processed_t_fetches) + tracing_ops = [] + + sorted_exec_op_list = list(exec_op_set) + sorted_exec_op_list.sort(key=lambda op: op.name) + # Trace ops only if they are in the execution path.
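+ # Sketch of the per-op flow below (descriptive comment): for each op in the + # execution path, (1) outputs that are not in tensor_trace_order are skipped, + # (2) signatures are computed via _preprocess_traced_tensor, and (3) the + # signatures are either saved into the signature cache or emitted through a + # trace op, which is then added as a control input to every consumer.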
+ for op in sorted_exec_op_list: + for i in range(len(op.outputs)): + out_tensor = op.outputs[i] + tensor_name = out_tensor.name + if tensor_name not in tensor_trace_order.tensorname_to_cache_idx: + continue + self._traced_op_names.add(op.name) + # Create the list of consumers before calling _preprocess_traced_tensor. + # Otherwise, adding the control input below will introduce a cycle in the + # graph. + consumers = out_tensor.consumers() + # Not all consumers may be in the exec path. Filter out the consumers + # to keep the graph simpler. + consumers = [cop for cop in consumers if cop in exec_op_set] + + # If there is no consumer of the tensor, there is no need to trace it, + # unless the tensor itself is one of the fetches. + is_a_fetched_tensor = out_tensor in tensor_fetch_set + if (not consumers) and (not is_a_fetched_tensor): + continue + + op_control_flow_context = self._get_op_control_flow_context(op) + if op_control_flow_context: + # pylint: disable=protected-access + graph._set_control_flow_context(op_control_flow_context) + # pylint: enable=protected-access + + processed_tensors = self._preprocess_traced_tensor(out_tensor) + + if on_tpu: + for signature in processed_tensors.keys(): + processed_tensors[signature] = _cast_unsupported_dtypes( + processed_tensors[signature]) + + if self._use_tensor_values_cache(): + # Use a small cache (either temp cache or tf local variable) to store + # the characteristics of the tensor. + if self._use_temp_cache(): + cache_idx = tensor_trace_order.tensorname_to_cache_idx[tensor_name] + self._save_tensor_value_to_tmp_cache(cache_idx, + processed_tensors, + graph) + trace_op = None + else: + cache_idx = tensor_trace_order.tensorname_to_cache_idx[tensor_name] + trace_op = self._save_tensor_value_to_cache_op(cache_idx, + processed_tensors, + graph) + elif self._use_tensor_buffer(): + if len(processed_tensors) != 1: + raise RuntimeError('Multiple stats are only allowed in compact ' + 'mode.') + processed_out_tensor = list(processed_tensors.values())[0] + # Store the whole tensor in a buffer. + trace_op = self._snapshot_tensor(processed_out_tensor) + else: + + def tpu_wrap_trace_fn(tensor, out_tensor_name): + """Wraps the trace_fn with outside compilation if on TPUs.""" + tensor_trace_fn = self._make_tensor_trace_fun(out_tensor_name, + tensor_trace_order) + if on_tpu: + return tpu_replication.outside_compilation( + tensor_trace_fn, tensor) + else: + return tensor_trace_fn(tensor) + + if len(processed_tensors) != 1: + raise RuntimeError('Multiple stats are only allowed in compact ' + 'mode.') + # Collecting multiple statistics is only supported in the summary + # mode that uses the compact format (self._use_tensor_values_cache = true). + # Non-compact mode currently allows a single stat per tensor. + processed_out_tensor = next(iter(processed_tensors.values())) + trace_op = tpu_wrap_trace_fn(processed_out_tensor, tensor_name) + + if op_control_flow_context: + # pylint: disable=protected-access + graph._set_control_flow_context(self._outmost_context) + # pylint: enable=protected-access + if trace_op: + if is_a_fetched_tensor: + tracing_ops.append(trace_op) + continue + # Add it to all consumers, as some consumers may not be executed if + # they are in a control flow.
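+ # Making the trace op a control input of each consumer guarantees that the + # trace runs whenever any consumer runs, including consumers placed inside + # conditional branches or loop bodies.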
+ for consumer_op in consumers: + # pylint: disable=protected-access + consumer_op._add_control_input(trace_op) + # pylint: enable=protected-access + + # pylint: disable=protected-access + graph._set_control_flow_context(self._outmost_context) + # pylint: enable=protected-access + if tracing_ops: + # If we are tracing a fetched tensor, its dependency is stored in + # tracing_ops. + processed_t_fetches = control_flow_ops.tuple(processed_t_fetches, + control_inputs=tracing_ops) + if self._use_tensor_values_cache() or self._use_tensor_buffer(): + if self._use_temp_cache(): + # Create the temporary tf cache variable by concatenating all + # statistics. + graph_cache_var = self._cache_variable_for_graph(graph) + if graph not in self._temp_cache_var: + raise RuntimeError('graph is not in self._temp_cache_var') + graph_cache_var[_TT_SUMMARY_TAG] = array_ops_stack.stack( + self._temp_cache_var[graph], axis=0, name='stack_all_op_signatures') + if self._create_host_call(): + self._prepare_host_call_fn(processed_t_fetches, op_fetches, graph, + graph_summary_tag) + if not on_tpu: + write_cache, caches_to_write = self._host_call_fn[_TT_HOSTCALL_KEY] + cache_write_op = write_cache(**caches_to_write) + processed_t_fetches = control_flow_ops.tuple( + processed_t_fetches, control_inputs=[cache_write_op]) + del self._host_call_fn[_TT_HOSTCALL_KEY] + elif self._parameters.flush_summaries_with_outside_compile: + write_cache, caches_to_write = self._host_call_fn[_TT_HOSTCALL_KEY] + if (_TT_SUMMARY_TAG in caches_to_write and 'step' in caches_to_write): + step = caches_to_write['step'] + tensor_tracer_summary = caches_to_write[_TT_SUMMARY_TAG] + tt_core_summary = self.merge_caches_on_tpu(tensor_tracer_summary[0]) + if not self._parameters.collect_summary_per_core: + tt_core_summary = self.aggregate_global_cache(tt_core_summary) + + def write_if_core_0(step, replica_id, tt_summary): + + return cond.cond( + math_ops.equal(replica_id, 0), + lambda: write_cache(step=step, event_file_suffix=None, # pylint: disable=g-long-lambda + tensor_tracer_summary=tt_summary), + control_flow_ops.no_op) + + write_op = tpu_replication.outside_compilation( + write_if_core_0, + step=step, + replica_id=self._replica_id, + tt_summary=tt_core_summary) + processed_t_fetches = control_flow_ops.tuple( + processed_t_fetches, control_inputs=[write_op]) + del self._host_call_fn[_TT_HOSTCALL_KEY] + else: + raise ValueError('Outside compiled flush is only supported for ' + 'summary mode.') + else: + processed_t_fetches = self._flush_tensor_values_cache( + processed_t_fetches, op_fetches, on_tpu=on_tpu, + tensor_trace_order=tensor_trace_order, + graph=graph) + + # processed_t_fetches is a list at this point. Convert it to the same + # format as given in tensor_fetches. + return self._convert_fetches_to_input_format(tensor_fetches, + processed_t_fetches) + + def trace_tpu(self, graph, + tensor_fetches, + op_fetches=None, + num_replicas=None, + num_replicas_per_host=None, + num_hosts=None): + """Traces the tensors generated by TPU Ops in a TF graph. + + Args: + graph: the graph of Ops executed on the TPU. + tensor_fetches: a (list, tuple, or a single object) of tensor fetches + returned by model_fn given to session.run. Function must be provided + with at least one tensor to fetch. + op_fetches: A list of op fetches returned by model_fn given to + session.run. op_fetches and tensor_fetches are used to determine the + nodes that will be executed. Can be None. + num_replicas: number of replicas used on the TPU.
+ num_replicas_per_host: number of replicas per TPU host. + num_hosts: total number of TPU hosts. + + Returns: + tensor_fetches: an exact copy of tensor_fetches that has additional + dependencies. + """ + if isinstance(graph, func_graph.FuncGraph) or isinstance( + graph, function._FuncGraph): # pylint: disable=protected-access + logging.warning('Tensor Tracer is not supported for tracing FuncGraphs. ' + 'Ignoring tracing.') + return tensor_fetches + + if graph in TensorTracer._traced_graphs: + logging.warning('Graph is already rewritten with tensor tracer, ignoring ' + 'multiple calls.') + return tensor_fetches + else: + TensorTracer._traced_graphs.add(graph) + # Reset the parameters in case parameters are changed. + self._parameters = tensor_tracer_flags.TTParameters() + self._tt_config.device_type = _DEVICE_TYPE_TPU + self._tt_config.num_replicas = num_replicas + self._tt_config.num_replicas_per_host = num_replicas_per_host + self._tt_config.num_hosts = num_hosts + if self._tt_config.num_replicas is not None: + if self._tt_config.num_replicas_per_host is None: + self._tt_config.num_replicas_per_host = 8 + if self._tt_config.num_hosts is None: + self._tt_config.num_hosts = ( + num_replicas // self._tt_config.num_replicas_per_host + + (num_replicas % self._tt_config.num_replicas_per_host > 0)) + + if self._parameters.graph_dump_path: + graph_io.write_graph(graph, self._parameters.graph_dump_path, + 'graph_before_tt.pbtxt') + with graph.as_default(): + self._add_replica_id_to_graph() + tensor_fetches = self._trace_execution(graph, tensor_fetches, op_fetches, + on_tpu=True) + if self._parameters.graph_dump_path: + graph_io.write_graph(graph, self._parameters.graph_dump_path, + 'graph_after_tt.pbtxt') + return tensor_fetches + + def trace_cpu(self, graph, tensor_fetches, op_fetches=None): + """Traces the tensors generated by CPU Ops in a TF graph. + + Args: + graph: the graph of Ops executed on the CPU. + tensor_fetches: a (list, tuple, or a single object) of tensor fetches + returned by model_fn given to session.run. Function must be provided + with at least one tensor to fetch. + op_fetches: A list of op fetches returned by model_fn given to + session.run. op_fetches and tensor_fetches are used to determine the + nodes that will be executed. Can be None. + + Returns: + tensor_fetches: an exact copy of tensor_fetches that has additional + dependencies. + """ + if isinstance(graph, func_graph.FuncGraph) or isinstance( + graph, function._FuncGraph): # pylint: disable=protected-access + logging.warning('Tensor Tracer is not supported for tracing FuncGraphs. ' + 'Ignoring tracing.') + return tensor_fetches + + if graph in TensorTracer._traced_graphs: + logging.warning('Graph is already rewritten with tensor tracer, ignoring ' + 'multiple calls.') + return tensor_fetches + else: + TensorTracer._traced_graphs.add(graph) + # Reset the parameters in case parameters are changed.
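+ # TTParameters() re-reads the TENSOR_TRACER_FLAGS environment variable, so + # flag changes made between tracing calls take effect here.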
+ self._parameters = tensor_tracer_flags.TTParameters() + + self._tt_config.device_type = _DEVICE_TYPE_CPU + self._tt_config.num_replicas = 1 + self._tt_config.num_replicas_per_host = 1 + self._tt_config.num_hosts = 1 + self._replica_id = 0 + if self._parameters.graph_dump_path: + graph_io.write_graph(graph, self._parameters.graph_dump_path, + 'graph_before_tt.pbtxt') + with graph.as_default(): + tensor_fetches = self._trace_execution(graph, tensor_fetches, op_fetches, + on_tpu=False) + if self._parameters.graph_dump_path: + graph_io.write_graph(graph, self._parameters.graph_dump_path, + 'graph_after_tt.pbtxt') + return tensor_fetches diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tensor_tracer_flags.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tensor_tracer_flags.py new file mode 100644 index 0000000000000000000000000000000000000000..e9617ce4178774278d6a48d0cd5a8946bfc6ab4e --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tensor_tracer_flags.py @@ -0,0 +1,504 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ======================================================================== +"""Utilities to handle tensor tracer parameters.""" + + +import os +import os.path +import re +from absl import flags +from tensorflow.python.ops import linalg_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.platform import tf_logging as logging + +TRACE_MODE_PART_TENSOR = 'part-tensor' +TRACE_MODE_FULL_TENSOR = 'full-tensor' +TRACE_MODE_FULL_TENSOR_SUMMARY = 'full_tensor_summary' + +TRACE_MODE_NAN_INF = 'nan-inf' +TRACE_MODE_NORM = 'norm' +TRACE_MODE_MAX_ABS = 'max-abs' +TRACE_MODE_SUMMARY = 'summary' +TRACE_MODE_HISTORY = 'history' +# Summary mode collects a finite set of signatures for each traced tensor +# (such as norm, max, min, mean) and dumps them using tb summaries. + +# Full tensor mode dumps the whole tensor values for the traced tensors without +# any processing on them, using tb summaries.
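+# Illustrative example (hypothetical paths): the flags below are parsed from +# the TENSOR_TRACER_FLAGS environment variable defined in this module, e.g.: +#   TENSOR_TRACER_FLAGS='--enable=1 --trace_mode=summary --trace_dir=/tmp/tt' +# would enable tracing in summary mode and write TensorBoard summaries under +# /tmp/tt (summary mode requires a trace_dir; see _check_flag_errors).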
+ +_SUBMODE_BRIEF = 'brief' +_SUBMODE_DETAILED = 'detailed' + +_FLAG_SINGLE_QUOTE_PAT = re.compile(r"\s*--([^=]+)='([^']*)'") +_FLAG_DOUBLE_QUOTE_PAT = re.compile(r'\s*--([^=]+)="([^"]*)"') +_FLAG_NO_QUOTE_PAT = re.compile(r'\s*--([^=]+)=(\S*)') +_FLAG_NO_EQUAL_PAT = re.compile(r'\s*--([^=]+)\s*') + +FLAGS_ENV_VAR = 'TENSOR_TRACER_FLAGS' +FLAG_NAME_ENABLE = 'enable' +FLAG_NAME_TRACE_MODE = 'trace_mode' +FLAG_NAME_TRACE_SCALAR_OPS = 'trace_scalar' +FLAG_NAME_SUBMODE = 'submode' +FLAG_NAME_EXCLUDED_OPNAMES = 'excluded_opnames' +FLAG_NAME_EXCLUDED_OPTYPES = 'excluded_optypes' +FLAG_NAME_INCLUDED_OPNAMES = 'included_opnames' +FLAG_NAME_INCLUDED_OPTYPES = 'included_optypes' +FLAG_NAME_TRACE_LEVEL = 'trace_level' +FLAG_NAME_TRACE_DIR = 'trace_dir' +FLAG_NAME_REPORT_FILE = 'report_file' +FLAG_NAME_USE_TEST_UNDECLARED_OUTPUTS_DIR = 'use_test_undeclared_outputs_dir' +FLAG_NAME_OP_RANGE = 'op_range' +# Folder to dump the pre (before tensor tracer updates) and post graphs (after +# tensor tracer updates). +FLAG_NAME_DUMP_BEFORE_AFTER_GRAPHS = 'dump_graphs' +FLAG_NAME_SUMMARY_SIGNATURES = 'signatures' +FLAG_NAME_SUMMARY_PER_CORE = 'collect_summary_per_core' +FLAG_NAME_TEMP_CACHE_VAR = 'use_temp_cache' +FLAG_NAME_INSPECT_TRACE = 'inspect_trace' +FLAG_NAME_FINGERPRINT_DIR = 'use_fingerprint_subdirectory' +FLAG_FLUSH_SUMMARY = 'flush_summaries' + + +VALID_FLAG_NAMES = [ + FLAG_NAME_ENABLE, FLAG_NAME_TRACE_MODE, + FLAG_NAME_TRACE_SCALAR_OPS, + FLAG_NAME_SUBMODE, FLAG_NAME_EXCLUDED_OPNAMES, + FLAG_NAME_EXCLUDED_OPTYPES, FLAG_NAME_INCLUDED_OPNAMES, + FLAG_NAME_INCLUDED_OPTYPES, FLAG_NAME_TRACE_DIR, + FLAG_NAME_REPORT_FILE, + FLAG_NAME_USE_TEST_UNDECLARED_OUTPUTS_DIR, + FLAG_NAME_OP_RANGE, + FLAG_NAME_DUMP_BEFORE_AFTER_GRAPHS, FLAG_NAME_TRACE_LEVEL, + FLAG_NAME_SUMMARY_SIGNATURES, FLAG_NAME_SUMMARY_PER_CORE, + FLAG_NAME_TEMP_CACHE_VAR, FLAG_NAME_FINGERPRINT_DIR, + FLAG_NAME_INSPECT_TRACE, FLAG_FLUSH_SUMMARY, +] + +_OP_RANGE_PAT = re.compile(r'(\d+):(\d+)') +_TEST_UNDECLARED_OUTPUTS_DIR_ENV_VAR = 'TEST_UNDECLARED_OUTPUTS_DIR' + +_TT_DEFAULT_TRACE_LEVEL = 3 +_TT_PREFIX = 'tensor_tracer' + +_TT_NORM = 'norm' +_TT_MAX = 'max' +_TT_MAX_ABS = 'max-abs' +_TT_MIN = 'min' +_TT_SPARSITY = 'sparsity' +_TT_MEAN = 'mean' +_TT_VAR = 'var' +_TT_SIZE = 'size' + +TT_SUMMARY_NORM = '%s_%s' % (_TT_PREFIX, _TT_NORM) +TT_SUMMARY_MAX = '%s_%s' % (_TT_PREFIX, _TT_MAX) +TT_SUMMARY_MAX_ABS = '%s_%s' % (_TT_PREFIX, _TT_MAX_ABS) +TT_SUMMARY_MIN = '%s_%s' % (_TT_PREFIX, _TT_MIN) +TT_SUMMARY_SPARSITY = '%s_%s' % (_TT_PREFIX, _TT_SPARSITY) +TT_SUMMARY_MEAN = '%s_%s' % (_TT_PREFIX, _TT_MEAN) +TT_SUMMARY_VAR = '%s_%s' % (_TT_PREFIX, _TT_VAR) +TT_SUMMARY_SIZE = '%s_%s' % (_TT_PREFIX, _TT_SIZE) + +TT_SUMMARY_SIGNATURES = (TT_SUMMARY_NORM, TT_SUMMARY_MAX, TT_SUMMARY_MIN, + TT_SUMMARY_SPARSITY, TT_SUMMARY_MEAN, TT_SUMMARY_VAR, + TT_SUMMARY_SIZE, TT_SUMMARY_MAX_ABS) + +FLAGS = flags.FLAGS + +DELTA_THRESHOLD = flags.DEFINE_float( + 'delta_threshold', + default=0.5, + help=('Log if history based diff crosses this threshold.')) +TT_CHECK_FILTER = flags.DEFINE_bool( + 'tt_check_filter', + default=False, + help='Terminate early to check op name filtering.') +TT_SINGLE_CORE_SUMMARIES = flags.DEFINE_bool( + 'tt_single_core_summaries', + default=False, + help='Report single core metric and avoid aggregation.') + + +class TTParameters(object): + """A class that handles the parameters of Tensor Tracer.""" + + def __init__(self, env=None): + if env: + self._env = env + else: + self._env = os.environ + self._validate_flag_names() + self.trace_mode = 
self._get_trace_mode() + self.submode = self._get_submode() + self.trace_dir = self._get_trace_dir() + self.report_file_path = self._get_report_filepath() + self.op_range = self._get_op_range() + self.excluded_opname_re_list = self._flag_value_to_re_list( + FLAG_NAME_EXCLUDED_OPNAMES) + self.excluded_optype_re_list = self._flag_value_to_re_list( + FLAG_NAME_EXCLUDED_OPTYPES) + + self.included_opname_re_list = self._flag_value_to_re_list( + FLAG_NAME_INCLUDED_OPNAMES) + self.included_optype_re_list = self._flag_value_to_re_list( + FLAG_NAME_INCLUDED_OPTYPES) + + self.trace_scalar_ops = self.is_flag_on(FLAG_NAME_TRACE_SCALAR_OPS) + self.use_compact_trace = self.trace_mode in (TRACE_MODE_NAN_INF, + TRACE_MODE_NORM, + TRACE_MODE_HISTORY, + TRACE_MODE_MAX_ABS, + TRACE_MODE_SUMMARY) + self.use_temp_cache_var = self.is_flag_on(FLAG_NAME_TEMP_CACHE_VAR) + self.inspect_trace = self.is_flag_on(FLAG_NAME_INSPECT_TRACE) + self.use_fingerprint_subdir = self.is_flag_on(FLAG_NAME_FINGERPRINT_DIR) + + _, self.graph_dump_path = self.get_flag_value( + FLAG_NAME_DUMP_BEFORE_AFTER_GRAPHS) + self.trace_level = self._get_flag_int_value(FLAG_NAME_TRACE_LEVEL, + _TT_DEFAULT_TRACE_LEVEL) + self.summary_signatures = self._get_summary_signatures() + self.collect_summary_per_core = self.is_flag_on(FLAG_NAME_SUMMARY_PER_CORE) + # TODO(b/199284834): Will be resolved with referenced bug. + if self.collect_summary_per_core: + logging.warning('Aggregate signatures are approximate for mean, variance' + ' and sparsity.') + self.flush_summaries_with_outside_compile = self.is_flag_on( + FLAG_FLUSH_SUMMARY) + # Do not produce errors or warnings if Tensor Tracer is not enabled. + if self.is_enabled(): + self._check_flag_errors() + + def _check_flag_errors(self): + if self.trace_mode in (TRACE_MODE_SUMMARY, TRACE_MODE_FULL_TENSOR_SUMMARY): + if not self.trace_dir: + raise ValueError('trace_dir must be explicitly provided in ' + 'TENSOR_TRACER_FLAGS when summary mode is used.') + + def _get_report_filepath(self): + """Returns the path of the output report file.""" + + found, report_file_path = self.get_flag_value(FLAG_NAME_REPORT_FILE) + if found and report_file_path and self.use_test_undeclared_outputs_dir(): + if os.path.isabs(report_file_path): + raise ValueError('If use_test_undeclared_outputs_dir is set, ' + 'report_file_path cannot be an absolute path (%s)' + % report_file_path) + outputs_dir = self._env.get(_TEST_UNDECLARED_OUTPUTS_DIR_ENV_VAR) + report_file_path = os.path.join(outputs_dir, report_file_path) + return report_file_path + + def _get_op_range(self): + """Returns the index range of the Ops that we will consider tracing.""" + found, op_range = self.get_flag_value(FLAG_NAME_OP_RANGE) + if not found or not op_range: + op_range = (-1, -1) # this means including all ops. + return op_range + match = _OP_RANGE_PAT.match(op_range) + if not match: + op_range = (-1, -1) # this means including all ops.
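+ # The flag value is expected in 'first:last' form, e.g. --op_range=0:100 + # (illustrative values); anything that does not match falls back to tracing + # all ops.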
+ return op_range + op_range = (int(match.group(1)), int(match.group(2))) + return op_range + + def _get_trace_dir(self): + found, trace_dir = self.get_flag_value(FLAG_NAME_TRACE_DIR) + if found and trace_dir and self.use_test_undeclared_outputs_dir(): + raise ValueError( + 'Cannot use --%s and --%s at the same time' % + (FLAG_NAME_TRACE_DIR, FLAG_NAME_USE_TEST_UNDECLARED_OUTPUTS_DIR)) + if self.use_test_undeclared_outputs_dir(): + trace_dir = self._env.get(_TEST_UNDECLARED_OUTPUTS_DIR_ENV_VAR) + return trace_dir + + def _get_trace_mode(self): + """Checks if the given trace mode is valid.""" + + found, trace_mode = self.get_flag_value(FLAG_NAME_TRACE_MODE) + if not found or not trace_mode: + trace_mode = TRACE_MODE_NORM + valid_trace_modes = [ + TRACE_MODE_NAN_INF, TRACE_MODE_PART_TENSOR, TRACE_MODE_FULL_TENSOR, + TRACE_MODE_NORM, TRACE_MODE_MAX_ABS, + TRACE_MODE_SUMMARY, TRACE_MODE_FULL_TENSOR_SUMMARY, + TRACE_MODE_HISTORY + ] + if trace_mode not in valid_trace_modes: + raise ValueError('Invalid trace mode "%s" given to the Tensor Tracer. ' + 'Valid trace modes are: %s' % (trace_mode, + valid_trace_modes)) + return trace_mode + + def is_brief_mode(self): + return self.submode == _SUBMODE_BRIEF + + def _get_submode(self): + """Checks if the given submode is valid.""" + + found, submode = self.get_flag_value(FLAG_NAME_SUBMODE) + if not found or not submode: + submode = _SUBMODE_DETAILED + valid_submodes = [_SUBMODE_DETAILED, _SUBMODE_BRIEF] + if submode not in valid_submodes: + raise ValueError('Invalid submode "%s" given to the Tensor Tracer. ' + 'Valid submodes are: %s' % (submode, + valid_submodes)) + return submode + + @staticmethod + def match_next_flag(tt_flags, pos): + """Returns the match for the next TensorTracer flag. + + Args: + tt_flags: a string that contains the flags. + pos: where in flags to start the search. + + Returns: + A pair where the first element is the regular-expression + match found and the second element indicates if the match + has a value. + """ + + match = _FLAG_DOUBLE_QUOTE_PAT.match(tt_flags, pos) + if match: + return match, True + match = _FLAG_SINGLE_QUOTE_PAT.match(tt_flags, pos) + if match: + return match, True + match = _FLAG_NO_QUOTE_PAT.match(tt_flags, pos) + if match: + return match, True + match = _FLAG_NO_EQUAL_PAT.match(tt_flags, pos) + if match: + # The flag is found but is not given a value. + return match, False + # The flag is not found. + return None, False + + def _validate_flag_names(self): + """Validates that the TensorTracer flags passed are valid.""" + tensor_tracer_flags = self._env.get(FLAGS_ENV_VAR) + if not tensor_tracer_flags: + return + pos = 0 + while True: + match, _ = TTParameters.match_next_flag(tensor_tracer_flags, pos) + if not match: + break + flag_name = match.group(1) + if flag_name not in VALID_FLAG_NAMES: + raise ValueError( + 'The flag name "%s" passed via the environment variable "%s" ' + 'is invalid. Valid flag names are:' + '\n%s' % (flag_name, FLAGS_ENV_VAR, VALID_FLAG_NAMES)) + pos = match.end() + + def _supported_signatures(self): + """Returns a tuple of supported signatures.""" + return TT_SUMMARY_SIGNATURES + + def _get_summary_signatures(self): + """Verifies and returns the summary signatures. + + Returns: + A dictionary of the signature identifiers {signature: index} that will be + computed when trace_mode is summary.
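+ + For example (illustrative), --signatures=norm,max would yield + {'tensor_tracer_norm': 0, 'tensor_tracer_max': 1}.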
+ """ + signatures = self._flag_value_as_list(FLAG_NAME_SUMMARY_SIGNATURES) + supported_signatures = self._supported_signatures() + + tt_signatures = [] + for signature in signatures: + signature_with_prefix = '%s_%s' % (_TT_PREFIX, signature) + if signature in supported_signatures: + tt_signatures.append(signature) + elif signature_with_prefix in supported_signatures: + tt_signatures.append(signature_with_prefix) + else: + logging.warning('Unknown signature:%s. Supported signatures: %s' % + (signature, supported_signatures)) + if not tt_signatures: + # Default case collects norm and max only. + return {TT_SUMMARY_MAX_ABS: 0, TT_SUMMARY_NORM: 1} + else: + return {signature: idx for idx, signature in enumerate(tt_signatures)} + + def get_signature_to_agg_fn_map(self): + """Returns a map that contains the aggregate function for each signature.""" + # TODO(b/199284834): Aggregations are not accurate for mean and sparsity if + # cores have a different number of elements. Variance uses the maximal core + # variance. + return {TRACE_MODE_NORM: linalg_ops.norm, + TRACE_MODE_HISTORY: math_ops.reduce_max, + TRACE_MODE_MAX_ABS: math_ops.reduce_max, + TRACE_MODE_NAN_INF: math_ops.reduce_max, + TT_SUMMARY_NORM: linalg_ops.norm, + TT_SUMMARY_MAX: math_ops.reduce_max, + TT_SUMMARY_MAX_ABS: + lambda t, axis=0: math_ops.reduce_max(math_ops.abs(t), # pylint: disable=g-long-lambda + axis=axis), + TT_SUMMARY_MIN: math_ops.reduce_min, + # Exact if each part has the same number of values. + TT_SUMMARY_SPARSITY: math_ops.reduce_mean, + TT_SUMMARY_MEAN: math_ops.reduce_mean, + TT_SUMMARY_VAR: math_ops.reduce_max, # Simply reduce max variance. + TT_SUMMARY_SIZE: math_ops.reduce_sum} + + def _flag_value_as_list(self, wanted_flag_name): + """Returns the string list of a TensorTracer flag. + + Args: + wanted_flag_name: the name of the flag we are looking for. + + Returns: + The list value of the flag. + """ + string_value_list = [] + found, flag_value = self.get_flag_value(wanted_flag_name) + + if found: + assert flag_value is not None + string_value_list = flag_value.split(',') + return string_value_list + + def _flag_value_as_int_list(self, wanted_flag_name): + """Returns the integer list of a TensorTracer flag. + + Args: + wanted_flag_name: the name of the flag we are looking for. + + Returns: + the value of the flag. + Raises: + RuntimeError: If supposedly deadcode is reached. + """ + int_list = [] + found, flag_value = self.get_flag_value(wanted_flag_name) + + if found and flag_value: + try: + integer_values = flag_value.split(',') + int_list = [int(int_val) for int_val in integer_values] + except ValueError: + logging.warning('Cannot convert %s to int for flag %s', int_list, + wanted_flag_name) + return int_list + + def _get_flag_int_value(self, wanted_flag_name, default_value): + """Returns the int value of a TensorTracer flag. + + Args: + wanted_flag_name: the name of the flag we are looking for. + default_value: the default value for the flag, if not provided. + Returns: + the value of the flag. + Raises: + RuntimeError: If supposedly deadcode is reached. + """ + flag_int_value = default_value + found, flag_value = self.get_flag_value(wanted_flag_name) + + if found: + try: + flag_int_value = int(flag_value) + except ValueError: + logging.warning('Cannot convert %s to int for flag %s' % ( + flag_int_value, wanted_flag_name)) + return flag_int_value + + def get_flag_value(self, wanted_flag_name): + """Returns the value of a TensorTracer flags. 
+ + Args: + wanted_flag_name: the name of the flag we are looking for. + + Returns: + A pair where the first element indicates if the flag is + found and the second element is the value of the flag. + + Raises: + RuntimeError: If supposedly deadcode is reached. + """ + + tensor_tracer_flags = self._env.get(FLAGS_ENV_VAR) + if not tensor_tracer_flags: + return False, None + pos = 0 + while True: + match, has_value = TTParameters.match_next_flag( + tensor_tracer_flags, pos) + if not match: + return False, None + flag_name = match.group(1) + if has_value: + flag_value = match.group(2) + else: + flag_value = None + if flag_name == wanted_flag_name: + return True, flag_value + pos = match.end() + raise RuntimeError('Invalid tensor tracer flag. Could not recognize %s.' % + flag_name) + + def _flag_value_to_re_list(self, flag_name): + """Converts list of strings to compiled RE.""" + + re_list = [] + found, flag_value = self.get_flag_value(flag_name) + if not found or not flag_value: + return re_list + list_of_values = flag_value.split(',') + for v in list_of_values: + r = re.compile(v) + re_list.append(r) + return re_list + + def is_flag_on(self, flag_name): + """Returns True if the given flag is on.""" + + found, flag_value = self.get_flag_value(flag_name) + if not found: + return False + if flag_value is None: + return True + # Depends on the flag value. + flag_value = flag_value.lower() + enabled = flag_value in ['1', 't', 'true', 'y', 'yes'] + return enabled + + def is_enabled(self): + """Returns True if TensorTracer is enabled.""" + + if self.is_flag_on(FLAG_NAME_ENABLE): + logging.debug('Tensor Tracer is enabled with flags %s.', + self._env.get(FLAGS_ENV_VAR)) + return True + else: + return False + + def use_test_undeclared_outputs_dir(self): + """Decides the output directory of the report and trace files. + + Args: + None. + + Returns: + True if the output files should be written to the + test-undeclared-outputs-directory defined via an + env variable. + """ + + return self.is_flag_on(FLAG_NAME_USE_TEST_UNDECLARED_OUTPUTS_DIR) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tensor_tracer_pb2.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tensor_tracer_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..e1ebeb6f38b4cd29029003abd204d404c4343714 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tensor_tracer_pb2.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: tensorflow/python/tpu/tensor_tracer.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from tensorflow.core.framework import graph_pb2 as tensorflow_dot_core_dot_framework_dot_graph__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n)tensorflow/python/tpu/tensor_tracer.proto\x12\ntensorflow\x1a%tensorflow/core/framework/graph.proto\"\xb4\t\n\x12TensorTracerReport\x12\x41\n\x06\x63onfig\x18\x01 \x01(\x0b\x32\x31.tensorflow.TensorTracerReport.TensorTracerConfig\x12&\n\x08graphdef\x18\x02 \x01(\x0b\x32\x14.tensorflow.GraphDef\x12@\n\ttensordef\x18\x03 \x03(\x0b\x32-.tensorflow.TensorTracerReport.TensordefEntry\x12\x13\n\x0b\x66ingerprint\x18\x04 \x01(\t\x12\x1e\n\x16\x63oncrete_function_name\x18\x05 \x01(\t\x12\x1c\n\x14last_common_frame_no\x18\x06 \x01(\x05\x12\x0f\n\x07outputs\x18\x07 \x03(\t\x12\x42\n\rtracing_stats\x18\x08 \x01(\x0b\x32+.tensorflow.TensorTracerReport.TracingStats\x1a`\n\x0eTensordefEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12=\n\x05value\x18\x02 \x01(\x0b\x32..tensorflow.TensorTracerReport.TracedTensorDef:\x02\x38\x01\x1a\xc8\x01\n\x12TensorTracerConfig\x12\x0f\n\x07version\x18\x01 \x01(\t\x12\x0e\n\x06\x64\x65vice\x18\x02 \x01(\t\x12\x12\n\ntrace_mode\x18\x03 \x01(\t\x12\x11\n\tnum_cores\x18\x04 \x01(\x05\x12\x11\n\tnum_hosts\x18\x05 \x01(\x05\x12\x0f\n\x07submode\x18\x06 \x01(\t\x12\x1a\n\x12num_cores_per_host\x18\x07 \x01(\x05\x12\x16\n\x0eincluded_cores\x18\x08 \x03(\x05\x12\x12\n\nsignatures\x18\t \x03(\t\x1a\xef\x01\n\x0cTracingStats\x12\x15\n\rtotal_tensors\x18\x01 \x01(\x05\x12\x16\n\x0etraced_tensors\x18\x02 \x01(\x05\x12_\n\x13traced_tensor_types\x18\x03 \x03(\x0b\x32\x42.tensorflow.TensorTracerReport.TracingStats.TracedTensorTypesEntry\x12\x15\n\radded_tensors\x18\x04 \x01(\x05\x1a\x38\n\x16TracedTensorTypesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\xa9\x02\n\x0fTracedTensorDef\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0b\x63\x61\x63he_index\x18\x02 \x01(\x05\x12\x18\n\x10trace_point_name\x18\x03 \x01(\t\x12\x11\n\tis_traced\x18\x04 \x01(\x08\x12\x13\n\x0b\x65xplanation\x18\x05 \x01(\t\x12K\n\rop_stack_info\x18\x06 \x01(\x0b\x32\x34.tensorflow.TensorTracerReport.TracedTensorDef.Stack\x1a\x64\n\x05Stack\x12\x16\n\x0estack_fn_names\x18\x01 \x03(\t\x12\x13\n\x0bstack_lines\x18\x02 \x03(\t\x12\x17\n\x0fstack_filenames\x18\x03 \x03(\t\x12\x15\n\rstack_linenos\x18\x04 \x03(\x05\x62\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'tensorflow.python.tpu.tensor_tracer_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + _TENSORTRACERREPORT_TENSORDEFENTRY._options = None + _TENSORTRACERREPORT_TENSORDEFENTRY._serialized_options = b'8\001' + _TENSORTRACERREPORT_TRACINGSTATS_TRACEDTENSORTYPESENTRY._options = None + _TENSORTRACERREPORT_TRACINGSTATS_TRACEDTENSORTYPESENTRY._serialized_options = b'8\001' + _TENSORTRACERREPORT._serialized_start=97 + _TENSORTRACERREPORT._serialized_end=1301 + _TENSORTRACERREPORT_TENSORDEFENTRY._serialized_start=460 + _TENSORTRACERREPORT_TENSORDEFENTRY._serialized_end=556 + 
_TENSORTRACERREPORT_TENSORTRACERCONFIG._serialized_start=559 + _TENSORTRACERREPORT_TENSORTRACERCONFIG._serialized_end=759 + _TENSORTRACERREPORT_TRACINGSTATS._serialized_start=762 + _TENSORTRACERREPORT_TRACINGSTATS._serialized_end=1001 + _TENSORTRACERREPORT_TRACINGSTATS_TRACEDTENSORTYPESENTRY._serialized_start=945 + _TENSORTRACERREPORT_TRACINGSTATS_TRACEDTENSORTYPESENTRY._serialized_end=1001 + _TENSORTRACERREPORT_TRACEDTENSORDEF._serialized_start=1004 + _TENSORTRACERREPORT_TRACEDTENSORDEF._serialized_end=1301 + _TENSORTRACERREPORT_TRACEDTENSORDEF_STACK._serialized_start=1201 + _TENSORTRACERREPORT_TRACEDTENSORDEF_STACK._serialized_end=1301 +# @@protoc_insertion_point(module_scope) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/topology.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/topology.py new file mode 100644 index 0000000000000000000000000000000000000000..49bdd996c129fa15e008db7d9be4947f78d60fdb --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/topology.py @@ -0,0 +1,239 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ====================================== +"""Defines the `Topology` class, that describes a TPU fabric topology.""" + +import numpy as np + +from tensorflow.core.protobuf.tpu import topology_pb2 +from tensorflow.python.util.tf_export import tf_export + + +def _tpu_device_name(job, task, device): + """Returns the device name for the TPU `device` on `task` of `job`.""" + if job is None: + return "/task:%d/device:TPU:%d" % (task, device) + else: + return "/job:%s/task:%d/device:TPU:%d" % (job, task, device) + + +def _tpu_host_device_name(job, task): + """Returns the device name for the CPU device on `task` of `job`.""" + if job is None: + return "/task:%d/device:CPU:0" % task + else: + return "/job:%s/task:%d/device:CPU:0" % (job, task) + + +@tf_export("tpu.experimental.Topology") +class Topology(object): + """Describes a set of TPU devices. + + Represents both the shape of the physical mesh, and the mapping between + TensorFlow TPU devices to physical mesh coordinates. + """ + + def __init__(self, serialized=None, mesh_shape=None, device_coordinates=None): + """Builds a Topology object. + + If `serialized` is not `None`, the topology is parsed from `serialized` and + the other arguments are ignored. Otherwise, the topology is computed from + `mesh_shape` and `device_coordinates`. + + Args: + serialized: A serialized `TopologyProto`, or `None`. If not `None`, the + serialized proto is parsed to discover the topology. + mesh_shape: A sequence of 4 positive integers, or `None`. If not `None`, + the shape of the TPU topology, in number of cores. Ignored if + `serialized` is not `None`. + device_coordinates: A rank 3 numpy array that describes the mapping from + TensorFlow TPU devices to TPU fabric coordinates, or `None`. If + specified, array is a rank 3 int32 array with shape + `[tasks, devices, axis]`. 
`tasks` is the number of tasks in the TPU + cluster, `devices` is the number of TPU devices per task, and `axis` is + the number of axes in the TPU cluster topology. Each entry gives the + `axis`-th coordinate in the topology of a task/device pair. TPU + topologies are 4-dimensional, with dimensions `(x, y, z, core number)`. + This arg is ignored if `serialized` is not `None`. + + Raises: + ValueError: If `serialized` does not describe a well-formed topology. + ValueError: If `serialized` is `None` and `mesh_shape` is not a sequence + of 4 positive integers. + ValueError: If `serialized` is `None` and `device_coordinates` is not a + rank 3 numpy int32 array that describes a valid coordinate mapping. + """ + + self._serialized = serialized + + if serialized: + self._parse_topology(serialized) + else: + self._mesh_shape = np.asarray(mesh_shape, dtype=np.int32) + self._device_coordinates = np.asarray(device_coordinates, np.int32) + if len(self._mesh_shape) != 4 or any(self._mesh_shape < 1): + raise ValueError("`mesh_shape` must be a sequence of 4 positive " + f"entries; got `mesh_shape={self._mesh_shape}`") + + if (len(self._device_coordinates.shape) != 3 or + self._device_coordinates.shape[2] != len(self._mesh_shape)): + raise ValueError( + "`device_coordinates` must be a rank 3 int32 array " + "with minor dimension equal to the `mesh_shape` rank; " + "got device_coordinates.shape={}, len(device_coordinates.shape)={}, " + "device_coordinates.shape[2]={}, mesh_shape={}, len(mesh_shape)={}" + .format(self._device_coordinates.shape, + len(self._device_coordinates.shape), + self._device_coordinates.shape[2], self._mesh_shape, + len(self._mesh_shape))) + + self._topology_tasks, self._topology_devices = self._invert_topology() + + # Coordinates of devices that are missing + self._missing_devices = np.argwhere(self._topology_tasks < 0) + + def _parse_topology(self, serialized): + """Parses a serialized `TopologyProto` into `self`.""" + proto = topology_pb2.TopologyProto() + proto.ParseFromString(serialized) + + self._mesh_shape = np.array(proto.mesh_shape, dtype=np.int32) + if len(self._mesh_shape) != 4 or any(self._mesh_shape < 1): + raise ValueError("`mesh_shape` must be a vector of size 4 with positive " + "entries; got {}".format(self._mesh_shape)) + + if proto.num_tasks < 0: + raise ValueError("`num_tasks` must be >= 0; got {}".format( + proto.num_tasks)) + if proto.num_tpu_devices_per_task < 0: + raise ValueError("`num_tpu_devices_per_task` must be >= 0; got {}".format( + proto.num_tpu_devices_per_task)) + + expected_coordinates_size = ( + proto.num_tasks * proto.num_tpu_devices_per_task * len( + proto.mesh_shape)) + if len(proto.device_coordinates) != expected_coordinates_size: + raise ValueError("`device_coordinates` must have shape num_tasks ({}) * " + "num_tpu_devices_per_task ({}) * len(mesh_shape) ({}); " + "got shape {}".format(proto.num_tasks, + proto.num_tpu_devices_per_task, + proto.mesh_shape, + len(proto.device_coordinates))) + + coords = np.array(proto.device_coordinates, dtype=np.int32) + if any(coords < 0): + raise ValueError( + "All values in `device_coordinates` must be >= 0, got {}" + .format(coords)) + coords = coords.reshape((proto.num_tasks, proto.num_tpu_devices_per_task, + len(proto.mesh_shape))) + self._device_coordinates = coords + + def _invert_topology(self): + """Inverts a [task,device,axis] topology to [x,y,z] -> task/device maps.""" + tasks = np.full(list(self.mesh_shape), -1, dtype=np.int32) + devices = np.full(list(self.mesh_shape), -1, dtype=np.int32) + for task in
range(self.device_coordinates.shape[0]): + for device in range(self.device_coordinates.shape[1]): + x, y, z, core = self.device_coordinates[task, device, :] + tasks[x, y, z, core] = task + devices[x, y, z, core] = device + return tasks, devices + + @property + def mesh_shape(self): + """A rank 1 int32 array describing the shape of the TPU topology.""" + return self._mesh_shape + + @property + def mesh_rank(self): + """Returns the number of dimensions in the mesh.""" + return len(self._mesh_shape) + + @property + def device_coordinates(self): + """Describes the mapping from TPU devices to topology coordinates. + + Returns: + A rank 3 int32 array with shape `[tasks, devices, axis]`. + `tasks` is the number of tasks in the TPU cluster, `devices` is the number + of TPU devices per task, and `axis` is the number of axes in the TPU + cluster topology. Each entry gives the `axis`-th coordinate in the + topology of a task/device pair. TPU topologies are 4-dimensional, with + dimensions `(x, y, z, core number)`. + """ + return self._device_coordinates + + @property + def missing_devices(self): + """Array of indices of missing devices.""" + return self._missing_devices + + def task_ordinal_at_coordinates(self, device_coordinates): + """Returns the TensorFlow task number attached to `device_coordinates`. + + Args: + device_coordinates: An integer sequence describing a device's physical + coordinates in the TPU fabric. + + Returns: + The TensorFlow task number that contains the TPU device with those + physical coordinates. + """ + return self._topology_tasks[tuple(device_coordinates)] + + def tpu_device_ordinal_at_coordinates(self, device_coordinates): + """Returns the TensorFlow device number at `device_coordinates`. + + Args: + device_coordinates: An integer sequence describing a device's physical + coordinates in the TPU fabric. + + Returns: + The TensorFlow device number within the task that is + attached to the device with those physical coordinates.
+ """ + return self._topology_devices[tuple(device_coordinates)] + + def cpu_device_name_at_coordinates(self, device_coordinates, job=None): + """Returns the CPU device attached to a logical core.""" + return _tpu_host_device_name( + job, self._topology_tasks[tuple(device_coordinates)]) + + def tpu_device_name_at_coordinates(self, device_coordinates, job=None): + """Returns the name of the TPU device assigned to a logical core.""" + return _tpu_device_name(job, + self._topology_tasks[tuple(device_coordinates)], + self._topology_devices[tuple(device_coordinates)]) + + @property + def num_tasks(self): + """Returns the number of TensorFlow tasks in the TPU slice.""" + return self._device_coordinates.shape[0] + + @property + def num_tpus_per_task(self): + """Returns the number of TPU devices per task in the TPU slice.""" + return self._device_coordinates.shape[1] + + def serialized(self): + """Returns the serialized form of the topology.""" + if self._serialized is None: + proto = topology_pb2.TopologyProto() + proto.mesh_shape[:] = list(self._mesh_shape) + proto.num_tasks = self._device_coordinates.shape[0] + proto.num_tpu_devices_per_task = self._device_coordinates.shape[1] + proto.device_coordinates.extend(list(self._device_coordinates.flatten())) + self._serialized = proto.SerializeToString() + + return self._serialized diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu.py new file mode 100644 index 0000000000000000000000000000000000000000..a38bd9f881ee09b24568b163a2a262ae23ee139c --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu.py @@ -0,0 +1,1687 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ====================================== + +"""Library of TPU helper functions.""" + +import collections +import enum +from typing import Any, Callable, Iterable, List, Optional, Text, Tuple, Union + +from absl import logging +import numpy as np + +from tensorflow.compiler.tf2xla.python import xla as tf2xla +from tensorflow.core.framework import attr_value_pb2 +from tensorflow.core.protobuf.tpu import dynamic_padding_pb2 as dynamic_padding +from tensorflow.core.protobuf.tpu import tpu_embedding_configuration_pb2 as embedding_pb2 +from tensorflow.python import tf2 +from tensorflow.python.compiler.xla import xla +from tensorflow.python.framework import auto_control_deps +from tensorflow.python.framework import composite_tensor +from tensorflow.python.framework import config +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import func_graph +from tensorflow.python.framework import function +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import array_ops_stack +from tensorflow.python.ops import cond +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import variable_scope +from tensorflow.python.tpu import device_assignment as device_assignment_lib +from tensorflow.python.tpu import tensor_tracer +from tensorflow.python.tpu import tpu_feed +from tensorflow.python.tpu import tpu_function +from tensorflow.python.tpu import tpu_name_util +from tensorflow.python.tpu import tpu_replication +from tensorflow.python.tpu.ops import tpu_ops +from tensorflow.python.types import core as core_types +from tensorflow.python.util import compat +from tensorflow.python.util import nest +from tensorflow.python.util import object_identity +from tensorflow.python.util import traceback_utils +from tensorflow.python.util import variable_utils +from tensorflow.python.util.tf_export import tf_export + + +# Ops which can be safely pruned from XLA compile if they have no consumers. +# These ops should also have no inputs. +_UNCONNECTED_OPS_TO_PRUNE = set(["Placeholder", "VarHandleOp"]) + +_POST_DEVICE_REWRITE_ATTR = "_post_device_rewrite" +_TPU_COMPILATION_STATUS_ATTR = "_tpu_compilation_status" +_PIVOT_FOR_CLUSTER = "_pivot_for_cluster" + + +core = tpu_name_util.core + + +def _tpu_system_device_name(job: Optional[Text]) -> Text: + """Returns the device name for the TPU_SYSTEM device of `job`.""" + if job is None: + return "/device:TPU_SYSTEM:0" + else: + return "/job:%s/device:TPU_SYSTEM:0" % job + + +@tf_export(v1=["tpu.initialize_system"]) +def initialize_system( + embedding_config: Optional[embedding_pb2.TPUEmbeddingConfiguration] = None, + job: Optional[Text] = None, + compilation_failure_closes_chips: bool = True, + tpu_cancellation_closes_chips: Optional[bool] = None, +) -> core_types.Tensor: + """Initializes a distributed TPU system for use with TensorFlow. + + Args: + embedding_config: If not None, a `TPUEmbeddingConfiguration` proto + describing the desired configuration of the hardware embedding lookup + tables. If embedding_config is None, no hardware embeddings can be used. + job: The job (the XXX in TensorFlow device specification /job:XXX) that + contains the TPU devices that will be initialized. 
If job=None it is + assumed there is only one job in the TensorFlow flock, and an error will + be returned if this assumption does not hold. + compilation_failure_closes_chips: Whether to close TPU chips when there is + a compilation failure. + tpu_cancellation_closes_chips: Whether to close TPU chips when a TPU + execution is cancelled. If the value + is None, the behavior will be determined by the command line flag + `tpu_cancellation_closes_chips` for the TPU worker. WARNING: this argument + only applies to TFRT TPU runtime. + Returns: + A serialized `TopologyProto` that describes the TPU system. Note: + the topology must be evaluated using `Session.run` before it can be used. + """ + config_string = ("" if embedding_config is None else + embedding_config.SerializeToString()) + + # The enum is defined in core/tpu/kernels/tpu_execute_op_options.h. + tpu_cancellation_closes_chips_enum = 0 + if tpu_cancellation_closes_chips is not None: + if tpu_cancellation_closes_chips: + tpu_cancellation_closes_chips_enum = 1 + else: + tpu_cancellation_closes_chips_enum = 2 + + with ops.device(_tpu_system_device_name(job)): + topology = tpu_ops.configure_distributed_tpu( + compilation_failure_closes_chips=compilation_failure_closes_chips, + tpu_cancellation_closes_chips=tpu_cancellation_closes_chips_enum, + ) + + if embedding_config is None: + return topology + + # This set of control dependencies is needed as this function is expected to + # return an op which will return the topology when executed, but we need to + # call the embedding initialization op between initializing the TPU and + # returning the topology. + with ops.control_dependencies([topology]): + embedding_init = tpu_ops.configure_tpu_embedding(config=config_string) + with ops.control_dependencies([embedding_init]): + return array_ops.identity(topology, name="tpu_init_identity") + + +def initialize_system_for_tpu_embedding( + embedding_config: embedding_pb2.TPUEmbeddingConfiguration, + job: Optional[Text] = None, +) -> ops.Operation: + """Initializes a distributed TPU Embedding system for use with TensorFlow. + + The following two are equivalent: + 1. initialize_system() with embedding_config. + 2. initialize_system() without embedding_config, then + initialize_system_for_tpu_embedding(). + initialize_system() should not be called with embedding_config if + initialize_system_for_tpu_embedding() is meant to be called later. + + Args: + embedding_config: a `TPUEmbeddingConfiguration` proto describing the desired + configuration of the hardware embedding lookup tables. + job: The job (the XXX in TensorFlow device specification /job:XXX) that + contains the TPU devices that will be initialized. If job=None it is + assumed there is only one job in the TensorFlow flock, and an error will + be returned if this assumption does not hold. + + Returns: + A no-op. + """ + config_string = embedding_config.SerializeToString() + with ops.device(_tpu_system_device_name(job)): + return tpu_ops.configure_tpu_embedding(config=config_string) + + +@tf_export(v1=["tpu.shutdown_system"]) +def shutdown_system(job: Optional[Text] = None) -> ops.Operation: + """Shuts down a running distributed TPU system. + + Args: + job: The job (the XXX in TensorFlow device specification /job:XXX) that + contains the TPU devices that will be shut down. If job=None it is + assumed there is only one job in the TensorFlow flock, and an error will + be returned if this assumption does not hold.
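+ + Returns: + The op that, when executed, shuts down the distributed TPU system.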
+ """ + with ops.device(_tpu_system_device_name(job)): + shutdown_distributed_tpu = tpu_ops.shutdown_distributed_tpu() + return shutdown_distributed_tpu + + +@auto_control_deps.register_acd_resource_resolver +def tpu_replicated_input_resolver( + op: ops.Operation, + resource_reads: object_identity.ObjectIdentitySet, + resource_writes: object_identity.ObjectIdentitySet) -> bool: + """Replaces TPUReplicatedInput outputs with its inputs in resource_inputs.""" + # Ignore TPUReplicatedInput for ACD purposes since we will be directly adding + # control deps on the replicated inputs. + if op.type == "TPUReplicatedInput": + if resource_reads or resource_writes: + resource_reads.clear() + resource_writes.clear() + return True + else: + return False + # Replace tensors in `resource_inputs` which are outputs of TPUReplicatedInput + # with the actual replicated inputs. This allows ACD to correct add control + # deps when there are multiple calls to `run` in a + # `tf.function`. + def replace_with_unreplicated_resources(resource_inputs): + """Replaces handles in `resource_inputs` with their unreplicated inputs.""" + to_remove = [] + to_add = [] + for resource in resource_inputs: + if resource.op.type == "TPUReplicatedInput": + to_remove.append(resource) + to_add.extend(resource.op.inputs) + for t in to_remove: + resource_inputs.discard(t) + resource_inputs.update(to_add) + return to_add or to_remove + + return bool(replace_with_unreplicated_resources(resource_reads) or + replace_with_unreplicated_resources(resource_writes)) + + +@tf_export(v1=["tpu.PaddingSpec"]) +class PaddingSpec(enum.IntEnum): + """Represents the type of padding policies for tpu.replicate.""" + # By default the policy is set to AUTO, the dynamic input shape dimension will + # be pad to maximum of all the replicas. + AUTO = 0 + # Bucketize the dynamic input shape dimension into a power of 2. + POWER_OF_TWO = 1 + + +@tf_export("tpu.XLAOptions") +class XLAOptions( + collections.namedtuple("XLAOptions", [ + "use_spmd_for_xla_partitioning", + "enable_xla_dynamic_padder", + ])): + """XLA compilation options. + + Attributes: + use_spmd_for_xla_partitioning: Boolean. Whether to use XLA's SPMD + partitioner instead of MPMD partitioner when compiler partitioning is + requested. + enable_xla_dynamic_padder: Boolean. Whether to enable XLA dynamic padder + infrastructure to handle dynamic shapes inputs inside XLA. True by + default. Disabling this may cause correctness issues with dynamic shapes + inputs, as XLA will just assume the inputs are with padded shapes. However + users can optionally set it to False to improve device time if masking is + already handled in the user side. + """ + + def __new__(cls, + use_spmd_for_xla_partitioning=True, + enable_xla_dynamic_padder=True): + return super(XLAOptions, cls).__new__(cls, use_spmd_for_xla_partitioning, + enable_xla_dynamic_padder) + + +@tf_export(v1=["tpu.replicate"]) +@traceback_utils.filter_traceback +def replicate( + computation: Callable[..., Any], + inputs: Optional[List[List[core_types.Tensor]]] = None, + infeed_queue: Optional[tpu_feed.InfeedQueue] = None, + device_assignment: Optional[device_assignment_lib.DeviceAssignment] = None, + name: Optional[Text] = None, + maximum_shapes: Optional[Any] = None, + padding_spec: Optional[PaddingSpec] = None, + xla_options: Optional[XLAOptions] = None) -> List[Any]: + """Builds a graph operator that runs a replicated TPU computation. 
+ + Example of the basic usage where `inputs` has a static shape: + + ```python + + def computation(x): + x = x + 1 + return tf.math.reduce_mean(x) + + x = tf.convert_to_tensor([1., 2., 3.]) + y = tf.convert_to_tensor([4., 5., 6.]) + tf.compat.v1.tpu.replicate(computation, inputs=[[x], [y]]) + ``` + + If the `inputs` have dynamic shapes and you would like to automatically + bucketize the inputs to avoid XLA recompilation, see the advanced example + below: + + ```python + + def computation(x): + x = x + 1 + return tf.math.reduce_mean(x) + + # Assume input tensors in two replicas `x` and `y` both have dynamic shape + # ([None, 2]). + tf.compat.v1.tpu.replicate( + computation, + inputs=[x, y], + maximum_shapes=[tf.TensorShape([None, None])], + padding_spec=tf.compat.v1.tpu.PaddingSpec.POWER_OF_TWO) + ``` + + Args: + computation: A Python function that builds the computation to replicate. + inputs: A list of lists of input tensors or `None` (equivalent to + `[[]]`), indexed by `[replica_num][input_num]`. All replicas must + have the same number of inputs. Each input can be a nested structure + containing values that are convertible to tensors. Note that passing an + N-dimension list of compatible values will result in an N-dimension list + of scalar tensors rather than a single rank-N tensor. If you need + different behavior, convert part of inputs to tensors with + `tf.convert_to_tensor`. + infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple + of arguments as inputs to computation. + device_assignment: If not `None`, a `DeviceAssignment` describing the + mapping between logical cores in the computation with physical cores in + the TPU topology. Uses a default device assignment if `None`. The + `DeviceAssignment` may be omitted if each replica of the computation uses + only one core, and there is either only one replica, or the number of + replicas is equal to the number of cores in the TPU system. + name: (Deprecated) Does nothing. + maximum_shapes: A nested structure of tf.TensorShape representing the shape + to which the respective component of each input element in each replica + should be padded. Any unknown dimensions (e.g. + tf.compat.v1.Dimension(None) in a tf.TensorShape or -1 in a tensor-like + object) will be padded to the maximum size of that dimension over all + replicas. The structure of `maximum_shapes` needs to be the same as + `inputs[0]`. + padding_spec: An enum specified by `tpu.PaddingSpec`. This describes the + padding policy when the `inputs` to `tpu.replicate` is dynamic. + One usage is to enable automatic bucketizing on the inputs by setting the + value to `tpu.PaddingSpec.POWER_OF_TWO`, which can help to reduce the + recompilation in the XLA side. + xla_options: An instance of `tpu.XLAOptions` which indicates the options + passed to XLA compiler. Use `None` for default options. + Returns: + A list of outputs, indexed by `[replica_num]`; each output can be a nested + structure, the same as what computation() returns, with a few exceptions. + + Exceptions include: + 1) None output: a NoOp would be returned which control-depends on + computation. + 2) Single value output: A tuple containing the value would be returned. + 3) Operation-only outputs: a NoOp would be returned which + control-depends on computation. + TODO(b/121383831): Investigate into removing these special cases. + + Raises: + ValueError: If all replicas do not have equal numbers of input tensors.
+    ValueError: If the number of inputs per replica does not match
+      the number of formal parameters to `computation`.
+    ValueError: If the static `inputs` dimensions don't match with the values
+      given in `maximum_shapes`.
+    ValueError: If the structure of inputs per replica does not match
+      the structure of `maximum_shapes`.
+  """
+  return split_compile_and_replicate(
+      computation,
+      inputs,
+      infeed_queue,
+      device_assignment,
+      name,
+      maximum_shapes=maximum_shapes,
+      padding_spec=padding_spec,
+      xla_options=xla_options)[1]
+
+
+def _ceil_to_pow_of_n(x, n):
+  """Rounds `x` up to the nearest power of `n`."""
+  x = math_ops.cast(x, dtypes.float32)
+  lognx = math_ops.log(x) / math_ops.log(n * 1.0)
+  lognx = math_ops.ceil(lognx)
+  result = math_ops.pow(n * 1.0, lognx)
+  result = math_ops.cast(result, dtypes.int32)
+  return result
+
+
+def _pad_all_input(
+    inputs: Iterable[core_types.Tensor],
+    padded_shapes: List[Optional[tensor_shape.TensorShape]],
+    padding_spec: PaddingSpec
+) -> Tuple[List[List[Any]], List[dynamic_padding.PaddingMap]]:
+  """Pads all input tensors given padded_shapes.
+
+  The real shape tensors will be concatenated with the padded original inputs.
+
+  Args:
+    inputs: The original inputs.
+    padded_shapes: A list of padded shapes for each input. If an entry is None,
+      no padding is performed.
+    padding_spec: An enum specified by `tpu.PaddingSpec`. This describes the
+      padding policy when the `inputs` to `tf.tpu.replicate` are dynamic.
+      One usage is to enable automatic bucketizing on the inputs by setting the
+      value to `tpu.PaddingSpec.POWER_OF_TWO`, which can help to reduce
+      recompilation on the XLA side.
+
+  Returns:
+    The padded inputs and a PaddingMap list which maps each padded input
+    dimension to the real shape argument index.
+  """
+  # maximum_static_shapes[idx][i] indicates the maximum static size of the ith
+  # dimension of the idx-th input among all the replicas.
+  maximum_static_shapes = []
+  # need_padding[idx][i] indicates whether the ith dimension of the idx-th
+  # input needs padding.
+  need_padding = []
+  input_shape_tensors = []
+  for core_idx, inputs_per_core in enumerate(inputs):
+    for idx, input_tensor in enumerate(inputs_per_core):
+      input_shape = input_tensor.get_shape().as_list()
+      if core_idx == 0:
+        input_shape_tensors.append([])
+        maximum_static_shapes.append(input_shape)
+        need_padding.append(np.full_like(input_shape, False, dtype=bool))
+      else:
+        for i, s in enumerate(input_shape):
+          if s is None or s != maximum_static_shapes[idx][i]:
+            need_padding[idx][i] = True
+        maximum_static_shapes[idx] = max(input_shape,
+                                         maximum_static_shapes[idx])
+
+      # Append _POST_DEVICE_REWRITE_ATTR attributes to the real shape ops.
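+      # These real-shape tensors are later appended after the padded inputs
+      # (see `real_shapes` below), so the computation can recover the true,
+      # unpadded size of each dynamic dimension.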
+      real_input_shape = array_ops.shape(input_tensor)
+      real_input_shape.op._set_attr(  # pylint: disable=protected-access
+          _POST_DEVICE_REWRITE_ATTR,
+          attr_value_pb2.AttrValue(b=True))
+      input_shape_tensors[idx].append(real_input_shape)
+
+  maximum_shapes = []
+  for shapes_per_input in input_shape_tensors:
+    maximum_shapes.append(
+        math_ops.reduce_max(array_ops_stack.stack(shapes_per_input), axis=0))
+
+  padded_inputs = []
+  real_shapes = []
+  padding_maps = []
+  for core_idx, inputs_per_core in enumerate(inputs):
+    padded_inputs.append([])
+    real_shapes.append([])
+    real_shape_idx = len(inputs_per_core) - 1
+    for idx, input_tensor in enumerate(inputs_per_core):
+      input_shape_tensor = input_shape_tensors[idx][core_idx]
+      input_shape = input_tensor.get_shape().as_list()
+      padded_shape = padded_shapes[idx]
+
+      # Skip padding when no dimension needs it or when no padded_shape was
+      # given.
+      if any(need_padding[idx]) and padded_shape is not None:
+        for i, s in enumerate(input_shape):
+          if need_padding[idx][i]:
+            if core_idx == 0:
+              real_shape_idx += 1
+              padding_map = dynamic_padding.PaddingMap()
+              padding_map.arg_index = idx
+              padding_map.shape_index = i
+              padding_map.padding_arg_index = real_shape_idx
+              padding_maps.append(padding_map)
+            real_shapes[core_idx].append(
+                math_ops.cast(input_shape_tensor[i], dtypes.int32))
+
+        paddings = []
+        for i, s in enumerate(padded_shape.dims):
+          if need_padding[idx][i]:
+            # The minimum padded dimension size is 2, as XLA doesn't support a
+            # dynamic dimension of size 1.
+            minimum_dynamic_dim_size = 2
+            if s.value is not None:
+              # Pad to the given maximum value.
+              max_dim_size = max(s.value, minimum_dynamic_dim_size)
+            else:
+              # If a maximum value is not given, then pad to the maximum
+              # dimension among all the cores.
+              max_dim_size = math_ops.maximum(maximum_shapes[idx][i],
+                                              minimum_dynamic_dim_size)
+            if padding_spec == PaddingSpec.POWER_OF_TWO:
+              max_dim_size = _ceil_to_pow_of_n(max_dim_size, 2)
+            # Pad to the given maximum value.
+            padding = [0, max_dim_size - input_shape_tensor[i]]
+          else:
+            padding = [0, 0]
+          paddings.append(padding)
+
+        if input_tensor.get_shape().is_fully_defined():
+          # TODO(rxsang): This is a hack to make sure padded_input has dynamic
+          # shapes, so any tf.size/tf.shape op performed on it won't be
+          # constant folded. Do we have better ways to do it?
+          padded_input = cond.cond(
+              array_ops.constant(True),
+              lambda: array_ops.pad(input_tensor, paddings),  # pylint: disable=cell-var-from-loop
+              lambda: input_tensor)
+        else:
+          padded_input = array_ops.pad(input_tensor, paddings)
+
+        # Append _POST_DEVICE_REWRITE_ATTR attributes to all padded inputs.
+        padded_input.op._set_attr(  # pylint: disable=protected-access
+            _POST_DEVICE_REWRITE_ATTR,
+            attr_value_pb2.AttrValue(b=True))
+
+        padded_inputs[core_idx].append(padded_input)
+      else:
+        padded_inputs[core_idx].append(input_tensor)
+
+  num_replicas = len(padded_inputs)
+  for i in range(num_replicas):
+    padded_inputs[i].extend(real_shapes[i])
+
+  return padded_inputs, padding_maps
+
+
+def _flatten_and_filter_composite(maybe_composite, non_composite_output,
+                                  composite_output=None):
+  """For an input, replaces the input with a tuple if the input is composite.
+
+  If `maybe_composite` is not composite, return the parameter
+  `non_composite_output`; otherwise, return a tuple containing the value of the
+  parameter `composite_output` once for each component of the composite tensor.
+
+  This is useful for computing a mask when flattening nested data with
+  `expand_composites=True`.
+  For example,
+
+  ```python
+  nest.flatten(data, expand_composites=True)
+  ```
+
+  and
+
+  ```python
+  nest.flatten(nest.map_structure(
+      lambda x: _flatten_and_filter_composite(x, False, True), data))
+  ```
+
+  will have the same length, and the second will be True wherever the tensor
+  in the first was derived from expanding a composite tensor.
+
+  Args:
+    maybe_composite: A value to test for being a composite tensor.
+    non_composite_output: The value to return when `maybe_composite` is not a
+      composite.
+    composite_output: The value to fill the output tuple with if
+      `maybe_composite` is a composite.
+
+  Returns:
+    `non_composite_output` or a tuple with multiple copies of
+    `composite_output`.
+  """
+
+  if isinstance(maybe_composite, composite_tensor.CompositeTensor):
+    num_components = len(nest.flatten(maybe_composite, expand_composites=True))
+    return (composite_output,) * num_components
+  return non_composite_output
+
+
+def split_compile_and_replicate(
+    computation: Callable[..., Any],
+    inputs: Optional[List[List[core_types.Tensor]]] = None,
+    infeed_queue: Optional[tpu_feed.InfeedQueue] = None,
+    device_assignment: Optional[device_assignment_lib.DeviceAssignment] = None,
+    name: Optional[Text] = None,
+    use_tpu: bool = True,
+    maximum_shapes: Optional[Any] = None,
+    padding_spec: Optional[PaddingSpec] = None,
+    xla_options: Optional[XLAOptions] = None,
+) -> List[List[core_types.Tensor]]:
+  """Builds graph operators that run compilation and replicated computation.
+
+  This is a lower-level interface than replicate that returns a separate
+  compile and execute output tensor. In the generated graph the compile op
+  feeds into the execute op and no additional compilation is incurred when
+  running the compile op before the execute op. The compile op returns
+  additional information about the compilation but does not return the
+  compiled program.
+
+  Args:
+    computation: A Python function that builds the computation to replicate.
+    inputs: A list of lists of input tensors or `None` (equivalent to
+      `[[]]`), indexed by `[replica_num][input_num]`. All replicas must
+      have the same number of inputs. Each input can be a nested structure
+      containing values that are convertible to tensors. Note that passing an
+      N-dimension list of compatible values will result in an N-dimension list
+      of scalar tensors rather than a single rank-N tensor. If you need
+      different behavior, convert part of the inputs to tensors with
+      `tf.convert_to_tensor`.
+    infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
+      of arguments as inputs to computation.
+    device_assignment: If not `None`, a `DeviceAssignment` describing the
+      mapping between logical cores in the computation and physical cores in
+      the TPU topology. Uses a default device assignment if `None`. The
+      `DeviceAssignment` may be omitted if each replica of the computation uses
+      only one core, and there is either only one replica, or the number of
+      replicas is equal to the number of cores in the TPU system.
+    name: (Deprecated) Does nothing.
+    use_tpu: When false, the input `computation` is executed on the XLA CPU/GPU
+      backends. Currently, only supports a default placement (computation is
+      placed on GPU if one is available, and on CPU if not).
+    maximum_shapes: A nested structure of tf.TensorShape representing the shape
+      to which the respective component of each input element in each replica
+      should be padded. Any unknown dimensions (e.g.
+      tf.compat.v1.Dimension(None) in a tf.TensorShape or -1 in a tensor-like
+      object) will be padded to the maximum size of that dimension over all
+      replicas. The structure of `maximum_shapes` needs to be the same as
+      `inputs[0]`.
+    padding_spec: An enum specified by `tf.tpu.PaddingSpec`. This describes the
+      padding policy when the `inputs` to `tf.tpu.replicate` are dynamic.
+      One usage is to enable automatic bucketizing on the inputs by setting the
+      value to `tpu.PaddingSpec.POWER_OF_TWO`, which can help to reduce
+      recompilation on the XLA side.
+    xla_options: An instance of `tpu.XLAOptions` which indicates the options
+      passed to XLA compiler. Use `None` for default options.
+
+  Returns:
+    A list whose first element is the compile op and whose second element is a
+    list of output tensors, indexed by `[replica_num][output_num]`.
+  Raises:
+    ValueError: If all replicas do not have equal numbers of input tensors.
+    ValueError: If the number of inputs per replica does not match
+      the number of formal parameters to `computation`.
+    ValueError: If the static `inputs` dimensions don't match with the values
+      given in `maximum_shapes`.
+    ValueError: If the structure of inputs per replica does not match
+      the structure of `maximum_shapes`.
+  """
+  del name
+  inputs = [[]] if inputs is None else inputs
+  xla_options = xla_options or XLAOptions()
+
+  metadata_kwargs = {}
+  if device_assignment is not None:
+    # Turn the Numpy array into a flattened list so we can pass it as an
+    # operator attribute.
+    metadata_kwargs = {
+        "topology":
+            device_assignment.topology.serialized(),
+        "device_assignment":
+            device_assignment.core_assignment.flatten().tolist()
+    }
+    metadata_kwargs["num_cores_per_replica"] = (
+        device_assignment.num_cores_per_replica)
+
+  # This entry is used for enabling automatic outside compilation.
+  metadata_kwargs["allow_soft_placement"] = config.get_soft_device_placement()
+  if config.get_soft_device_placement():
+    logging.info("Automatic outside compilation is enabled. "
+                 "Ops without XLA kernels will be automatically "
+                 "placed on CPU.")
+
+  if not isinstance(inputs, list):
+    raise TypeError("tpu.replicate() inputs must be a list of lists/tuples, "
+                    f"received {type(inputs)}")
+  if any(not isinstance(inp, (list, tuple)) for inp in inputs):
+    raise TypeError(
+        "tpu.replicate() inputs must be a list of lists/tuples, "
+        f"received types: {[type(inp) for inp in inputs]}")
+
+  num_replicas = len(inputs)
+
+  # No replicas? Nothing to do.
+  if num_replicas == 0:
+    return []
+
+  # Checks that all replicas have the same structure.
+  for i in range(1, num_replicas):
+    nest.assert_same_structure(inputs[0], inputs[i])
+
+  # Explicitly read variables.
+  inputs = variable_utils.convert_variables_to_tensors(inputs)
+  # Flatten inputs. This structure may contain None values, which will be
+  # handled later.
+  flat_inputs_with_nones = [
+      nest.flatten(per_replica_input, expand_composites=True)
+      for per_replica_input in inputs
+  ]
+  # Mask parallel to one replica's inputs with True for tensors coming from
+  # composites.
+  is_composite = nest.flatten(nest.map_structure(
+      lambda x: _flatten_and_filter_composite(x, False, True), inputs[0]))
+
+  # Converts inputs to Tensors, replacing Nones with a placeholder 0 since
+  # tpu_ops.tpu_replicated_input() can't handle non-Tensor values.
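+  # The placeholders are swapped back to `None` before `computation` is
+  # called, so user code never sees them (see `computation_inputs` below).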
+  flat_inputs = []
+  for inp in flat_inputs_with_nones:
+    flat_inputs.append([
+        constant_op.constant(0) if x is None else ops.convert_to_tensor(x)
+        for x in inp
+    ])
+
+  # Verifies that all replicas have matching numbers and types of inputs
+  flat_input_types = [x.dtype for x in flat_inputs[0]]
+  input_arity = len(inputs[0])
+  flat_input_arity = len(flat_input_types)
+  for i in range(num_replicas):
+    if len(inputs[i]) != input_arity:
+      raise ValueError("Replicas must have the same number of inputs. "
+                       "Replica 0 had {} inputs, replica {} had {} "
+                       "inputs.".format(input_arity, i, len(inputs[i])))
+
+    types = [x.dtype for x in flat_inputs[i]]
+    if types != flat_input_types:
+      raise ValueError("Replicas must have matching input types. Replica 0 had "
+                       "input types {}, replica {} had input types {}".format(
+                           flat_input_types, i, types))
+
+  arg_error = xla.check_function_argument_count(
+      computation, input_arity, infeed_queue)
+  if arg_error is not None:
+    if infeed_queue is None:
+      raise TypeError(
+          "Supplied computation cannot be called with the specified inputs. "
+          f"You specified {input_arity} inputs: {[i.name for i in inputs[0]]}, "
+          f"but the computation needs {arg_error}")
+    else:
+      raise TypeError(
+          "Supplied computation cannot be called with the specified inputs. "
+          f"You specified {input_arity} inputs: {[i.name for i in inputs[0]]} "
+          f"and {infeed_queue.number_of_tuple_elements} additional inputs "
+          f"from infeed, but the computation needs {arg_error}")
+
+  dynamic_shape_inputs = False
+  if maximum_shapes:
+    if infeed_queue:
+      raise ValueError(
+          "Dynamic input shapes are not supported with infeed queues")
+
+    # Make sure maximum_shapes has the same structure as inputs.
+    nest.assert_same_structure(inputs[0], maximum_shapes, check_types=False)
+
+    # Flatten padded shapes:
+    # For composite tensor components, we don't want to pad them. For each
+    # entry of maximum_shapes that corresponds to a composite tensor, replace
+    # it by a tuple of Nones of the same length as the number of components of
+    # the composite tensor. When we flatten a second time, this makes
+    # flat_maximum_shapes have the same length as flat_inputs[i]. We can then
+    # avoid padding these tensors. The assumption is that they will be used by
+    # outside compilation or that the components are statically shaped and will
+    # be used by TPU-compatible ops.
+    flat_maximum_shapes = nest.flatten(
+        [_flatten_and_filter_composite(x, y)
+         for x, y in zip(nest.flatten(inputs[0]),
+                         nest.flatten(maximum_shapes))])
+    flat_maximum_shapes = [
+        tensor_shape.TensorShape(s) if s is not None else None
+        for s in flat_maximum_shapes
+    ]
+    nest.assert_same_structure(flat_inputs[0], flat_maximum_shapes,
+                               check_types=False)
+
+    unpadded_inputs = flat_inputs
+    flat_inputs, padding_maps = _pad_all_input(unpadded_inputs,
+                                               flat_maximum_shapes,
+                                               padding_spec)
+    if padding_maps:
+      dynamic_shape_inputs = True
+      logging.info("TPU has inputs with dynamic shapes: %s", inputs[0])
+
+  metadata_kwargs["step_marker_location"] = getattr(
+      computation, "step_marker_location", "STEP_MARK_AT_ENTRY")
+  metadata_kwargs["use_spmd_for_xla_partitioning"] = \
+      xla_options.use_spmd_for_xla_partitioning
+
+  graph = ops.get_default_graph()
+
+  # Fan-in: Builds a TPUReplicatedInput node for each input.
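+  # Each TPUReplicatedInput op fans the per-replica tensors for one logical
+  # input into a single tensor, e.g. for two replicas (hypothetical tensors):
+  #   tpu_ops.tpu_replicated_input([x_replica0, x_replica1], name="input0")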
+  flat_replicated_inputs = []
+  for i in range(0, len(flat_inputs[0])):
+    replicas = [flat_inputs[replica][i] for replica in range(num_replicas)]
+    flat_replicated_inputs.append(
+        tpu_ops.tpu_replicated_input(
+            replicas, name="input{}".format(i)))
+  if isinstance(graph, func_graph.FuncGraph):
+    # When we are in a TensorFlow 2.0 function, 'graph' will be a FuncGraph
+    # object. If both the outside graph and this function have a TPU cluster,
+    # they will have the same cluster name and it will cause problems (because
+    # we lower functional ops in TensorFlow 2.0). Append the function name to
+    # 'cluster_name' to avoid cluster name collision.
+    cluster_name = graph.unique_name("cluster_" + graph.name)
+  else:
+    cluster_name = graph.unique_name("cluster")
+  pivot = control_flow_ops.no_op(name=cluster_name + "/pivot")
+  pivot._set_attr(_PIVOT_FOR_CLUSTER,  # pylint: disable=protected-access
+                  attr_value_pb2.AttrValue(s=compat.as_bytes(cluster_name)))
+  context = tpu_replication.TPUReplicateContext(
+      name=cluster_name, num_replicas=num_replicas, pivot=pivot)
+  try:
+    context.Enter()
+
+    metadata = tpu_ops.tpu_replicate_metadata(
+        num_replicas=num_replicas, use_tpu=use_tpu, **metadata_kwargs)
+
+    with tpu_function.tpu_shard_context(
+        num_replicas), ops.control_dependencies([metadata]):
+
+      if dynamic_shape_inputs and xla_options.enable_xla_dynamic_padder:
+        for padding_map in padding_maps:
+          input_shape = flat_replicated_inputs[padding_map.arg_index].shape
+          flat_replicated_inputs[
+              padding_map.arg_index] = tf2xla.set_dynamic_dimension_size(
+                  flat_replicated_inputs[padding_map.arg_index],
+                  padding_map.shape_index,
+                  flat_replicated_inputs[padding_map.padding_arg_index])
+          flat_replicated_inputs[padding_map.arg_index].set_shape(input_shape)
+
+      # Add identity ops so even unused inputs are "consumed" by the
+      # computation. This is to avoid orphaned TPUReplicatedInput nodes.
+      # TODO(phawkins): consider instead pruning unused TPUReplicatedInput
+      # and eliding trivial TPUReplicatedInput/TPUReplicatedOutput pairs.
+      flat_replicated_inputs = [
+          array_ops.identity(x, name="replicated_input_{}".format(i))
+          for i, x in enumerate(flat_replicated_inputs)
+      ]
+      for i, composite in zip(flat_replicated_inputs, is_composite):
+        # pylint: disable=protected-access
+        # Add an attribute to the identity nodes so that they can be removed
+        # by the encapsulate-TPU-computation pass if unused. However, we don't
+        # remove inputs when dynamic padding is enabled.
+        # TODO(rxsang): Use other ways except argument index in padding_map so
+        # outside compilation can work with dynamic padding correctly.
+        if not dynamic_shape_inputs or composite:
+          i.op._set_attr("_tpu_input_identity",
+                         attr_value_pb2.AttrValue(b=True))
+        # pylint: enable=protected-access
+
+      # Clobber replicated placeholders with Nones.
+      computation_inputs = [
+          None if inp is None else replicated for replicated, inp in zip(
+              flat_replicated_inputs, flat_inputs_with_nones[0])
+      ]
+
+      # Unflatten the computation inputs to match the original input structure.
+      computation_inputs = nest.pack_sequence_as(
+          structure=inputs[0],
+          flat_sequence=computation_inputs[:flat_input_arity],
+          expand_composites=True)
+
+      # If there is an infeed queue, adds the dequeued values to the
+      # computation's inputs.
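+      # The infeed queue is sharded across replicas, so each replica dequeues
+      # its own tuple of tensors.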
+      if infeed_queue is not None:
+        infeed_queue.set_number_of_shards(num_replicas)
+        for t in infeed_queue.generate_dequeue_op():
+          computation_inputs.append(t)
+
+      # Only resource variables work inside a TPU computation, so turn on
+      # resource variables for the computation.
+      # TODO(phawkins): consider removing this code. It will
+      # be less confusing to clients if they knowingly choose to use resource
+      # variables.
+      # Partitioned variables are not supported (b/112311320).
+      vscope = variable_scope.get_variable_scope()
+      saved_use_resource = vscope.use_resource
+      saved_custom_getter = vscope.custom_getter
+
+      def custom_getter(getter, name, *args, **kwargs):
+        """Variables on TPU have a few restrictions."""
+        partitioner = kwargs.get("partitioner", None)
+        if partitioner is not None:
+          kwargs["partitioner"] = None
+          logging.warning(
+              "Partitioned variables are not supported on TPU. Got "
+              "`partitioner` that is %s for variable %s. "
+              "Setting `partitioner` to `None`.", partitioner, name)
+        if saved_custom_getter is None:
+          return getter(name, *args, **kwargs)
+        else:
+          return saved_custom_getter(getter, name, *args, **kwargs)
+
+      vscope.set_use_resource(True)
+      vscope.set_custom_getter(custom_getter)
+
+      outputs = computation(*computation_inputs)
+
+      vscope.set_use_resource(saved_use_resource)
+      vscope.set_custom_getter(saved_custom_getter)
+
+      outputs = variable_utils.convert_variables_to_tensors(outputs)
+
+      need_spmd_partitioning = (
+          xla_options.use_spmd_for_xla_partitioning and
+          device_assignment is not None and
+          device_assignment.num_cores_per_replica > 1)
+      outputs_is_flat = xla.is_flat(outputs)
+      if outputs_is_flat:
+        output_tensors, control_deps, pack_template = _postprocess_flat_outputs(
+            outputs, need_spmd_partitioning)
+      else:
+        output_tensors, control_deps, pack_template = (
+            _postprocess_non_flat_outputs(outputs, need_spmd_partitioning))
+
+      if tensor_tracer.TensorTracer.is_enabled():
+        if tf2.enabled():
+          logging.warn("TF API ver >= 2.0 detected. "
+                       "Tensor Tracer v1 is not enabled.")
+        else:
+          tt = tensor_tracer.TensorTracer()
+          output_tensors = tt.trace_tpu(ops.get_default_graph(),
+                                        output_tensors, control_deps,
+                                        num_replicas)
+
+      context.ExitResult(output_tensors)
+  finally:
+    context.report_unsupported_operations()
+    context.Exit()
+    host_compute_core = context.HostComputeCore()
+
+  if host_compute_core:
+    attr_value = attr_value_pb2.AttrValue()
+    attr_value.list.s.extend(compat.as_bytes(x) for x in host_compute_core)
+    metadata._set_attr("host_compute_core", attr_value)  # pylint: disable=protected-access
+
+  with ops.control_dependencies([metadata]):
+    if use_tpu:
+      compile_status = tpu_ops.tpu_compilation_result()
+      op = compile_status.op
+      attr_value = attr_value_pb2.AttrValue(s=compat.as_bytes(cluster_name))
+      op._set_attr(_TPU_COMPILATION_STATUS_ATTR, attr_value)  # pylint: disable=protected-access
+    else:
+      compile_status = control_flow_ops.no_op(name="compilation_status")
+
+  if not output_tensors:
+    # Returns a list of NoOps dependent on the replication Op, indexed by
+    # [replica_num].
+    return [
+        compile_status,
+        [
+            control_flow_ops.group(control_deps, name="shard_%d" % i)
+            for i in range(num_replicas)
+        ]
+    ]
+
+  # Fan-out: Builds a TPUReplicatedOutput node for each output.
+  replicated_outputs = [[] for i in range(num_replicas)]
+  for i, t in enumerate(output_tensors):
+
+    # None values returned by the computation can't be sent to
+    # tpu_ops.tpu_replicated_output(); we handle them specially here.
+    # We can avoid the placeholder 0 routine required on the inputs because
+    # outputs are replicated per tensor, not per replica, so a `None` output
+    # can simply skip replication.
+    if t is None:
+      for replica in range(num_replicas):
+        replicated_outputs[replica].append(None)
+      continue
+
+    # Fan-out: builds a TPUReplicatedOutput node for this output.
+    ys = tpu_ops.tpu_replicated_output(
+        t, num_replicas, name="output{}".format(i))
+
+    # Wraps the outputs in identity operators so the names of any possible
+    # `fetch` nodes are preserved by the replication rewrite.
+    with ops.control_dependencies(control_deps):
+      for replica in range(num_replicas):
+        replicated_outputs[replica].append(
+            array_ops.identity(
+                ys[replica], name="output_%d_shard_%d" % (i, replica)))
+
+  replicated_outputs = [
+      nest.pack_sequence_as(pack_template, replica_outs, expand_composites=True)
+      for replica_outs in replicated_outputs
+  ]
+
+  return [compile_status, replicated_outputs]
+
+
+def _postprocess_flat_outputs(
+    outputs: Any,
+    need_spmd_partitioning: bool
+) -> Tuple[List[Optional[core_types.Tensor]], List[ops.Operation], List[Any]]:
+  """Validates flat outputs, adds back device assignments and other attrs.
+
+  Args:
+    outputs: Output from `computation` inside `tpu.rewrite`.
+    need_spmd_partitioning: Whether XLA SPMD partitioning is needed.
+
+  Returns:
+    - Tensors extracted from outputs.
+    - Operations extracted from outputs.
+    - A pack template for use with nest.pack_sequence_as to pack the tensors.
+  """
+  # The following code segment preserves legacy behavior. Previously we only
+  # supported flat outputs, and thus for consistency it was nice to convert
+  # even a single element into a tuple. But now that we support arbitrary
+  # output structure, this is no longer necessary.
+  # TODO(b/121383831): Migrate all legacy use cases and delete this special
+  # case.
+  # If the computation returns `None`, make it an empty tuple.
+  if outputs is None:
+    outputs = tuple()
+
+  # For legacy / backwards compatibility reasons we return a list for "flat"
+  # output values (even if the user's flat return value was a different type
+  # or even just a scalar value) so use nest.flatten to compute a flat list
+  # pack template.
+  pack_template = nest.flatten(outputs, expand_composites=False)
+
+  # Even though outputs is already "flat", we flatten any composites so their
+  # component tensors can be tagged and replicated. The pack_template will be
+  # used by the caller to repack the composite tensors.
+  outputs = nest.flatten(outputs, expand_composites=True)
+
+  # Append a `no_op` here so that fetching any return value of this function
+  # will trigger the TPUExecute node.
+  outputs += (control_flow_ops.no_op(),)
+
+  maybe_convert = lambda x: None if x is None else ops.convert_to_tensor(x)
+  try:
+    if need_spmd_partitioning:
+      outputs = [
+          o if isinstance(o, ops.Operation) else maybe_convert(o)
+          for o in outputs
+      ]
+    else:
+      with ops.device(core(0)):
+        outputs = [
+            o if isinstance(o, ops.Operation) else maybe_convert(o)
+            for o in outputs
+        ]
+  except Exception as e:
+    raise ValueError(
+        "TPU function return values must all either be Operations or "
+        f"convertible to Tensors. Got error: {e}")
+
+  # Separates the returned Operations and Tensors.
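+  # A TPU function may return zero or more Tensors followed by zero or more
+  # Operations; the Operations only contribute control dependencies.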
+  output_operations = [o for o in outputs if isinstance(o, ops.Operation)]
+  output_tensors = [o for o in outputs if not isinstance(o, ops.Operation)]
+
+  if outputs != output_tensors + output_operations:
+    raise ValueError(
+        "TPU functions must return zero or more Tensor values followed by "
+        "zero or more Operations.")
+
+  # Trim operations off the end of the pack template. output_operations has 1
+  # extra element due to the no-op that is added.
+  if len(output_operations) > 1:
+    pack_template = pack_template[:1 - len(output_operations)]
+
+  # Wraps outputs in Identity ops. Otherwise a replicated input copied
+  # straight to an output would bypass the replicate(). This would be bad
+  # because the TPUReplicatedInput/TPUReplicatedOutput operator would not
+  # be rewritten away, leading to a runtime error.
+  # TODO(phawkins): extend the rewrite to elide these nodes instead.
+  new_output_tensors = []
+  for t in output_tensors:
+    if t is None:
+      new_output_tensors.append(None)
+    elif need_spmd_partitioning:
+      o = array_ops.identity(t)
+      # pylint: disable=protected-access
+      o.op._set_attr("_tpu_output_identity", attr_value_pb2.AttrValue(b=True))
+      # pylint: enable=protected-access
+      new_output_tensors.append(o)
+    else:
+      with ops.device(t.device if t.device else core(0)):
+        o = array_ops.identity(t)
+        # pylint: disable=protected-access
+        o.op._set_attr("_tpu_output_identity", attr_value_pb2.AttrValue(b=True))
+        # pylint: enable=protected-access
+        new_output_tensors.append(o)
+  return new_output_tensors, output_operations, pack_template
+
+
+def _postprocess_non_flat_outputs(
+    outputs: Any,
+    need_spmd_partitioning: bool
+) -> Tuple[List[Optional[core_types.Tensor]], List[ops.Operation], List[Any]]:
+  """Validates non-flat outputs, adds back device assignments and other attrs.
+
+  Args:
+    outputs: Output from `computation` inside `tpu.rewrite`.
+    need_spmd_partitioning: Whether XLA SPMD partitioning is needed.
+
+  Returns:
+    - Tensors extracted from outputs.
+    - An empty Operations list because Operations are not allowed in non-flat
+      outputs.
+    - A pack template for use with nest.pack_sequence_as to pack the tensors.
+  """
+
+  # Flatten output items.
+  flat_outputs = nest.flatten(outputs, expand_composites=True)
+
+  # Convert all non-None non-Operation outputs to Tensors.
+  for i, o in enumerate(flat_outputs):
+    if o is None:
+      flat_outputs[i] = None
+      continue
+
+    if isinstance(o, ops.Operation):
+      raise ValueError(
+          "tpu.rewrite does not support Operation as return value in non-flat "
+          "output structure. You can set returned Operations as control "
+          "dependencies of returned Tensors so Operations are triggered when "
+          f'Tensors are evaluated. Operation found: "{o.name}"')
+
+    try:
+      o = ops.convert_to_tensor(o)
+    except Exception as e:
+      raise ValueError(
+          "TPU function return values must all either be Operations or "
+          f'convertible to Tensors. Got error: "{e}"')
+
+    # Wraps outputs in Identity ops. Otherwise a replicated input copied
+    # straight to an output would bypass the replicate(). This would be bad
+    # because the TPUReplicatedInput/TPUReplicatedOutput operator would not
+    # be rewritten away, leading to a runtime error.
+    # TODO(phawkins): extend the rewrite to elide these nodes instead.
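+    # With SPMD partitioning the identity op is created without a device
+    # constraint; otherwise it is pinned to the output's device, or to core 0
+    # when the output has no device set.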
+    if need_spmd_partitioning:
+      o = array_ops.identity(o)
+      # pylint: disable=protected-access
+      o.op._set_attr("_tpu_output_identity", attr_value_pb2.AttrValue(b=True))
+      # pylint: enable=protected-access
+      flat_outputs[i] = array_ops.identity(o)
+    else:
+      with ops.device(o.device if o.device else core(0)):
+        o = array_ops.identity(o)
+        # pylint: disable=protected-access
+        o.op._set_attr("_tpu_output_identity", attr_value_pb2.AttrValue(b=True))
+        # pylint: enable=protected-access
+        flat_outputs[i] = array_ops.identity(o)
+
+  # All flat_outputs are Tensors, and no Operations.
+  return flat_outputs, [], outputs
+
+
+def split_compile_and_shard(
+    computation: Callable[..., Any],
+    inputs: Optional[List[List[Optional[core_types.Tensor]]]] = None,
+    num_shards: int = 1,
+    input_shard_axes: Optional[List[int]] = None,
+    outputs_from_all_shards: Union[bool, List[bool]] = True,
+    output_shard_axes: Optional[List[int]] = None,
+    infeed_queue: Optional[tpu_feed.InfeedQueue] = None,
+    device_assignment: Optional[device_assignment_lib.DeviceAssignment] = None,
+    name: Optional[Text] = None,
+    xla_options: Optional[XLAOptions] = None,
+    ) -> Tuple[ops.Operation, List[core_types.Tensor]]:
+  """Shards `computation` for parallel execution.
+
+  `inputs` must be a list of Tensors or None (equivalent to an empty list),
+  each of which has a corresponding split axis (from `input_shard_axes`). Each
+  input is split into `num_shards` pieces along the corresponding axis, and
+  computation is applied to each shard in parallel.
+
+  Tensors are broadcast to all shards if they are lexically captured by
+  `computation`. e.g.,
+
+  x = tf.constant(7)
+  def computation():
+    return x + 3
+  ... = shard(computation, ...)
+
+  If `outputs_from_all_shards` is true, the outputs from all shards of
+  `computation` are concatenated back together along their `output_shard_axes`.
+  Otherwise, each output is taken from an arbitrary shard.
+
+  Inputs and outputs of the computation must be at least rank-1 Tensors.
+
+  Args:
+    computation: A Python function that builds a computation to apply to each
+      shard of the input.
+    inputs: A list of input tensors or None (equivalent to an empty list). Each
+      input tensor has a corresponding shard axis, given by `input_shard_axes`,
+      along which it must have size divisible by `num_shards`.
+    num_shards: The number of shards.
+    input_shard_axes: A list of dimensions along which to shard `inputs`, or
+      `None`. `None` means "shard all inputs along dimension 0". If not `None`,
+      there must be one dimension per input.
+    outputs_from_all_shards: Boolean or list of boolean. For each output, if
+      `True`, outputs from all shards are concatenated along the corresponding
+      `output_shard_axes` entry. Otherwise, each output is taken
+      from an arbitrary shard. If the argument is a boolean, the argument's
+      value is used for each output.
+    output_shard_axes: A list of dimensions along which to concatenate the
+      outputs of `computation`, or `None`. `None` means "concatenate all
+      outputs along dimension 0". If not `None`, there must be one dimension
+      per output. Ignored if `outputs_from_all_shards` is False.
+    infeed_queue: If not `None`, the `InfeedQueue` to use to augment the inputs
+      of `computation`.
+    device_assignment: If not `None`, a `DeviceAssignment` describing the
+      mapping between logical cores in the computation and physical cores in
+      the TPU topology. Uses a default device assignment if `None`. The
+      `DeviceAssignment` may be omitted if each shard of the computation uses
+      only one core, and there is either only one shard, or the number of
+      shards is equal to the number of cores in the TPU system.
+    name: (Deprecated) Does nothing.
+    xla_options: An instance of `tpu.XLAOptions` which indicates the options
+      passed to XLA compiler. Use `None` for default options.
+  Returns:
+    A tuple of (compile op, [output tensors]).
+  Raises:
+    ValueError: If num_shards <= 0
+    ValueError: If len(input_shard_axes) != len(inputs)
+    ValueError: If len(output_shard_axes) != len(outputs from `computation`)
+  """
+  # TODO(phawkins): consider adding support for broadcasting Tensors passed as
+  # inputs.
+
+  if num_shards <= 0:
+    raise ValueError(
+        f"num_shards must be a positive integer. Received {num_shards}")
+
+  inputs = [] if inputs is None else inputs
+  if not isinstance(inputs, list):
+    raise TypeError("tpu.shard()'s inputs must be a list of Tensors or None. "
+                    f"Received {type(inputs)}")
+
+  # Converts inputs to Tensors.
+  inputs = [ops.convert_to_tensor(x) for x in inputs]
+
+  if input_shard_axes is None:
+    input_shard_axes = [0] * len(inputs)
+  if len(inputs) != len(input_shard_axes):
+    raise ValueError("Length of input_shard_axes must be equal to the number "
+                     f"of inputs. Received {len(inputs)} inputs and "
+                     f"{len(input_shard_axes)} input_shard_axes.")
+
+  if inputs:
+    # Splits the `inputs` along the corresponding `input_shard_axes`, giving
+    # lists with layout [input][shard]
+    split_inputs = [
+        array_ops.split(x, num_shards, axis=axis)
+        for (axis, x) in zip(input_shard_axes, inputs)]
+
+    # Transposes the input lists to have layout [shard][input]
+    transposed_inputs = [list(i) for i in zip(*split_inputs)]
+  else:
+    transposed_inputs = [[]] * num_shards
+
+  compile_op, outputs = split_compile_and_replicate(
+      computation,
+      transposed_inputs,
+      infeed_queue=infeed_queue,
+      device_assignment=device_assignment,
+      name=name,
+      xla_options=xla_options)
+
+  # There must be at least one shard since num_shards > 0.
+  # TODO(b/36647078) remove disable when pylint bug is fixed.
+  # pylint: disable=indexing-exception
+  if isinstance(outputs[0], ops.Operation):
+    # pylint: enable=indexing-exception
+    # There were no outputs from the computation and replicate returned a list
+    # of NoOps with control dependencies on the computation. Return the first
+    # one so it can be used as a control dependency or fetch node.
+    # TODO(b/36647078) remove disable when pylint bug is fixed.
+    # pylint: disable=indexing-exception
+    return compile_op, [outputs[0]]
+    # pylint: enable=indexing-exception
+
+  # TODO(b/36647078) remove disable when pylint bug is fixed.
+  # pylint: disable=indexing-exception
+  num_outputs = len(outputs[0])
+  # pylint: enable=indexing-exception
+
+  if output_shard_axes is None:
+    output_shard_axes = [0] * num_outputs
+  if num_outputs != len(output_shard_axes):
+    raise ValueError("Length of output_shard_axes must be equal to the number "
+                     f"of outputs. Received {num_outputs} outputs "
+                     f"and {len(output_shard_axes)} output_shard_axes.")
+
+  if isinstance(outputs_from_all_shards, bool):
+    outputs_from_all_shards = [outputs_from_all_shards] * num_outputs
+
+  if num_outputs != len(outputs_from_all_shards):
+    raise ValueError(
+        "Length of outputs_from_all_shards must be equal to the number of "
+        f"outputs. Received {num_outputs} outputs and "
+        f"{len(outputs_from_all_shards)} outputs_from_all_shards.")
+
+  results = []
+  for (axis, all_shards, x) in zip(output_shard_axes, outputs_from_all_shards,
+                                   zip(*outputs)):
+    if all_shards:
+      # Concatenate all of the outputs together (use stack for scalars).
+      shape = x[0].shape
+      is_scalar = shape is not None and (shape.ndims == 0)
+      results.append((array_ops_stack.stack(list(x)) if is_scalar
+                      else array_ops.concat(list(x), axis=axis)))
+    else:
+      # TODO(phawkins): use a smarter policy, e.g., round-robin across shards.
+      results.append(x[0])
+
+  return compile_op, results
+
+
+@tf_export(v1=["tpu.shard"])
+@traceback_utils.filter_traceback
+def shard(
+    computation: Callable[..., Any],
+    inputs: Optional[List[core_types.Tensor]] = None,
+    num_shards: int = 1,
+    input_shard_axes: Optional[List[int]] = None,
+    outputs_from_all_shards: Union[bool, List[bool]] = True,
+    output_shard_axes: Optional[List[int]] = None,
+    infeed_queue: Optional[tpu_feed.InfeedQueue] = None,
+    device_assignment: Optional[device_assignment_lib.DeviceAssignment] = None,
+    name: Optional[Text] = None,
+    xla_options: Optional[XLAOptions] = None) -> List[core_types.Tensor]:
+  """Shards `computation` for parallel execution.
+
+  `inputs` must be a list of Tensors or None (equivalent to an empty list),
+  each of which has a corresponding split axis (from `input_shard_axes`). Each
+  input is split into `num_shards` pieces along the corresponding axis, and
+  computation is applied to each shard in parallel.
+
+  Tensors are broadcast to all shards if they are lexically captured by
+  `computation`. e.g.,
+
+  x = tf.constant(7)
+  def computation():
+    return x + 3
+  ... = shard(computation, ...)
+
+  TODO(phawkins): consider adding support for broadcasting Tensors passed
+  as inputs.
+
+  If `outputs_from_all_shards` is true, the outputs from all shards of
+  `computation` are concatenated back together along their `output_shard_axes`.
+  Otherwise, each output is taken from an arbitrary shard.
+
+  Inputs and outputs of the computation must be at least rank-1 Tensors.
+
+  Args:
+    computation: A Python function that builds a computation to apply to each
+      shard of the input.
+    inputs: A list of input tensors or None (equivalent to an empty list). Each
+      input tensor has a corresponding shard axis, given by `input_shard_axes`,
+      along which it must have size divisible by `num_shards`.
+    num_shards: The number of shards.
+    input_shard_axes: A list of dimensions along which to shard `inputs`, or
+      `None`. `None` means "shard all inputs along dimension 0". If not `None`,
+      there must be one dimension per input.
+    outputs_from_all_shards: Boolean or list of boolean. For each output, if
+      `True`, outputs from all shards are concatenated along the corresponding
+      `output_shard_axes` entry. Otherwise, each output is taken
+      from an arbitrary shard. If the argument is a boolean, the argument's
+      value is used for each output.
+    output_shard_axes: A list of dimensions along which to concatenate the
+      outputs of `computation`, or `None`. `None` means "concatenate all
+      outputs along dimension 0". If not `None`, there must be one dimension
+      per output. Ignored if `outputs_from_all_shards` is False.
+    infeed_queue: If not `None`, the `InfeedQueue` to use to augment the inputs
+      of `computation`.
+    device_assignment: If not `None`, a `DeviceAssignment` describing the
+      mapping between logical cores in the computation and physical cores in
+      the TPU topology. Uses a default device assignment if `None`. The
+      `DeviceAssignment` may be omitted if each shard of the computation uses
+      only one core, and there is either only one shard, or the number of
+      shards is equal to the number of cores in the TPU system.
+    name: (Deprecated) Does nothing.
+    xla_options: An instance of `tpu.XLAOptions` which indicates the options
+      passed to XLA compiler. Use `None` for default options.
+  Returns:
+    A list of output tensors.
+  Raises:
+    ValueError: If num_shards <= 0
+    ValueError: If len(input_shard_axes) != len(inputs)
+    ValueError: If len(output_shard_axes) != len(outputs from `computation`)
+  """
+  return split_compile_and_shard(
+      computation,
+      inputs=inputs,
+      num_shards=num_shards,
+      input_shard_axes=input_shard_axes,
+      outputs_from_all_shards=outputs_from_all_shards,
+      output_shard_axes=output_shard_axes,
+      infeed_queue=infeed_queue,
+      device_assignment=device_assignment,
+      name=name,
+      xla_options=xla_options)[1]
+
+
+@tf_export(v1=["tpu.batch_parallel"])
+@traceback_utils.filter_traceback
+def batch_parallel(
+    computation: Callable[..., Any],
+    inputs: Optional[List[List[Optional[core_types.Tensor]]]] = None,
+    num_shards: int = 1,
+    infeed_queue: Optional[tpu_feed.InfeedQueue] = None,
+    device_assignment: Optional[device_assignment_lib.DeviceAssignment] = None,
+    name: Optional[Text] = None,
+    xla_options: Optional[XLAOptions] = None):
+  """Shards `computation` along the batch dimension for parallel execution.
+
+  Convenience wrapper around shard().
+
+  `inputs` must be a list of Tensors or None (equivalent to an empty list).
+  Each input is split into `num_shards` pieces along the 0-th dimension, and
+  computation is applied to each shard in parallel.
+
+  Tensors are broadcast to all shards if they are lexically captured by
+  `computation`. e.g.,
+
+  x = tf.constant(7)
+  def computation():
+    return x + 3
+  ... = shard(computation, ...)
+
+  The outputs from all shards are concatenated back together along their 0-th
+  dimension.
+
+  Inputs and outputs of the computation must be at least rank-1 Tensors.
+
+  Args:
+    computation: A Python function that builds a computation to apply to each
+      shard of the input.
+    inputs: A list of input tensors or None (equivalent to an empty list). The
+      0-th dimension of each Tensor must have size divisible by `num_shards`.
+    num_shards: The number of shards.
+    infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
+      of arguments as inputs to `computation`.
+    device_assignment: If not `None`, a `DeviceAssignment` describing the
+      mapping between logical cores in the computation and physical cores in
+      the TPU topology. Uses a default device assignment if `None`. The
+      `DeviceAssignment` may be omitted if each shard of the computation uses
+      only one core, and there is either only one shard, or the number of
+      shards is equal to the number of cores in the TPU system.
+    name: (Deprecated) Does nothing.
+    xla_options: An instance of `tpu.XLAOptions` which indicates the options
+      passed to XLA compiler. Use `None` for default options.
+  Returns:
+    A list of output tensors.
+  Raises:
+    ValueError: If `num_shards <= 0`
+  """
+  return shard(
+      computation,
+      inputs,
+      num_shards=num_shards,
+      infeed_queue=infeed_queue,
+      device_assignment=device_assignment,
+      name=name,
+      xla_options=xla_options)
+
+
+@tf_export(v1=["tpu.rewrite"])
+@traceback_utils.filter_traceback
+def rewrite(
+    computation: Callable[..., Any],
+    inputs: Optional[List[List[Optional[core_types.Tensor]]]] = None,
+    infeed_queue: Optional[tpu_feed.InfeedQueue] = None,
+    device_assignment: Optional[device_assignment_lib.DeviceAssignment] = None,
+    name: Optional[Text] = None,
+    xla_options: Optional[XLAOptions] = None) -> Any:
+  """Rewrites `computation` for execution on a TPU system.
+
+  Args:
+    computation: A Python function that builds a computation to apply to the
+      input. If the function takes n inputs, 'inputs' should be a list of n
+      tensors.
+
+      `computation` may return a list of operations and tensors. Tensors must
+      come before operations in the returned list. The return value of
+      `rewrite` is a list of tensors corresponding to the tensors from the
+      output of `computation`.
+
+      All `Operation`s constructed during `computation` will be executed when
+      evaluating any of the returned output tensors, not just the ones
+      returned.
+    inputs: A list of input tensors or `None` (equivalent to an empty list).
+      Each input can be a nested structure containing values that are
+      convertible to tensors. Note that passing an N-dimension list of
+      compatible values will result in an N-dimension list of scalar tensors
+      rather than a single rank-N tensor. If you need different behavior,
+      convert part of the inputs to tensors with `tf.convert_to_tensor`.
+    infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
+      of arguments as inputs to `computation`.
+    device_assignment: If not `None`, a `DeviceAssignment` describing the
+      mapping between logical cores in the computation and physical cores in
+      the TPU topology. May be omitted for a single-core computation, in which
+      case the core attached to task 0, TPU device 0 is used.
+    name: (Deprecated) Does nothing.
+    xla_options: An instance of `tpu.XLAOptions` which indicates the options
+      passed to XLA compiler. Use `None` for default options.
+  Returns:
+    Same data structure as if computation(*inputs) is called directly, with
+    some exceptions for correctness. Exceptions include:
+      1) None output: a NoOp would be returned which control-depends on
+         computation.
+      2) Single value output: A tuple containing the value would be returned.
+      3) Operation-only outputs: a NoOp would be returned which
+         control-depends on computation.
+      TODO(b/121383831): Investigate removing these special cases.
+  """
+  # TODO(b/36647078) remove disable when pylint bug is fixed.
+  # pylint: disable=indexing-exception
+  return replicate(
+      computation,
+      None if inputs is None else [inputs],
+      infeed_queue=infeed_queue,
+      device_assignment=device_assignment,
+      name=name,
+      xla_options=xla_options)[0]
+  # pylint: enable=indexing-exception
+
+
+# Operations that indicate some error in the user's inference graph.
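+# Variable state must live outside a TPU inference computation (see
+# rewrite_for_inference below), so _TPUInferenceContext rejects these ops.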
+_DENYLISTED_INFERENCE_OPS = set([
+    "ReadVariableOp",
+    "AssignVariableOp",
+    "AssignAddVariableOp",
+    "AssignSubVariableOp",
+    "VarHandleOp",
+    "Variable",
+    "VariableV2",
+])
+
+
+def under_tpu_inference_context() -> bool:
+  """Checks whether it is currently under `_TPUInferenceContext`."""
+  graph = ops.get_default_graph()
+  while graph:
+    context = graph._get_control_flow_context()  # pylint: disable=protected-access
+    while context:
+      if isinstance(context, _TPUInferenceContext):
+        return True
+      context = context.outer_context
+    if isinstance(graph, function._FuncGraph):  # pylint: disable=protected-access
+      graph = graph._outer_graph  # pylint: disable=protected-access
+    elif isinstance(graph, func_graph.FuncGraph):
+      graph = graph.outer_graph
+    else:
+      return False
+  return False
+
+
+class _TPUInferenceContext(control_flow_ops.XLAControlFlowContext):
+  """A `ControlFlowContext` for nodes inside a TPU inference computation.
+
+  The primary role of `_TPUInferenceContext` is to indicate the mode of
+  operation and possibly sanity check operators inside a
+  tpu.rewrite_for_inference() computation.
+  """
+
+  def __init__(self, name: Text, check_ops: bool = True):
+    super(_TPUInferenceContext, self).__init__()
+    self._name = name
+    self._check_ops = check_ops
+
+  def AddOp(self, op):
+    self._AddOpInternal(op)
+
+  def _AddOpInternal(self, op):
+    # pylint: disable=protected-access
+    if self._check_ops and op.type in _DENYLISTED_INFERENCE_OPS:
+      raise NotImplementedError(
+          f"Operation of type {op.type} ({op.name}) is not supported on the "
+          "TPU for inference. Execution will fail if this op is used in the "
+          "graph. Make sure your variables are using variable_scope.")
+    if self._outer_context:
+      self._outer_context.AddInnerOp(op)
+
+  def AddValue(self, val):
+    result = val
+    if self._outer_context:
+      result = self._outer_context.AddValue(val)
+    return result
+
+  def AddInnerOp(self, op):
+    self._AddOpInternal(op)
+
+  @property
+  def grad_state(self):
+    return None
+
+
+def validate_inference_rewrite_for_variables(graph: ops.Graph):
+  """Validates whether rewrite_for_inference() 'worked' for variables.
+
+  The rewrite_for_inference() method is supposed to append GuaranteeConstOps
+  after ReadVariableOps, but this mechanism works only if you are using
+  tf.compat.v1.get_variable() to create and access variables in your tpu
+  computation. This validation method can be called immediately after calling
+  tpu.rewrite_for_inference() to check whether GuaranteeConstOps were added
+  to the graph.
+
+  Typical usages:
+    tpu.validate_inference_rewrite_for_variables(
+        tf.compat.v1.get_default_graph())
+
+    tpu.validate_inference_rewrite_for_variables(sess.graph)
+
+  Args:
+    graph: The graph which needs to be validated.
+  Raises:
+    RuntimeError: if validation failed.
+  """
+  if not any(x.type == "GuaranteeConst" for x in graph.get_operations()):
+    raise RuntimeError(
+        "No GuaranteeConst ops found in the graph after running "
+        "tpu.rewrite_for_inference(...). Please check that you are using "
+        "tf.get_variable() to create and access variables in your tpu "
+        "computation.")
+
+
+def rewrite_for_inference(
+    computation: Callable[..., Any],
+    inputs: Optional[List[core_types.Tensor]] = None,
+    infeed_queue: Optional[tpu_feed.InfeedQueue] = None,
+    device_assignment: Optional[device_assignment_lib.DeviceAssignment] = None,
+    name: Optional[Text] = None) -> List[core_types.Tensor]:
+  """Rewrites `computation` for inference on a TPU system.
+
+  Other than 'rewriting' the computation to run on a TPU, if using variables
+  in your computation, it moves the ReadVariableOps outside the TPU
+  computation, and adds GuaranteeConst ops just after the ReadVariableOps.
+  This mechanism works only if you are using tf.compat.v1.get_variable() to
+  create and access variables in your tpu computation. You can validate
+  whether this worked by calling the
+  validate_inference_rewrite_for_variables() method immediately after this
+  method to check whether GuaranteeConstOps were added to the graph.
+
+  Args:
+    computation: A Python function that builds a computation to apply to the
+      input. If the function takes n inputs, 'inputs' should be a list of n
+      tensors. If the function returns m outputs, rewrite will return a list of
+      m tensors.
+    inputs: A list of input tensors or `None` (equivalent to an empty list).
+    infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
+      of arguments as inputs to `computation`.
+    device_assignment: If not `None`, a `DeviceAssignment` describing the
+      mapping between logical cores in the computation and physical cores in
+      the TPU topology. May be omitted for a single-core computation, in which
+      case the core attached to task 0, TPU device 0 is used.
+    name: The name of the operator.
+  Returns:
+    A list of output tensors.
+  """
+
+  def guarantee_const_getter(getter, name, *args, **kwargs):
+    with ops.control_dependencies(None):
+      return array_ops.guarantee_const(
+          getter(name, *args, **kwargs), name=name + "/GuaranteeConst")
+
+  def wrapped_computation(*args, **kwargs):
+    """Executes computation under `_TPUInferenceContext`."""
+    context = _TPUInferenceContext(
+        name=ops.get_default_graph().unique_name("rewrite_for_inference"))
+    try:
+      context.Enter()
+
+      vscope = variable_scope.get_variable_scope()
+      prev_custom_getter = vscope.custom_getter
+      prev_caching_device = vscope.caching_device
+      vscope.set_custom_getter(guarantee_const_getter)
+      vscope.set_caching_device(lambda op: op.device)
+
+      result = computation(*args, **kwargs)
+
+      vscope.set_custom_getter(prev_custom_getter)
+      vscope.set_caching_device(prev_caching_device)
+    finally:
+      context.Exit()
+    return result
+
+  # pylint: disable=undefined-variable
+  return rewrite(
+      wrapped_computation,
+      inputs=inputs,
+      infeed_queue=infeed_queue,
+      device_assignment=device_assignment,
+      name=name)
+  # pylint: enable=undefined-variable
+
+
+def prune_unconnected_ops_from_xla(prune_graph: ops.Graph):
+  """Prunes unconnected ops as listed in _UNCONNECTED_OPS_TO_PRUNE.
+
+  Args:
+    prune_graph: A tensorflow graph from which we wish to prune unconnected ops
+      as listed in _UNCONNECTED_OPS_TO_PRUNE. In general, these ops should have
+      no inputs and no consumers. These can often be left behind due to graph
+      construction rewiring (for instance TF-Hub). While they never execute,
+      they will cause XLA compilation to fail, so we exclude them from XLA
+      compilation by removing the tpu_replicate attribute.
+  """
+  # Scan over the top level graph and all function graphs.
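+  # Unconnected ops can also appear inside function bodies, so those graphs
+  # are checked as well.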
+  for graph in [prune_graph] + [
+      f for f in prune_graph._functions.values()  # pylint: disable=protected-access
+  ]:
+    if not isinstance(graph, ops.Graph):
+      continue
+    for op in graph.get_operations():
+      if op.type not in _UNCONNECTED_OPS_TO_PRUNE:
+        continue
+      outputs_consumed = False
+      for output in op.outputs:
+        if output.consumers():
+          outputs_consumed = True
+          break
+      if not outputs_consumed:
+        logging.info(
+            "Pruning OP %s of type %s from XLA Compile due to "
+            "it being disconnected.", op.name, op.type)
+        op._clear_attr(tpu_replication._TPU_REPLICATE_ATTR)  # pylint: disable=protected-access
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu_embedding_base.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu_embedding_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..1f064359d77631b6de47634eeab7954b477cdf49
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu_embedding_base.py
@@ -0,0 +1,145 @@
+# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Base Class for TPU Embeddings Mid level APIs."""
+
+import functools
+from typing import Any, Dict, Iterable, Optional, Union, Text
+
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import variables as tf_variables
+from tensorflow.python.tpu import tpu_embedding_v2_utils
+from tensorflow.python.trackable import autotrackable
+from tensorflow.python.util import nest
+
+
+class TPUEmbeddingBase(autotrackable.AutoTrackable):
+  """The TPUEmbedding Base class.
+
+  This class only contains the basic logic to check the feature config and
+  table config for the tpu embedding mid level APIs.
+  """
+
+  def __init__(
+      self,
+      feature_config: Union[tpu_embedding_v2_utils.FeatureConfig, Iterable],  # pylint:disable=g-bare-generic
+      optimizer: Optional[tpu_embedding_v2_utils._Optimizer] = None):  # pylint:disable=protected-access
+    """Creates the TPUEmbeddingBase object."""
+    self._feature_config = feature_config
+    self._output_shapes = []
+    for feature in nest.flatten(feature_config):
+      self._output_shapes.append(feature.output_shape)
+    # Set table order here to the order of the first occurrence of the table in
+    # a feature provided by the user. The order of this struct must be fixed
+    # to provide the user with deterministic behavior over multiple
+    # instantiations.
+    self._table_config = []
+    for feature in nest.flatten(feature_config):
+      if feature.table not in self._table_config:
+        self._table_config.append(feature.table)
+
+    # Ensure tables have unique names. Also error check the optimizer as we
+    # specifically don't do that in the TableConfig class to allow high level
+    # APIs that are built on this to use strings/other classes to represent
+    # optimizers (before they are passed to this class).
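+    # Tables fall back to the mid level API's `optimizer` when they don't
+    # specify their own, and unnamed tables get a generated "table_{i}" name.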
+    table_names = []
+    for i, table in enumerate(self._table_config):
+      if table.optimizer is None:
+        # TODO(bfontain) Should we allow some sort of optimizer merging here?
+        table.optimizer = optimizer
+      if (table.optimizer is not None and
+          not isinstance(table.optimizer, tpu_embedding_v2_utils._Optimizer)):  # pylint: disable=protected-access
+        raise ValueError("{} is an unsupported optimizer class. Please pass an "
+                         "instance of one of the optimizer classes under "
+                         "tf.tpu.experimental.embedding.".format(
+                             type(table.optimizer)))
+      if table.name is None:
+        table.name = "table_{}".format(i)
+      if table.name in table_names:
+        raise ValueError("Tables must have a unique name. "
+                         f"Multiple tables with name {table.name} found.")
+      table_names.append(table.name)
+
+    self._built = False
+
+  @property
+  def embedding_tables(self):
+    """Returns a dict of embedding tables, keyed by `TableConfig`."""
+    raise NotImplementedError
+
+  def _create_variables(self, table: tpu_embedding_v2_utils.TableConfig,
+                        trainable: bool) -> Dict[Text, tf_variables.Variable]:
+    """Creates all variables, including table variables and slot variables."""
+    variable_shape = (table.vocabulary_size, table.dim)
+
+    def getter(name, shape, dtype, initializer, trainable):
+      del shape
+      # _add_variable_with_custom_getter clears the shape sometimes, so we
+      # take the global shape from outside the getter.
+      initial_value = functools.partial(
+          initializer, variable_shape, dtype=dtype)
+      return tf_variables.Variable(
+          name=name,
+          initial_value=initial_value,
+          shape=variable_shape,
+          dtype=dtype,
+          trainable=trainable)
+
+    def variable_creator(name, initializer, trainable=True):
+      # Use add_variable_with_custom_getter here so that we take advantage of
+      # the checkpoint loading to allow restore before the variables get
+      # created, which avoids double initialization.
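+      # (Restore-on-create: if a checkpoint value is available for this
+      # variable, it is used in place of the initializer.)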
+ return self._add_variable_with_custom_getter( + name=name, + initializer=initializer, + shape=variable_shape, + dtype=dtypes.float32, + getter=getter, + trainable=trainable) + + parameters = variable_creator( + table.name, table.initializer, trainable=trainable) + + def slot_creator(name, initializer): + return variable_creator(table.name + "/" + name, initializer, False) + + if table.optimizer is not None: + slot_vars = table.optimizer._create_slots(parameters, slot_creator) # pylint: disable=protected-access + else: + slot_vars = {} + slot_vars["parameters"] = parameters + return slot_vars + + def _create_variables_and_slots(self): + """Create variables and slots variables for TPU embeddings.""" + raise NotImplementedError + + def build(self): + """Create variables and slots variables for TPU embeddings.""" + if self._built: + return + self._variables = self._create_variables_and_slots() + self._built = True + + def __call__(self, features: Any, weights: Optional[Any] = None) -> Any: + """Call the mid level api to do embedding lookup.""" + if not self._built: + self.build() + return self.embedding_lookup(features, weights) + + def embedding_lookup(self, + features: Any, + weights: Optional[Any] = None) -> Any: + """Lookup the embedding table using the input features.""" + raise NotImplementedError diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu_embedding_for_serving.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu_embedding_for_serving.py new file mode 100644 index 0000000000000000000000000000000000000000..9e5da7dd48401fc6ee631d8b5e82c80259e77741 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu_embedding_for_serving.py @@ -0,0 +1,591 @@ +# Copyright 2022 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Mid level API for Serving TPU Embeddings.""" +import functools +from typing import Any, Dict, Iterable, Optional, Union + +from absl import logging + +from tensorflow.core.tpu.kernels import sparse_core_layout_pb2 +from tensorflow.python.distribute import distribute_lib +from tensorflow.python.distribute import tpu_strategy +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import sparse_tensor +from tensorflow.python.framework import tensor +from tensorflow.python.framework.constant_op import constant as tf_constant +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import array_ops_stack +from tensorflow.python.ops import embedding_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import sparse_ops +from tensorflow.python.ops import variables as tf_variables +from tensorflow.python.ops.ragged import ragged_tensor +from tensorflow.python.tpu import tpu_embedding_base +from tensorflow.python.tpu import tpu_embedding_v2_utils +from tensorflow.python.tpu import tpu_embedding_v3_utils +from tensorflow.python.types import core +from tensorflow.python.util import nest +from tensorflow.python.util.tf_export import tf_export + + +@tf_export("tpu.experimental.embedding.TPUEmbeddingForServing") +class TPUEmbeddingForServing(tpu_embedding_base.TPUEmbeddingBase): + """The TPUEmbedding mid level API running on CPU for serving. + + Note: This class is intended to be used for embedding tables that are trained + on TPU and to be served on CPU. Therefore the class should be only initialized + under non-TPU strategy. Otherwise an error will be raised. + + You can first train your model using the TPUEmbedding class and save the + checkpoint. Then use this class to restore the checkpoint to do serving. + + First train a model and save the checkpoint. + ```python + model = model_fn(...) + strategy = tf.distribute.TPUStrategy(...) + with strategy.scope(): + embedding = tf.tpu.experimental.embedding.TPUEmbedding( + feature_config=feature_config, + optimizer=tf.tpu.experimental.embedding.SGD(0.1)) + + # Your custom training code. + + checkpoint = tf.train.Checkpoint(model=model, embedding=embedding) + checkpoint.save(...) + + ``` + + Then restore the checkpoint and do serving. + ```python + + # Restore the model on CPU. + model = model_fn(...) + embedding = tf.tpu.experimental.embedding.TPUEmbeddingForServing( + feature_config=feature_config, + optimizer=tf.tpu.experimental.embedding.SGD(0.1)) + + checkpoint = tf.train.Checkpoint(model=model, embedding=embedding) + checkpoint.restore(...) + + result = embedding(...) + table = embedding.embedding_table + ``` + + NOTE: This class can also be used to do embedding training on CPU. But it + requires the conversion between keras optimizer and embedding optimizers so + that the slot variables can stay consistent between them. + """ + + def __init__( + self, + feature_config: Union[tpu_embedding_v2_utils.FeatureConfig, Iterable], # pylint:disable=g-bare-generic + optimizer: Optional[tpu_embedding_v2_utils._Optimizer], + experimental_sparsecore_restore_info: Optional[Dict[str, Any]] = None, + ): # pylint:disable=protected-access + """Creates the TPUEmbeddingForServing mid level API object. 
+ + ```python + embedding = tf.tpu.experimental.embedding.TPUEmbeddingForServing( + feature_config=tf.tpu.experimental.embedding.FeatureConfig( + table=tf.tpu.experimental.embedding.TableConfig( + dim=..., + vocabulary_size=...))) + ``` + + Args: + feature_config: A nested structure of + `tf.tpu.experimental.embedding.FeatureConfig` configs. + optimizer: An instance of one of `tf.tpu.experimental.embedding.SGD`, + `tf.tpu.experimental.embedding.Adagrad` or + `tf.tpu.experimental.embedding.Adam`. When not created under TPUStrategy + may be set to None to avoid the creation of the optimizer slot + variables, useful for optimizing memory consumption when exporting the + model for serving where slot variables aren't needed. + experimental_sparsecore_restore_info: Information from the sparse core + training, required to restore from checkpoint for serving (like number + of TPU devices used `num_tpu_devices`.) + + Raises: + RuntimeError: If created under TPUStrategy. + """ + super(TPUEmbeddingForServing, self).__init__(feature_config, optimizer) + self._strategy = distribute_lib.get_strategy() + if isinstance( + self._strategy, (tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV2) + ): + raise RuntimeError("Serving on TPU is not yet supported.") + + @property + def embedding_tables( + self, + ) -> Dict[tpu_embedding_v2_utils.TableConfig, tf_variables.Variable]: + """Returns a dict of embedding tables, keyed by `TableConfig`.""" + self._maybe_build() + # Only return the tables and not the slot variables. + return { + table: self._variables[table.name]["parameters"] + for table in self._table_config + } + + def _maybe_build(self): + if not self._built: + # This can be called while tracing a function, so we wrap the + # initialization code with init_scope so it runs eagerly, this means that + # it will not be included the function graph generated by tracing so that + # we can be sure that we only initialize the TPU for embeddings exactly + # once. + with ops.init_scope(): + self.build() + + # TODO(silkyarora) Update the tests for all TPU embedding to expect this + # possibly empty information in checkpoints. + def _maybe_delete_sc_layouts_from_checkpoint(self): + # Remove the sparse_core_table_layouts from the checkpoint, it is only + # required for sparsecore. + if ( + hasattr( + self, + tpu_embedding_v3_utils.SPARSECORE_LAYOUTS_CHECKPOINT_KEY, + ) + and not self._get_sparse_core_table_layouts_str() + ): + delattr( + self, + tpu_embedding_v3_utils.SPARSECORE_LAYOUTS_CHECKPOINT_KEY, + ) + + def build(self): + """Create variables and slots variables for TPU embeddings.""" + super().build() + self._maybe_delete_sc_layouts_from_checkpoint() + + def _track_restore_info_for_cpu(self) -> None: + def getter(name, shape, dtype, initializer, trainable): + del shape + # _add_variable_with_custom_getter clears the shape sometimes, so we + # take the global shape from outside the getter. + initial_value = functools.partial(initializer, dtype=dtype) + return tf_variables.Variable( + name=name, + initial_value=initial_value, + shape=None, + dtype=dtype, + trainable=trainable, + ) + + def empty_string(dtype: dtypes.DType): + return tf_constant("", dtype=dtype) + + # _add_variable_with_custom_getter is used here to restore from checkpoint + # at creation time. 
The layouts from sparse core must be restored from + # checkpoint and before any other tables are restored + setattr( + self, + tpu_embedding_v3_utils.SPARSECORE_LAYOUTS_CHECKPOINT_KEY, + self._add_variable_with_custom_getter( + name=tpu_embedding_v3_utils.SPARSECORE_LAYOUTS_CHECKPOINT_KEY, + initializer=empty_string, + dtype=dtypes.string, + getter=getter, + trainable=False, + ), + ) + + def _get_sparse_core_table_layouts_str(self) -> bytes: + layouts_str = getattr( + self, + tpu_embedding_v3_utils.SPARSECORE_LAYOUTS_CHECKPOINT_KEY, + ) + return layouts_str.read_value().numpy() + + def _create_variables_from_stacked_tables(self): + sc_layouts = sparse_core_layout_pb2.SparseCoreTableLayouts() + sc_layouts.ParseFromString(self._get_sparse_core_table_layouts_str()) + stacked_table_name_to_layouts = {} + for layout in sc_layouts.tables: + stacked_tables_list = stacked_table_name_to_layouts.setdefault( + layout.stacked_table_name, [] + ) + stacked_tables_list.append(layout) + table_to_config = {table.name: table for table in self._table_config} + variables = {} + for stacked_table_name, layouts in stacked_table_name_to_layouts.items(): + logging.info( + "Loading stacked table state variables(%s) for %s tables", + stacked_table_name, + len(layouts), + ) + stacked_var_trackable = ( + tpu_embedding_v3_utils.SparseCoreStackedTableTrackable( + layouts, table_to_config + ) + ) + # The stacked table is added as trackable to the embedding so that the + # checkpoint key corresponsing to stacked table is read. + self._track_trackable(stacked_var_trackable, stacked_table_name) + variables.update(stacked_var_trackable.get_vars()) + return variables + + def _create_variables_and_slots( + self, + ) -> Dict[str, Dict[str, tf_variables.Variable]]: + """Create variables for TPU embeddings. + + Returns: + A dict of dicts. The outer dict is keyed by the table names and the inner + dicts are keyed by 'parameters' and the slot variable names. + """ + self._track_restore_info_for_cpu() + variables = {} + # If there are stacked variables from SC checkpoint process those + # first + stacked_variables = self._create_variables_from_stacked_tables() + for table in self._table_config: + if table.name in stacked_variables: + variables[table.name] = {"parameters": stacked_variables[table.name]} + else: + variables[table.name] = self._create_variables(table, trainable=True) + return variables + + def embedding_lookup( + self, features: Any, weights: Optional[Any] = None + ) -> Any: + """Apply standard lookup ops on CPU. + + Args: + features: A nested structure of `tf.Tensor`s, `tf.SparseTensor`s or + `tf.RaggedTensor`s, with the same structure as `feature_config`. Inputs + will be downcast to `tf.int32`. Only one type out of `tf.SparseTensor` + or `tf.RaggedTensor` is supported per call. + weights: If not `None`, a nested structure of `tf.Tensor`s, + `tf.SparseTensor`s or `tf.RaggedTensor`s, matching the above, except + that the tensors should be of float type (and they will be downcast to + `tf.float32`). For `tf.SparseTensor`s we assume the `indices` are the + same for the parallel entries from `features` and similarly for + `tf.RaggedTensor`s we assume the row_splits are the same. + + Returns: + A nested structure of Tensors with the same structure as input features. 
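+
+    For example, a minimal sketch, assuming `feature_config` was a dict with
+    a single (hypothetical) key 'watched' backed by a sparse feature:
+
+    ```python
+    ids = tf.sparse.SparseTensor(
+        indices=[[0, 0], [0, 1]], values=[3, 7], dense_shape=[1, 8])
+    activations = embedding.embedding_lookup({'watched': ids})
+    # activations['watched'] is a dense tensor of shape [1, table.dim].
+    ```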
+ """ + return cpu_embedding_lookup( + features, weights, self.embedding_tables, self._feature_config + ) + + +def _ragged_embedding_lookup_with_reduce( + table: tf_variables.Variable, + ragged: ragged_tensor.RaggedTensor, + weights: ragged_tensor.RaggedTensor, + combiner: str, +) -> core.Tensor: + """Compute a ragged lookup followed by a reduce on axis 1. + + Args: + table: The embedding table. + ragged: A RaggedTensor of ids to look up. + weights: A RaggedTensor of weights (or None). + combiner: One of "mean", "sum", "sqrtn". + + Returns: + A Tensor. + """ + if weights is None: + weights = array_ops.ones_like(ragged, dtype=table.dtype) + weights = array_ops.expand_dims(weights, axis=2) + ragged_result = embedding_ops.embedding_lookup(table, ragged) + ragged_result = math_ops.reduce_sum(ragged_result * weights, axis=1) + if combiner == "mean": + ragged_result = math_ops.div_no_nan( + ragged_result, math_ops.reduce_sum(weights, axis=1) + ) + elif combiner == "sqrtn": + ragged_result = math_ops.div_no_nan( + ragged_result, + math_ops.sqrt(math_ops.reduce_sum(weights * weights, axis=1)), + ) + return ragged_result + + +@tf_export("tpu.experimental.embedding.serving_embedding_lookup") +def cpu_embedding_lookup( + inputs: Any, + weights: Optional[Any], + tables: Dict[tpu_embedding_v2_utils.TableConfig, tf_variables.Variable], + feature_config: Union[ + tpu_embedding_v2_utils.FeatureConfig, Iterable # pylint:disable=g-bare-generic + ], +) -> Any: + """Apply standard lookup ops with `tf.tpu.experimental.embedding` configs. + + This function is a utility which allows using the + `tf.tpu.experimental.embedding` config objects with standard lookup functions. + This can be used when exporting a model which uses + `tf.tpu.experimental.embedding.TPUEmbedding` for serving on CPU. In particular + `tf.tpu.experimental.embedding.TPUEmbedding` only supports lookups on TPUs and + should not be part of your serving graph. + + Note that TPU specific options (such as `max_sequence_length`) in the + configuration objects will be ignored. + + In the following example we take a trained model (see the documentation for + `tf.tpu.experimental.embedding.TPUEmbedding` for the context) and create a + saved model with a serving function that will perform the embedding lookup and + pass the results to your model: + + ```python + model = model_fn(...) + embedding = tf.tpu.experimental.embedding.TPUEmbedding( + feature_config=feature_config, + batch_size=1024, + optimizer=tf.tpu.experimental.embedding.SGD(0.1)) + checkpoint = tf.train.Checkpoint(model=model, embedding=embedding) + checkpoint.restore(...) + + @tf.function(input_signature=[{'feature_one': tf.TensorSpec(...), + 'feature_two': tf.TensorSpec(...), + 'feature_three': tf.TensorSpec(...)}]) + def serve_tensors(embedding_features): + embedded_features = tf.tpu.experimental.embedding.serving_embedding_lookup( + embedding_features, None, embedding.embedding_tables, + feature_config) + return model(embedded_features) + + model.embedding_api = embedding + tf.saved_model.save(model, + export_dir=..., + signatures={'serving_default': serve_tensors}) + + ``` + + NOTE: It's important to assign the embedding API object to a member of your + model as `tf.saved_model.save` only supports saving variables as one + `Trackable` object. Since the model's weights are in `model` and the + embedding table are managed by `embedding`, we assign `embedding` to an + attribute of `model` so that tf.saved_model.save can find the embedding + variables. 
+ + NOTE: The same `serve_tensors` function and `tf.saved_model.save` call will + work directly from training. + + Args: + inputs: a nested structure of Tensors, SparseTensors or RaggedTensors. + weights: a nested structure of Tensors, SparseTensors or RaggedTensors or + None for no weights. If not None, structure must match that of inputs, but + entries are allowed to be None. + tables: a dict of mapping TableConfig objects to Variables. + feature_config: a nested structure of FeatureConfig objects with the same + structure as inputs. + + Returns: + A nested structure of Tensors with the same structure as inputs. + """ + + nest.assert_same_structure(inputs, feature_config) + + flat_inputs = nest.flatten(inputs) + flat_weights = [None] * len(flat_inputs) + if weights is not None: + nest.assert_same_structure(inputs, weights) + flat_weights = nest.flatten(weights) + flat_features = nest.flatten_with_joined_string_paths(feature_config) + + outputs = [] + for inp, weight, (path, feature) in zip( + flat_inputs, flat_weights, flat_features + ): + table = tables[feature.table] + + if weight is not None: + if isinstance(inp, tensor.Tensor): + raise ValueError( + "Weight specified for {}, but input is dense.".format(path) + ) + elif type(weight) is not type(inp): + raise ValueError( + "Weight for {} is of type {} but it does not match type of the " + "input which is {}.".format(path, type(weight), type(inp)) + ) + elif feature.max_sequence_length > 0: + raise ValueError( + "Weight specified for {}, but this is a sequence feature.".format( + path + ) + ) + + if isinstance(inp, tensor.Tensor): + if feature.max_sequence_length > 0: + raise ValueError( + "Feature {} is a sequence feature but a dense tensor " + "was passed.".format(path) + ) + outputs.append(embedding_ops.embedding_lookup_v2(table, inp)) + + elif isinstance(inp, sparse_tensor.SparseTensor): + outputs.append( + _embedding_lookup_for_sparse_tensor(inp, weight, table, feature) + ) + elif isinstance(inp, ragged_tensor.RaggedTensor): + outputs.append( + _embedding_lookup_for_ragged_tensor(inp, weight, table, feature) + ) + else: + raise ValueError( + "Input {} is type {}. Tensor, SparseTensor or " + "RaggedTensor expected.".format(path, type(inp)) + ) + return nest.pack_sequence_as(feature_config, outputs) + + +def _embedding_lookup_for_sparse_tensor( + inp: sparse_tensor.SparseTensor, + weight: Optional[sparse_tensor.SparseTensor], + table: tf_variables.Variable, + feature: tpu_embedding_v2_utils.FeatureConfig, +) -> tensor.Tensor: + """Embedding lookup for sparse tensor based on its feature config. + + Args: + inp: a single SparseTensor input. + weight: None or SparseTensor which has the same shape of the input. + table: a table variable. + feature: a feature config. + + Returns: + Embedding lookup result. + """ + inp_rank = inp.shape.rank + # The input rank can be None for sequence input tensor. + if ( + not feature.output_shape + and feature.max_sequence_length > 0 + and (inp_rank is None or inp_rank == 2) + ): + batch_size = math_ops.cast(array_ops.shape(inp)[0], dtype=dtypes.int64) + sparse_shape = array_ops_stack.stack( + [batch_size, feature.max_sequence_length], axis=0 + ) + # TPU Embedding truncates sequences to max_sequence_length, and if we + # don't truncate, scatter_nd will error out if the index was out of + # bounds. 
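+    # E.g. (hypothetical values): with max_sequence_length=2, a row carrying
+    # ids at columns 0..3 keeps only the entries at columns 0 and 1 after the
+    # slice below, so no index can exceed the scatter_nd output bounds.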
+ truncated_inp = sparse_ops.sparse_slice( + inp, start=[0, 0], size=sparse_shape + ) + + dense_output_shape = array_ops_stack.stack( + [batch_size, feature.max_sequence_length, feature.table.dim], axis=0 + ) + return array_ops.scatter_nd( + truncated_inp.indices, + array_ops.gather(table.read_value(), truncated_inp.values), + dense_output_shape, + ) + else: + if feature.max_sequence_length > 0: + logging.warning( + ( + "max_sequence_length setting will be ignored because the rank of" + " the input tensor is %d which is not 2." + ), + inp_rank, + ) + if ( + not feature.validate_weights_and_indices + and inp_rank is not None + and inp_rank <= 2 + ): + return embedding_ops.embedding_lookup_sparse_v2( + table, inp, sp_weights=weight, combiner=feature.table.combiner + ) + else: + return embedding_ops.safe_embedding_lookup_sparse_v2( + table, inp, sparse_weights=weight, combiner=feature.table.combiner + ) + + +def _embedding_lookup_for_ragged_tensor( + inp: ragged_tensor.RaggedTensor, + weight: Optional[ragged_tensor.RaggedTensor], + table: tf_variables.Variable, + feature: tpu_embedding_v2_utils.FeatureConfig, +) -> tensor.Tensor: + """Embedding lookup for ragged tensor based on its feature config. + + Args: + inp: a single rank 2 RaggedTensor input. + weight: None or RaggedTensor which has the same shape of the input. + table: a table variable. + feature: a feature config. + + Returns: + Embedding lookup result. + + Raises: + ValueError: if input ragged tensor is not rank 2 or output shape set in the + feature config doesn't match with the first dim size of the input. + """ + if inp.shape.rank != 2: + raise ValueError( + "Only rank 2 ragged tensor is supported, but got rank {}".format( + inp.shape.rank + ) + ) + batch_size = inp.shape[0] + if feature.output_shape: + output_batch_size = math_ops.reduce_prod(feature.output_shape) + # If the output batch size matches the data batch size, treat it as + # normal ragged input. + if output_batch_size == batch_size: + ragged_output = _ragged_embedding_lookup_with_reduce( + table, inp, weight, feature.table.combiner + ) + ragged_output = array_ops.reshape( + ragged_output, shape=feature.output_shape + [feature.table.dim] + ) + # If the data batch size is a factor of the output batch size, the + # divide result will be the sequence length. Ignore the weights and + # combiner. + elif output_batch_size > batch_size and output_batch_size % batch_size == 0: + ragged_output = embedding_ops.embedding_lookup_v2(table, inp) + # Pad or truncate in the sequence dimension + ragged_output = ragged_output.to_tensor( + shape=[batch_size, output_batch_size // batch_size, feature.table.dim] + ) + # Reshape to desire output shape. + ragged_output = array_ops.reshape( + ragged_output, feature.output_shape + [feature.table.dim] + ) + else: + raise ValueError( + "Output shape set in the FeatureConfig should be the factor of " + "the input data batch size. But instead got output shape {}, " + "input data batch size {}".format(feature.output_shape, batch_size) + ) + else: + if feature.max_sequence_length > 0: + output_shape = [ + batch_size, + feature.max_sequence_length, + feature.table.dim, + ] + ragged_lookup = embedding_ops.embedding_lookup_v2(table, inp) + # Unlike scatter_nd, RaggedTensor.to_tensor truncates to the given + # shape. 
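+      # E.g. (hypothetical values): with max_sequence_length=3, a row with 5
+      # ids keeps only its first 3 embeddings, while a row with 1 id is
+      # zero-padded up to length 3.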
+ ragged_output = ragged_lookup.to_tensor(shape=output_shape) + else: + ragged_output = _ragged_embedding_lookup_with_reduce( + table, inp, weight, feature.table.combiner + ) + return ragged_output diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu_embedding_v2.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu_embedding_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..bf954ac0a55e77d6cbe95ac15f6bde2317ac1d07 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu_embedding_v2.py @@ -0,0 +1,1762 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Mid level API for TPU Embeddings.""" + +import functools +from typing import Any, Callable, Dict, Iterable, List, Optional, Text, Tuple, Union + +from absl import logging + +from tensorflow.core.framework import attr_value_pb2 +from tensorflow.core.protobuf.tpu import tpu_embedding_configuration_pb2 +from tensorflow.python.distribute import device_util +from tensorflow.python.distribute import distribute_lib +from tensorflow.python.distribute import distribute_utils +from tensorflow.python.distribute import sharded_variable +from tensorflow.python.distribute import tpu_strategy +from tensorflow.python.eager import context +from tensorflow.python.eager import def_function +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import device as tf_device +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import sparse_tensor +from tensorflow.python.framework import tensor as tensor_lib +from tensorflow.python.framework.tensor_shape import TensorShape +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import sparse_ops +from tensorflow.python.ops import variable_scope +from tensorflow.python.ops import variables as tf_variables +from tensorflow.python.ops.ragged import ragged_tensor +from tensorflow.python.saved_model import registration +from tensorflow.python.saved_model import save_context +from tensorflow.python.tpu import tpu +from tensorflow.python.tpu import tpu_embedding_v2_utils +from tensorflow.python.tpu import tpu_replication +from tensorflow.python.tpu.ops import tpu_ops +from tensorflow.python.trackable import autotrackable +from tensorflow.python.trackable import base +from tensorflow.python.types import internal as internal_types +from tensorflow.python.util import compat +from tensorflow.python.util import nest +from tensorflow.python.util import tf_inspect +from tensorflow.python.util.tf_export import tf_export + + +_HOOK_KEY = "TPUEmbedding_saveable" +_NAME_KEY = "_tpu_embedding_layer" + + +class TPUEmbeddingVariable(sharded_variable.ShardedVariableMixin): + """A ShardedVariable class for TPU.""" + + @property + def 
_in_graph_mode(self):
+    return self.variables[0]._in_graph_mode  # pylint: disable=protected-access
+
+
+def _add_key_attr(op, name):
+  op._set_attr(_NAME_KEY, attr_value_pb2.AttrValue(s=compat.as_bytes(name)))  # pylint: disable=protected-access
+
+
+@tf_export("tpu.experimental.embedding.TPUEmbedding")
+class TPUEmbedding(autotrackable.AutoTrackable):
+  """The TPUEmbedding mid level API.
+
+  NOTE: When instantiated under a TPUStrategy, this class can only be created
+  once per call to `tf.tpu.experimental.initialize_tpu_system`. If you wish to
+  re-initialize the embedding engine you must re-initialize the tpu as well.
+  Doing this will clear any variables from TPU, so ensure you have checkpointed
+  before you do this. If further instances of the class are needed,
+  set the `initialize_tpu_embedding` argument to `False`.
+
+  This class can be used to support training large embeddings on TPU. When
+  creating an instance of this class, you must specify the complete set of
+  tables and features you expect to look up in those tables. See the
+  documentation of `tf.tpu.experimental.embedding.TableConfig` and
+  `tf.tpu.experimental.embedding.FeatureConfig` for more details on the
+  complete set of options. We will cover the basic usage here.
+
+  NOTE: multiple `FeatureConfig` objects can use the same `TableConfig` object,
+  allowing different features to share the same table:
+
+  ```python
+  table_config_one = tf.tpu.experimental.embedding.TableConfig(
+      vocabulary_size=...,
+      dim=...)
+  table_config_two = tf.tpu.experimental.embedding.TableConfig(
+      vocabulary_size=...,
+      dim=...)
+  feature_config = {
+      'feature_one': tf.tpu.experimental.embedding.FeatureConfig(
+          table=table_config_one),
+      'feature_two': tf.tpu.experimental.embedding.FeatureConfig(
+          table=table_config_one),
+      'feature_three': tf.tpu.experimental.embedding.FeatureConfig(
+          table=table_config_two)}
+  ```
+
+  There are two modes under which the `TPUEmbedding` class can be used. Which
+  mode applies depends on whether the class was created under a `TPUStrategy`
+  scope or not.
+
+  Under `TPUStrategy`, we allow access to the methods `enqueue`, `dequeue` and
+  `apply_gradients`. We will show examples below of how to use these to train
+  and evaluate your model. Under CPU, we only have access to the
+  `embedding_tables` property, which allows access to the embedding tables so
+  that you can use them to run model evaluation/prediction on CPU.
+
+  First let's look at the `TPUStrategy` mode. Initial setup looks like:
+
+  ```python
+  strategy = tf.distribute.TPUStrategy(...)
+  with strategy.scope():
+    embedding = tf.tpu.experimental.embedding.TPUEmbedding(
+        feature_config=feature_config,
+        optimizer=tf.tpu.experimental.embedding.SGD(0.1))
+  ```
+
+  When creating a distributed dataset that is to be passed to the enqueue
+  operation, a special input option must be specified:
+
+  ```python
+  distributed_dataset = (
+      strategy.distribute_datasets_from_function(
+          dataset_fn=...,
+          options=tf.distribute.InputOptions(
+              experimental_fetch_to_device=False)))
+  dataset_iterator = iter(distributed_dataset)
+  ```
+
+  Different feature inputs can have different shapes. For dense and sparse
+  tensors, rank 2 and above is supported. For ragged tensors, although only
+  rank 2 is supported, you can specify the output shape to be rank 2 and
+  above. The output shape specified in the FeatureConfig has the first
+  priority, the input shape passed to the build method has second priority,
+  and the input shapes auto detected from the input features have the lowest
+  priority.
The latter two will
+  be converted to output shapes by omitting the last dimension. If a lower
+  priority source has output shapes which don't match the higher priority one,
+  a ValueError will be raised; a lower priority source can only override the
+  output shapes when the higher priority one leaves them undefined.
+
+  NOTE: All batches passed to the layer can have different input shapes. But
+  these input shapes need to match the output shapes set by either the
+  `FeatureConfig` or the build method, except for ragged tensors. Only a 2D
+  ragged tensor with its output shape set to higher dimensions is allowed, as
+  long as the total number of elements matches. All subsequent calls must have
+  the same input shapes. In the event that the input shapes cannot be
+  automatically determined by the enqueue method, you must call
+  the build method with the input shapes or provide output shapes in the
+  `FeatureConfig` to initialize the layer.
+
+  To use this API on TPU you should use a custom training loop. Below is an
+  example of a training and evaluation step:
+
+  ```python
+  @tf.function
+  def training_step(dataset_iterator, num_steps):
+    def tpu_step(tpu_features):
+      with tf.GradientTape() as tape:
+        activations = embedding.dequeue()
+        tape.watch(activations)
+        model_output = model(activations)
+        loss = ...  # some function of labels and model_output
+
+      embedding_gradients = tape.gradient(loss, activations)
+      embedding.apply_gradients(embedding_gradients)
+      # Insert your model gradient and optimizer application here
+
+    for _ in tf.range(num_steps):
+      embedding_features, tpu_features = next(dataset_iterator)
+      embedding.enqueue(embedding_features, training=True)
+      strategy.run(tpu_step, args=(tpu_features, ))
+
+  @tf.function
+  def evaluation_step(dataset_iterator, num_steps):
+    def tpu_step(tpu_features):
+      activations = embedding.dequeue()
+      model_output = model(activations)
+      # Insert your evaluation code here.
+
+    for _ in tf.range(num_steps):
+      embedding_features, tpu_features = next(dataset_iterator)
+      embedding.enqueue(embedding_features, training=False)
+      strategy.run(tpu_step, args=(tpu_features, ))
+  ```
+
+  NOTE: The calls to `enqueue` have `training` set to `True` when
+  `embedding.apply_gradients` is used and set to `False` when
+  `embedding.apply_gradients` is not present in the function. If you don't
+  follow this pattern you may cause an error to be raised or the tpu may
+  deadlock.
+
+  In the above examples, we assume that the user has a dataset which returns
+  a tuple where the first element of the tuple matches the structure of what
+  was passed as the `feature_config` argument to the object initializer. Also
+  we utilize `tf.range` to get a `tf.while_loop` in order to increase
+  performance.
+
+  When checkpointing your model, you should include your
+  `tf.tpu.experimental.embedding.TPUEmbedding` object in the checkpoint. It is
+  a trackable object and saving it will save the embedding tables and their
+  optimizer slot variables:
+
+  ```python
+  checkpoint = tf.train.Checkpoint(model=model, embedding=embedding)
+  checkpoint.save(...)
+  ```
+
+  On CPU, only the `embedding_tables` property is usable. This will allow you
+  to restore a checkpoint to the object and have access to the table
+  variables:
+
+  ```python
+  model = model_fn(...)
+  embedding = tf.tpu.experimental.embedding.TPUEmbedding(
+      feature_config=feature_config,
+      optimizer=tf.tpu.experimental.embedding.SGD(0.1))
+  checkpoint = tf.train.Checkpoint(model=model, embedding=embedding)
+  checkpoint.restore(...)
+ + tables = embedding.embedding_tables + ``` + + You can now use table in functions like `tf.nn.embedding_lookup` to perform + your embedding lookup and pass to your model. + + """ + + def __init__( + self, + feature_config: Union[tpu_embedding_v2_utils.FeatureConfig, Iterable], # pylint:disable=g-bare-generic + optimizer: Optional[tpu_embedding_v2_utils._Optimizer], # pylint:disable=protected-access + pipeline_execution_with_tensor_core: bool = False): + """Creates the TPUEmbedding mid level API object. + + ```python + strategy = tf.distribute.TPUStrategy(...) + with strategy.scope(): + embedding = tf.tpu.experimental.embedding.TPUEmbedding( + feature_config=tf.tpu.experimental.embedding.FeatureConfig( + table=tf.tpu.experimental.embedding.TableConfig( + dim=..., + vocabulary_size=...))) + ``` + + Args: + feature_config: A nested structure of + `tf.tpu.experimental.embedding.FeatureConfig` configs. + optimizer: An instance of one of `tf.tpu.experimental.embedding.SGD`, + `tf.tpu.experimental.embedding.Adagrad` or + `tf.tpu.experimental.embedding.Adam`. When not created under + TPUStrategy may be set to None to avoid the creation of the optimizer + slot variables, useful for optimizing memory consumption when exporting + the model for serving where slot variables aren't needed. + pipeline_execution_with_tensor_core: If True, the TPU embedding + computations will overlap with the TensorCore computations (and hence + will be one step old). Set to True for improved performance. + + Raises: + ValueError: If optimizer is not one of tf.tpu.experimental.embedding.(SGD, + Adam or Adagrad) or None when created under a TPUStrategy. + """ + self._strategy = distribute_lib.get_strategy() + self._using_tpu = isinstance(self._strategy, (tpu_strategy.TPUStrategy, + tpu_strategy.TPUStrategyV2)) + self._pipeline_execution_with_tensor_core = ( + pipeline_execution_with_tensor_core) + + self._feature_config = feature_config + self._output_shapes = [] + for feature in nest.flatten(feature_config): + self._output_shapes.append(feature.output_shape) + + device_assignment = getattr( + self._strategy.extended, "_device_assignment", None + ) + self._num_cores_per_replica = ( + device_assignment.num_cores_per_replica if device_assignment else None + ) + + # The TPU embedding ops are slightly inconsistent with how they refer to + # tables: + # * The enqueue op takes a parallel list of tensors for input, one of those + # is the table id for the feature which matches the integer index of the + # table in the proto created by _create_config_proto(). + # * The recv_tpu_embedding_activations op emits lookups per table in the + # order from the config proto. + # * The send_tpu_embedding_gradients expects input tensors to be per table + # in the same order as the config proto. + # * Per optimizer load and retrieve ops are specified per table and take the + # table name rather than the table id. + # Thus we must fix a common order to tables and ensure they have unique + # names. + + # Set table order here to the order of the first occurence of the table in a + # feature provided by the user. The order of this struct must be fixed + # to provide the user with deterministic behavior over multiple + # instantiations. + self._table_config = [] + for feature in nest.flatten(feature_config): + if feature.table not in self._table_config: + self._table_config.append(feature.table) + + # Ensure tables have unique names. 
Also error check the optimizer as we + # specifically don't do that in the TableConfig class to allow high level + # APIs that are built on this to use strings/other classes to represent + # optimizers (before they are passed to this class). + table_names = [] + for i, table in enumerate(self._table_config): + if table.optimizer is None: + # TODO(bfontain) Should we allow some sort of optimizer merging here? + table.optimizer = optimizer + if ((table.optimizer is not None or self._using_tpu) and + not isinstance(table.optimizer, tpu_embedding_v2_utils._Optimizer)): # pylint: disable=protected-access + raise ValueError("{} is an unsupported optimizer class. Please pass an " + "instance of one of the optimizer classes under " + "tf.tpu.experimental.embedding.".format( + type(table.optimizer))) + if table.name is None: + table.name = "table_{}".format(i) + if table.name in table_names: + raise ValueError("Tables must have a unique name. " + f"Multiple tables with name {table.name} found.") + table_names.append(table.name) + + if self._using_tpu: + # Extract a list of callable learning rates also in fixed order. Each + # table in the config proto will get an index into this list, and we will + # pass this list in the same order after evaluation to the + # send_tpu_embedding_gradients op. + self._dynamic_learning_rates = [] + for table in self._table_config: + if (callable(table.optimizer.learning_rate) and + table.optimizer.learning_rate not in self._dynamic_learning_rates): + self._dynamic_learning_rates.append(table.optimizer.learning_rate) + + # We need to list of host devices for the load/retrieve operations. + self._hosts = tpu_embedding_v2_utils.get_list_of_hosts(self._strategy) + + self._built = False + self._verify_output_shapes_on_enqueue = True + + def build(self, per_replica_input_shapes=None, per_replica_batch_size=None): # pylint:disable=g-bare-generic + """Create the underlying variables and initializes the TPU for embeddings. + + This method creates the underlying variables (including slot variables). If + created under a TPUStrategy, this will also initialize the TPU for + embeddings. + + This function will automatically get called by enqueue, which will try to + determine your output shapes. If this fails, you must manually + call this method before you call enqueue. + + Args: + per_replica_input_shapes: A nested structure of The per replica input + shapes that matches the structure of the feature config. The input + shapes should be the same as the input shape of the feature (except for + ragged tensor) Note that it is fixed and the same per replica input + shapes must be used for both training and evaluation. If you want to + calculate this from the global input shapes, you can use + `num_replicas_in_sync` property of your strategy object. May be set to + None if not created under a TPUStrategy. + per_replica_batch_size: (Deprecated) The per replica batch size that you + intend to use. Note that is fixed and the same batch size must be used + for both training and evaluation. If you want to calculate this from the + global batch size, you can use `num_replicas_in_sync` property of your + strategy object. May be set to None if not created under a TPUStrategy. + + Raises: + ValueError: If per_replica_input_shapes is inconsistent with the output + shapes stored in the feature config or the output shapes get from the + input shapes are not fully defined. + RuntimeError: If tpu embedding is already initialized on TPU. 
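+
+    For example, a minimal sketch (numbers are hypothetical), assuming a
+    single rank-2 sparse feature with a global batch of 512 and up to 8 ids
+    per sample:
+
+    ```python
+    per_replica_batch = 512 // strategy.num_replicas_in_sync
+    embedding.build(
+        per_replica_input_shapes=[tf.TensorShape([per_replica_batch, 8])])
+    ```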
+ """ + if self._built: + return + + if self._using_tpu: + # If the tpu embedding is already initialized on TPU, raise runtime error. + # Below logic is not added in `initialize_system_for_tpu_embedding` + # because doing exception control flow in graph mode is difficult. + if tpu_ops.is_tpu_embedding_initialized(): + raise RuntimeError( + "TPU is already initialized for embeddings. This may be caused by " + "using multiple TPUEmbedding instances in a TPU scope which is " + "unsupported") + self._get_and_update_output_shapes_from_input(per_replica_input_shapes, + per_replica_batch_size) + + self._config_proto = self._create_config_proto() + + logging.info("Initializing TPU Embedding engine.") + tpu_embedding_v2_utils.log_tpu_embedding_configuration(self._config_proto) + + @def_function.function + def load_config(): + tpu.initialize_system_for_tpu_embedding(self._config_proto) + + load_config() + logging.info("Done initializing TPU Embedding engine.") + + # Create and load variables and slot variables into the TPU. + # Note that this is a dict of dicts. Keys to the first dict are table names. + # We would prefer to use TableConfigs, but then these variables won't be + # properly tracked by the tracking API. + self._variables = self._create_variables_and_slots() + + self._built = True + + # This is internally conditioned self._built and self._using_tpu + self._load_variables() + + def _maybe_build(self, + output_shapes: Optional[Union[List[int], Iterable]] = None): # pylint:disable=g-bare-generic + if not self._built: + # This can be called while tracing a function, so we wrap the + # initialization code with init_scope so it runs eagerly, this means that + # it will not be included the function graph generated by tracing so that + # we can be sure that we only initialize the TPU for embeddings exactly + # once. + with ops.init_scope(): + self.build(output_shapes) + + def _get_and_update_output_shapes_from_input( + self, + per_replica_input_shapes: Optional[List[TensorShape]] = None, + per_replica_batch_size: Optional[int] = None): + """Get and update the per replica output shapes from the input.""" + per_replica_output_shapes = None + if per_replica_batch_size and per_replica_input_shapes is None: + logging.warning( + "per_replica_batch_size argument will be deprecated, please specify " + "all the input shapes using per_replica_input_shapes argument.") + per_replica_output_shapes = self._get_output_shapes_from_batch_size( + per_replica_batch_size) + + # Update the input shapes if provided. + if per_replica_input_shapes is not None: + if isinstance(per_replica_input_shapes, int): + logging.warning( + "Passing batch size to per_replica_input_shapes argument will be" + " deprecated, please specify all the input shapes using" + " per_replica_input_shapes argument.") + per_replica_output_shapes = self._get_output_shapes_from_batch_size( + per_replica_input_shapes) + else: + nest.assert_same_structure( + nest.flatten(per_replica_input_shapes), + nest.flatten(self._feature_config)) + + # Convert the nested structure to list. + per_replica_input_shapes = nest.flatten(per_replica_input_shapes) + + per_replica_output_shapes = self._get_output_shapes_from_input_shapes( + per_replica_input_shapes) + + if per_replica_output_shapes is not None: + + # Check the output shapes with existing output shapes setting. + self._check_output_shapes(per_replica_output_shapes) + + # Update the output shapes with existing output shapes setting. 
+      # This is necessary because the output shapes might be missing from
+      # the feature config, in which case they can be set by:
+      #  1. the user calling the build method, or
+      #  2. being auto detected when the dequeue method is called for the
+      #     first time (the dequeue method will call the build method with
+      #     the output shapes).
+      # Either of these two situations will lead to an update to the existing
+      # output shapes.
+      self._update_output_shapes(per_replica_output_shapes)
+
+    # Check if the output shapes are fully defined. This is required in order
+    # to set them in the feature descriptor field of the tpu embedding config
+    # proto.
+    self._check_output_shapes_fully_defined()
+
+  def _get_output_shapes_from_input_shapes(
+      self, input_shapes: List[TensorShape]) -> List[TensorShape]:
+    """Get output shapes from the flattened input shapes list."""
+    output_shapes = []
+    for input_shape, feature in zip(input_shapes,
+                                    nest.flatten(self._feature_config)):
+      if input_shape.rank is None or input_shape.rank < 1:
+        raise ValueError(
+            "Received input tensor of shape {}. Rank must be 1 or above."
+            .format(input_shape))
+      # Update the input shape with the max sequence length. Only update when
+      # 1. the input feature is a 2D ragged or sparse tensor, and
+      # 2. the output shape is not set in the feature config and the max
+      #    sequence length is set.
+      if (len(input_shape) == 2 and input_shape[-1] != 1 and
+          not feature.output_shape and feature.max_sequence_length > 0):
+        input_shape_list = input_shape.as_list()
+        input_shape_list.insert(
+            len(input_shape_list) - 1, feature.max_sequence_length)
+        input_shape = TensorShape(input_shape_list)
+      if input_shape.rank == 1:
+        output_shapes.append(input_shape)
+      else:
+        output_shapes.append(input_shape[:-1])
+    return output_shapes
+
+  @property
+  def embedding_tables(
+      self
+  ) -> Dict[tpu_embedding_v2_utils.TableConfig, tf_variables.Variable]:
+    """Returns a dict of embedding tables, keyed by `TableConfig`.
+
+    This property only works when the `TPUEmbedding` object is created under a
+    non-TPU strategy. It is intended to be used for CPU based lookup when
+    creating a serving checkpoint.
+
+    Returns:
+      A dict of embedding tables, keyed by `TableConfig`.
+
+    Raises:
+      RuntimeError: If object was created under a `TPUStrategy`.
+    """
+    # We don't support returning tables on TPU due to their sharded nature and
+    # the fact that when using a TPUStrategy:
+    # 1. Variables are stale and are only updated when a checkpoint is made.
+    # 2. Updating the variables won't affect the actual tables on the TPU.
+    if self._using_tpu:
+      if save_context.in_save_context():
+        return {table: self._variables[table.name]["parameters"].variables[0]
+                for table in self._table_config}
+      raise RuntimeError("Unable to retrieve embedding tables when using a TPU "
+                         "strategy. If you need access, save your model, "
+                         "create this object under a CPU strategy and restore.")
+
+    self._maybe_build(None)
+
+    # Only return the tables and not the slot variables. On CPU these are
+    # honest tf.Variables.
+    return {table: self._variables[table.name]["parameters"]
+            for table in self._table_config}
+
+  def _create_config_proto(
+      self
+  ) -> tpu_embedding_configuration_pb2.TPUEmbeddingConfiguration:
+    """Creates the TPUEmbeddingConfiguration proto.
+
+    This proto is used to initialize the TPU embedding engine.
+
+    Returns:
+      A TPUEmbeddingConfiguration proto.
+    """
+
+    config_proto = tpu_embedding_configuration_pb2.TPUEmbeddingConfiguration()
+
+    # Map each callable dynamic learning rate to its index in the list.
+ # The learning rate index is the index of the dynamic learning rate for this + # table (if it exists) in the list we created at initialization. We don't + # simply create one learning rate index per table as this has extremely bad + # performance characteristics. The more separate optimization configurations + # we have, the worse the performance will be. + learning_rate_index = {r: i for i, r in enumerate( + self._dynamic_learning_rates)} + + for table in self._table_config: + table._set_table_descriptor( # pylint: disable=protected-access + config_proto.table_descriptor.add(), + self._strategy.extended.num_hosts, + learning_rate_index) + + table_to_id = {table: i for i, table in enumerate(self._table_config)} + + # Set feature descriptor field in the config proto. + for feature, output_shape in zip( + nest.flatten(self._feature_config), self._output_shapes): + feature_descriptor = config_proto.feature_descriptor.add() + + if feature.name: + feature_descriptor.name = feature.name + + feature_descriptor.table_id = table_to_id[feature.table] + # The input shape of the feature is the actual shape of the input tensor + # except the last dimension because the last dimension will always be + # reduced. + feature_descriptor.input_shape.extend(output_shape.as_list()) + + # Always set mode to training, we override the mode during enqueue. + config_proto.mode = ( + tpu_embedding_configuration_pb2.TPUEmbeddingConfiguration.TRAINING) + + num_replica = self._strategy.num_replicas_in_sync + num_cores_per_replica = self._num_cores_per_replica or 1 + + config_proto.num_hosts = self._strategy.extended.num_hosts + config_proto.num_tensor_cores = num_replica * num_cores_per_replica + + # TODO(bfontain): Allow users to pick MOD for the host sharding. + config_proto.sharding_strategy = ( + tpu_embedding_configuration_pb2.TPUEmbeddingConfiguration.DIV_DEFAULT) + config_proto.pipeline_execution_with_tensor_core = ( + self._pipeline_execution_with_tensor_core) + + if self._num_cores_per_replica: + config_proto.spmd_sharding.enabled = True + config_proto.spmd_sharding.num_cores_per_replica = ( + self._num_cores_per_replica + ) + + return config_proto + + def apply_gradients(self, gradients, name: Optional[Text] = None): + """Applies the gradient update to the embedding tables. + + If a gradient of `None` is passed in any position of the nested structure, + then an gradient update with a zero gradient is applied for that feature. + For optimizers like SGD or Adagrad, this is the same as applying no update + at all. For lazy Adam and other sparsely applied optimizers with decay, + ensure you understand the effect of applying a zero gradient. + + ```python + strategy = tf.distribute.TPUStrategy(...) + with strategy.scope(): + embedding = tf.tpu.experimental.embedding.TPUEmbedding(...) + + distributed_dataset = ( + strategy.distribute_datasets_from_function( + dataset_fn=..., + options=tf.distribute.InputOptions( + experimental_fetch_to_device=False)) + dataset_iterator = iter(distributed_dataset) + + @tf.function + def training_step(): + def tpu_step(tpu_features): + with tf.GradientTape() as tape: + activations = embedding.dequeue() + tape.watch(activations) + + loss = ... 
# some computation involving activations + + embedding_gradients = tape.gradient(loss, activations) + embedding.apply_gradients(embedding_gradients) + + embedding_features, tpu_features = next(dataset_iterator) + embedding.enqueue(embedding_features, training=True) + strategy.run(tpu_step, args=(tpu_features, )) + + training_step() + ``` + + Args: + gradients: A nested structure of gradients, with structure matching the + `feature_config` passed to this object. + name: A name for the underlying op. + + Raises: + RuntimeError: If called when object wasn't created under a `TPUStrategy` + or if not built (either by manually calling build or calling enqueue). + ValueError: If a non-`tf.Tensor` non-`None` gradient is passed in, or a + `tf.Tensor` of the incorrect shape is passed in. Also if + the size of any sequence in `gradients` does not match corresponding + sequence in `feature_config`. + TypeError: If the type of any sequence in `gradients` does not match + corresponding sequence in `feature_config`. + """ + if not self._using_tpu: + raise RuntimeError("apply_gradients is not valid when TPUEmbedding " + "object is not created under a TPUStrategy.") + + if not self._built: + raise RuntimeError("apply_gradients called on unbuilt TPUEmbedding " + "object. Please either call enqueue first or manually " + "call the build method.") + + num_cores_per_replica = self._num_cores_per_replica or 1 + + nest.assert_same_structure(self._feature_config, gradients) + updated_gradients = [] + for (path, gradient), feature, output_shape in zip( + nest.flatten_with_joined_string_paths(gradients), + nest.flatten(self._feature_config), self._output_shapes): + full_output_shape = [x * num_cores_per_replica for x in output_shape] + [ + feature.table.dim + ] + if gradient is not None and not isinstance(gradient, tensor_lib.Tensor): + raise ValueError( + f"found non-tensor type: {type(gradient)} at path {path}.") + if gradient is not None: + if gradient.shape != full_output_shape: + raise ValueError("Found gradient of shape {} at path {}. Expected " + "shape {}.".format(gradient.shape, path, + full_output_shape)) + else: + # No gradient for this feature, since we must give a gradient for all + # features, pass in a zero tensor here. Note that this is not correct + # for all optimizers. + logging.warning( + "No gradient passed for feature %s, sending zero " + "gradient. This may not be correct behavior for certain " + "optimizers like Adam.", path) + gradient = array_ops.zeros(full_output_shape, dtype=dtypes.float32) + # Some gradients can be passed with op which shape is not correctly set. + # This ensures that the shape of the gradient is correctly set. + updated_gradients.append( + array_ops.reshape(gradient, shape=gradient.shape)) + op = tpu_ops.send_tpu_embedding_gradients( + inputs=updated_gradients, + learning_rates=[ + math_ops.cast(fn(), dtype=dtypes.float32) + for fn in self._dynamic_learning_rates + ], + config=self._config_proto.SerializeToString()) + + # Apply the name tag to the op. + if name is not None: + _add_key_attr(op, name) + + def dequeue(self, name: Optional[Text] = None): + """Get the embedding results. + + Returns a nested structure of `tf.Tensor` objects, matching the structure of + the `feature_config` argument to the `TPUEmbedding` class. The output shape + of the tensors is `(*output_shape, dim)`, `dim` is the dimension of the + corresponding `TableConfig`. For output_shape, there are three places where + it can be set. + 1. FeatureConfig provided in the __init__ function. + 2. 
Per_replica_output_shapes by directly calling the build method + after initializing the tpu embedding class. + 3. Auto detected from the shapes of the input feature. + The priority of these places is the exact same order. + + ```python + strategy = tf.distribute.TPUStrategy(...) + with strategy.scope(): + embedding = tf.tpu.experimental.embedding.TPUEmbedding(...) + + distributed_dataset = ( + strategy.distribute_datasets_from_function( + dataset_fn=..., + options=tf.distribute.InputOptions( + experimental_fetch_to_device=False)) + dataset_iterator = iter(distributed_dataset) + + @tf.function + def training_step(): + def tpu_step(tpu_features): + with tf.GradientTape() as tape: + activations = embedding.dequeue() + tape.watch(activations) + + loss = ... # some computation involving activations + + embedding_gradients = tape.gradient(loss, activations) + embedding.apply_gradients(embedding_gradients) + + embedding_features, tpu_features = next(dataset_iterator) + embedding.enqueue(embedding_features, training=True) + strategy.run(tpu_step, args=(tpu_features, )) + + training_step() + ``` + + Args: + name: A name for the underlying op. + + Returns: + A nested structure of tensors, with the same structure as `feature_config` + passed to this instance of the `TPUEmbedding` object. + + Raises: + RuntimeError: If called when object wasn't created under a `TPUStrategy` + or if not built (either by manually calling build or calling enqueue). + """ + if not self._using_tpu: + raise RuntimeError("dequeue is not valid when TPUEmbedding object is not " + "created under a TPUStrategy.") + + if not self._built: + raise RuntimeError("dequeue called on unbuilt TPUEmbedding object. " + "Please either call enqueue first or manually call " + "the build method.") + + # The activations returned by this op are per feature. + activations = tpu_ops.recv_tpu_embedding_activations( + num_outputs=len(self._config_proto.feature_descriptor), + config=self._config_proto.SerializeToString()) + + # Apply the name tag to the op. + if name is not None: + _add_key_attr(activations[0].op, name) + + # Pack the list back into the same nested structure as the features. + return nest.pack_sequence_as(self._feature_config, activations) + + def _create_variables_and_slots( + self + ) -> Dict[Text, Dict[Text, tf_variables.Variable]]: + """Create variables for TPU embeddings. + + Note under TPUStrategy this will ensure that all creations happen within a + variable creation scope of the sharded variable creator. + + Returns: + A dict of dicts. The outer dict is keyed by the table names and the inner + dicts are keyed by 'parameters' and the slot variable names. + """ + + def create_variables(table): + """Create all variables.""" + variable_shape = (table.vocabulary_size, table.dim) + + def getter(name, shape, dtype, initializer, trainable): + del shape + # _add_variable_with_custom_getter clears the shape sometimes, so we + # take the global shape from outside the getter. + initial_value = functools.partial(initializer, variable_shape, + dtype=dtype) + return tf_variables.Variable( + name=name, + initial_value=initial_value, + shape=variable_shape, + dtype=dtype, + trainable=trainable) + + def variable_creator(name, initializer, trainable=True): + # use add_variable_with_custom_getter here so that we take advantage of + # the checkpoint loading to allow restore before the variables get + # created which avoids double initialization. 
+ return self._add_variable_with_custom_getter( + name=name, + initializer=initializer, + shape=variable_shape, + dtype=dtypes.float32, + getter=getter, + trainable=trainable) + + parameters = variable_creator(table.name, table.initializer, + trainable=not self._using_tpu) + + def slot_creator(name, initializer): + return variable_creator(table.name + "/" + name, + initializer, + False) + + if table.optimizer is not None: + slot_vars = table.optimizer._create_slots(parameters, slot_creator) # pylint: disable=protected-access + else: + slot_vars = {} + slot_vars["parameters"] = parameters + return slot_vars + + # Store tables based on name rather than TableConfig as we can't track + # through dicts with non-string keys, i.e. we won't be able to save. + variables = {} + for table in self._table_config: + if not self._using_tpu: + variables[table.name] = create_variables(table) + else: + with variable_scope.variable_creator_scope( + make_sharded_variable_creator(self._hosts)): + variables[table.name] = create_variables(table) + + return variables + + def _load_variables(self): + # Only load the variables if we are: + # 1) Using TPU + # 2) Variables are created + # 3) Not in save context (except if running eagerly) + if self._using_tpu and self._built and not ( + not context.executing_eagerly() and save_context.in_save_context()): + _load_variables_impl(self._config_proto.SerializeToString(), + self._hosts, + self._variables, + self._table_config) + + def _retrieve_variables(self): + # Only retrieve the variables if we are: + # 1) Using TPU + # 2) Variables are created + # 3) Not in save context (except if running eagerly) + if self._using_tpu and self._built and not ( + not context.executing_eagerly() and save_context.in_save_context()): + _retrieve_variables_impl(self._config_proto.SerializeToString(), + self._hosts, + self._variables, + self._table_config) + + # Some helper functions for the below enqueue function. + def _add_data_for_tensor(self, tensor, weight, indices, values, weights, + int_zeros, float_zeros, path): + if weight is not None: + raise ValueError( + "Weight specified for dense input {}, which is not allowed. " + "Weight will always be 1 in this case.".format(path)) + # For tensors, there are no indices and no weights. + indices.append(int_zeros) + values.append(math_ops.cast(array_ops.reshape(tensor, [-1]), dtypes.int64)) + weights.append(float_zeros) + + def _add_data_for_sparse_tensor(self, tensor, weight, indices, values, + weights, int_zeros, float_zeros, path, + feature): + sample_indices = math_ops.cast(tensor.indices, dtypes.int32) + if tensor.shape.rank == 2: + if not feature.output_shape and feature.max_sequence_length > 0: + # Add one dimension to the last axis. + sample_indices = array_ops.pad( + sample_indices, paddings=[[0, 0], [0, 1]]) + else: + if feature.max_sequence_length > 0: + logging.warning( + ( + "Input tensor is rank %d which is above 2, the" + " max_sequence_length setting will be ignored." + ), + tensor.shape.rank, + ) + indices.append(sample_indices) + values.append(math_ops.cast(tensor.values, dtypes.int64)) + # If we have weights they must be a SparseTensor. 
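+    # Only the weight values are forwarded to the enqueue op; the weight's
+    # indices are assumed to line up with the indices of the input (see the
+    # `enqueue` docstring).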
+    if weight is not None:
+      if not isinstance(weight, sparse_tensor.SparseTensor):
+        raise ValueError("Weight for {} is of type {}, which does not match "
+                         "the input type, which is SparseTensor.".format(
+                             path, type(weight)))
+      weights.append(math_ops.cast(weight.values, dtypes.float32))
+    else:
+      weights.append(float_zeros)
+
+  def _add_data_for_ragged_tensor(self, tensor, weight, row_splits, values,
+                                  weights, int_zeros, float_zeros, path,
+                                  feature):
+    row_splits.append(math_ops.cast(tensor.row_splits, dtypes.int32))
+    values.append(math_ops.cast(tensor.values, dtypes.int64))
+    # If we have weights they must be a RaggedTensor.
+    if weight is not None:
+      if not isinstance(weight, ragged_tensor.RaggedTensor):
+        raise ValueError("Weight for {} is of type {}, which does not match "
+                         "the input type, which is RaggedTensor.".format(
+                             path, type(weight)))
+      weights.append(math_ops.cast(weight.values, dtypes.float32))
+    else:
+      weights.append(float_zeros)
+
+  def _generate_enqueue_op(
+      self,
+      flat_inputs: List[internal_types.NativeObject],
+      flat_weights: List[Optional[internal_types.NativeObject]],
+      flat_features: List[tpu_embedding_v2_utils.FeatureConfig],
+      device_ordinal: int,
+      mode_override: Text
+  ) -> ops.Operation:
+    """Outputs the enqueue op given the inputs and weights.
+
+    Args:
+      flat_inputs: A list of input tensors.
+      flat_weights: A list of input weights (or None) of the same length as
+        flat_inputs.
+      flat_features: A list of FeatureConfigs of the same length as
+        flat_inputs.
+      device_ordinal: The device to create the enqueue op for.
+      mode_override: A tensor containing the string "train" or "inference".
+
+    Returns:
+      The enqueue op.
+    """
+    # Combiners are per table, listed in the same order as the table order.
+    combiners = [table.combiner for table in self._table_config]
+
+    # These parallel arrays will be the inputs to the enqueue op.
+    # sample_indices for sparse, row_splits for ragged.
+    indices_or_row_splits = []
+    values = []
+    weights = []
+
+    # We have to supply an empty/zero tensor in a list position where we don't
+    # have data (e.g. indices for standard Tensor input, weight when no weight
+    # is specified). We create one op here per call, so that we reduce the
+    # graph size.
+    int_zeros = array_ops.zeros((0,), dtype=dtypes.int32)
+    float_zeros = array_ops.zeros((0,), dtype=dtypes.float32)
+
+    # In the following loop we insert casts so that everything is either int32
+    # or float32. This is because op inputs which are lists of tensors must be
+    # of the same type within the list. Moreover the CPU implementations of
+    # these ops cast to these types anyway, so we don't lose any data by
+    # casting early.
+    for inp, weight, (path, feature) in zip(
+        flat_inputs, flat_weights, flat_features):
+      if isinstance(inp, tensor_lib.Tensor):
+        self._add_data_for_tensor(inp, weight, indices_or_row_splits, values,
+                                  weights, int_zeros, float_zeros, path)
+      elif isinstance(inp, sparse_tensor.SparseTensor):
+        self._add_data_for_sparse_tensor(inp, weight, indices_or_row_splits,
+                                         values, weights, int_zeros,
+                                         float_zeros, path, feature)
+      elif isinstance(inp, ragged_tensor.RaggedTensor):
+        self._add_data_for_ragged_tensor(inp, weight, indices_or_row_splits,
+                                         values, weights, int_zeros,
+                                         float_zeros, path, feature)
+      else:
+        raise ValueError("Input {} is of unknown type {}. Please only pass "
+                         "Tensor, SparseTensor or RaggedTensor as input to "
+                         "enqueue.".format(path, type(inp)))
+
+    return tpu_ops.enqueue_tpu_embedding_arbitrary_tensor_batch(
+        sample_indices_or_row_splits=indices_or_row_splits,
+        embedding_indices=values,
+        aggregation_weights=weights,
+        mode_override=mode_override,
+        device_ordinal=device_ordinal,
+        combiners=combiners)
+
+  def _raise_error_for_incorrect_control_flow_context(self):
+    """Raises if in XLA control flow; returns whether in TPUReplicateContext."""
+    # Do not allow any XLA control flow (i.e. control flow in between a
+    # TPUStrategy's run call and the call to this function), as we can't
+    # extract the enqueue from the head when in XLA control flow.
+    graph = ops.get_default_graph()
+    in_tpu_ctx = False
+    while graph is not None:
+      ctx = graph._get_control_flow_context()  # pylint: disable=protected-access
+      while ctx is not None:
+        if isinstance(ctx, tpu_replication.TPUReplicateContext):
+          in_tpu_ctx = True
+          break
+        ctx = ctx.outer_context
+      if in_tpu_ctx:
+        break
+      graph = getattr(graph, "outer_graph", None)
+    if graph != ops.get_default_graph() and in_tpu_ctx:
+      raise RuntimeError(
+          "Current graph {} does not match graph which contains "
+          "TPUReplicateContext {}. This is most likely because enqueueing "
+          "embedding data is called inside control flow or a "
+          "tf.function inside `strategy.run`. This is not supported because "
+          "outside compilation fails to extract the enqueue ops as the head "
+          "of a computation.".format(ops.get_default_graph(), graph))
+    return in_tpu_ctx
+
+  def _raise_error_for_non_direct_inputs(self, features):
+    """Checks all tensors in features to see if they are a direct input."""
+
+    # expand_composites here is important: as composite tensors pass through
+    # tpu.replicate, they get 'flattened' into their component tensors and
+    # then repacked before being passed to the tpu function. This means that
+    # it is the component tensors which are produced by an op with the
+    # "_tpu_input_identity" attribute.
+    for path, input_tensor in nest.flatten_with_joined_string_paths(
+        features, expand_composites=True):
+      if input_tensor.op.type == "Placeholder":
+        continue
+      try:
+        is_input = input_tensor.op.get_attr("_tpu_input_identity")
+      except ValueError:
+        is_input = False
+      if not is_input:
+        raise ValueError(
+            "Received input tensor {} which is the output of op {} (type {}) "
+            "which does not have the `_tpu_input_identity` attr. Please "
+            "ensure that the inputs to this layer are taken directly from "
+            "the arguments of the function called by "
+            "strategy.run. Two possible causes are: dynamic batch size "
+            "support or you are using a keras layer and are not passing "
+            "tensors which match the dtype of the `tf.keras.Input`s. "
+            "If you are triggering dynamic batch size support, you can "
+            "disable it by passing tf.distribute.RunOptions("
+            "experimental_enable_dynamic_batch_size=False) to the options "
+            "argument of strategy.run().".format(path,
+                                                 input_tensor.op.name,
+                                                 input_tensor.op.type))
+
+  def _raise_error_for_inputs_not_on_cpu(self, flat_inputs, flat_paths):
+    """Checks all tensors in features to see if they are placed on the CPU."""
+
+    def check_device(path, device_string):
+      spec = tf_device.DeviceSpec.from_string(device_string)
+      if spec.device_type == "TPU":
+        raise ValueError(
+            "Received input tensor {} which is on a TPU input device {}. "
+            "Input tensors for TPU embeddings must be placed on the CPU. "
+            "Please ensure that your dataset is prefetching tensors to the "
+            "host by setting the 'experimental_fetch_to_device' option of "
+            "the dataset distribution function. See the documentation of "
+            "the enqueue method for an example.".format(path, device_string))
+
+    # expand_composites here is important: we need to check the device of
+    # each underlying tensor.
+    for input_tensor, input_path in zip(flat_inputs, flat_paths):
+      if nest.is_nested_or_composite(input_tensor):
+        input_tensors = nest.flatten(input_tensor, expand_composites=True)
+      else:
+        input_tensors = [input_tensor]
+      for t in input_tensors:
+        if (t.op.type == "Identity" and
+            t.op.inputs[0].op.type == "TPUReplicatedInput"):
+          for tensor in t.op.inputs[0].op.inputs:
+            check_device(input_path, tensor.device)
+        else:
+          check_device(input_path, t.device)
+
+  def enqueue(
+      self,
+      features,
+      weights=None,
+      training: bool = True,
+      name: Optional[Text] = None,
+      device: Optional[Text] = None):
+    """Enqueues id tensors for embedding lookup.
+
+    This function enqueues a structure of features to be looked up in the
+    embedding tables. We expect that the input shape of each of the tensors in
+    features matches the output shapes set via FeatureConfig or the build
+    method (if any). Otherwise, the output shapes will be auto detected based
+    on the input shapes together with the max_sequence_length or output shape
+    setting in the FeatureConfig. Note that the output shapes are based on the
+    per-replica batch size.
+    If your input dataset is batched to the global batch size and you use
+    `tf.distribute.TPUStrategy`'s `experimental_distribute_dataset`
+    or if you use `distribute_datasets_from_function` and batch
+    to the per core batch size computed by the context passed to your input
+    function, the output shapes should match automatically.
+
+    The output shapes are auto detected as follows:
+      1. For a dense tensor of rank 2 or above, the last dimension must be 1.
+         The output shape will be the input shape excluding the last
+         dimension.
+      2. For a sparse tensor, the rank must be 2 or above.
+         a. If the feature config has max_sequence_length equal to 0 or an
+            output shape set (in which case the max_sequence_length setting
+            is ignored), the output shape will be the input shape excluding
+            the last dimension.
+         b. Otherwise, if the tensor is rank 2, the output shape will be the
+            input shape with the last dimension set to max_sequence_length.
+            If the tensor is above rank 2, the output shape will be the input
+            shape excluding the last dimension, with the last dimension of
+            the output shape set to max_sequence_length.
+      3. For a ragged tensor, the rank must be 2.
+         a. If the feature config has max_sequence_length equal to 0 or an
+            output shape set (in which case the max_sequence_length setting
+            is ignored), the output shape will be the input shape excluding
+            the last dimension.
+         b. Otherwise, the output shape will be the input shape excluding the
+            last dimension, with the last dimension of the output shape set
+            to max_sequence_length.
+
+    ```python
+    strategy = tf.distribute.TPUStrategy(...)
+    with strategy.scope():
+      embedding = tf.tpu.experimental.embedding.TPUEmbedding(...)
+
+    distributed_dataset = (
+        strategy.distribute_datasets_from_function(
+            dataset_fn=...,
+            options=tf.distribute.InputOptions(
+                experimental_fetch_to_device=False)))
+    dataset_iterator = iter(distributed_dataset)
+
+    @tf.function
+    def training_step():
+      def tpu_step(tpu_features):
+        with tf.GradientTape() as tape:
+          activations = embedding.dequeue()
+          tape.watch(activations)
+
+          loss = ...  # some computation involving activations
+
+        embedding_gradients = tape.gradient(loss, activations)
+        embedding.apply_gradients(embedding_gradients)
+
+      embedding_features, tpu_features = next(dataset_iterator)
+      embedding.enqueue(embedding_features, training=True)
+      strategy.run(tpu_step, args=(tpu_features,))
+
+    training_step()
+    ```
+
+    NOTE: You should specify `training=True` when using
+    `embedding.apply_gradients` as above and `training=False` when not using
+    `embedding.apply_gradients` (e.g. for frozen embeddings or when doing
+    evaluation).
+
+    For finer grained control, in the above example the line
+
+    ```
+    embedding.enqueue(embedding_features, training=True)
+    ```
+
+    may be replaced with
+
+    ```
+    per_core_embedding_features = strategy.experimental_local_results(
+        embedding_features)
+
+    def per_core_enqueue(ctx):
+      core_id = ctx.replica_id_in_sync_group
+      device = strategy.extended.worker_devices[core_id]
+      embedding.enqueue(per_core_embedding_features[core_id],
+                        device=device)
+
+    strategy.experimental_distribute_values_from_function(
+        per_core_enqueue)
+    ```
+
+    Args:
+      features: A nested structure of `tf.Tensor`s, `tf.SparseTensor`s or
+        `tf.RaggedTensor`s, with the same structure as `feature_config`.
+        Inputs will be downcast to `tf.int32`. Only one type out of
+        `tf.SparseTensor` or `tf.RaggedTensor` is supported per call.
+      weights: If not `None`, a nested structure of `tf.Tensor`s,
+        `tf.SparseTensor`s or `tf.RaggedTensor`s, matching the above, except
+        that the tensors should be of float type (and they will be downcast
+        to `tf.float32`). For `tf.SparseTensor`s we assume the `indices` are
+        the same for the parallel entries from `features` and similarly for
+        `tf.RaggedTensor`s we assume the row_splits are the same.
+      training: Defaults to `True`. If `False`, enqueue the batch as an
+        inference batch (forward pass only). Do not call `apply_gradients`
+        when this is `False` as this may lead to a deadlock.
+      name: A name for the underlying op.
+      device: The device name (e.g. '/task:0/device:TPU:2') where this batch
+        should be enqueued. This should be set if and only if features is not
+        a `tf.distribute.DistributedValues` and enqueue is not being called
+        inside a TPU context (e.g. inside `TPUStrategy.run`).
+
+    Raises:
+      ValueError: When called inside a strategy.run call and the input is not
+        taken directly from the args of the `strategy.run` call. Also if
+        the size of any sequence in `features` does not match the
+        corresponding sequence in `feature_config`. Similarly for `weights`,
+        if not `None`. Also if the input shapes of the features are unequal
+        or differ from those of a previous call.
+      RuntimeError: When called inside a strategy.run call and inside XLA
+        control flow. Also if the batch_size cannot be determined and build
+        was not called.
+      TypeError: If the type of any sequence in `features` does not match the
+        corresponding sequence in `feature_config`. Similarly for `weights`,
+        if not `None`.
+ """ + if not self._using_tpu: + raise RuntimeError("enqueue is not valid when TPUEmbedding object is not " + "created under a TPUStrategy.") + + in_tpu_context = self._raise_error_for_incorrect_control_flow_context() + + nest.assert_same_structure(self._feature_config, features) + + if not self._verify_output_shapes_on_enqueue: + if not self._output_shapes or not self._built: + raise ValueError( + "Configured not to check output shapes on each enqueue() call; please " + "ensure build() was called with output shapes to initialize " + "the TPU for embeddings.") + else: + per_replica = device is None + input_shapes = self._get_input_shapes( + features, per_replica, in_tpu_context + ) + + self._maybe_build(input_shapes) + # If is already built, we still need to check if the output shapes matches + # with the previous ones. + self._check_output_shapes( + self._get_output_shapes_from_input_shapes(input_shapes)) + + flat_inputs = nest.flatten(features) + flat_weights = [None] * len(flat_inputs) + if weights is not None: + nest.assert_same_structure(self._feature_config, weights) + flat_weights = nest.flatten(weights) + flat_features = nest.flatten_with_joined_string_paths(self._feature_config) + flat_paths, _ = zip(*flat_features) + + self._raise_error_for_inputs_not_on_cpu(flat_inputs, flat_paths) + # If we are in a tpu_context, automatically apply outside compilation. + if in_tpu_context: + self._raise_error_for_non_direct_inputs(features) + + def generate_enqueue_ops(): + """Generate enqueue ops for outside compilation.""" + # Note that we put array_ops.where_v2 rather than a python if so that + # the op is explicitly create and the constant ops are both in the graph + # even though we don't expect training to be a tensor (and thus generate + # control flow automatically). This need to make it easier to re-write + # the graph later if we need to fix which mode needs to be used. + mode_override = array_ops.where_v2(training, + constant_op.constant("train"), + constant_op.constant("inference")) + # Device ordinal is -1 here, a later rewrite will fix this once the op + # is expanded by outside compilation. + enqueue_op = self._generate_enqueue_op( + flat_inputs, flat_weights, flat_features, device_ordinal=-1, + mode_override=mode_override) + + # Apply the name tag to the op. + if name is not None: + _add_key_attr(enqueue_op, name) + + tpu_replication.outside_compilation(generate_enqueue_ops) + + elif device is None: + mode_override = "train" if training else "inference" + # We generate enqueue ops per device, so we need to gather the all + # features for a single device in to a dict. + # We rely here on the fact that the devices in the PerReplica value occur + # in the same (standard) order as self._strategy.extended.worker_devices. 
+      enqueue_ops = []
+
+      def _split_fn(ts, idx):
+        if ts is None:
+          return None
+        elif isinstance(ts, tensor_lib.Tensor):
+          return array_ops.split(
+              ts,
+              num_or_size_splits=self._num_cores_per_replica,
+              axis=0)[idx]
+        elif isinstance(ts, sparse_tensor.SparseTensor):
+          return sparse_ops.sparse_split_v2(
+              sp_input=ts,
+              num_split=self._num_cores_per_replica,
+              axis=0)[idx]
+        else:
+          raise ValueError("SPMD does not support RaggedTensor yet.")
+
+      def _maybe_split(ts_inputs, core_id):
+        if self._num_cores_per_replica is None:
+          return ts_inputs
+        else:
+          splitter = functools.partial(_split_fn, idx=core_id)
+          return nest.map_structure(splitter, ts_inputs)
+
+      for replica_id in range(self._strategy.num_replicas_in_sync):
+        replica_inputs = distribute_utils.select_replica(replica_id,
+                                                         flat_inputs)
+        replica_weights = distribute_utils.select_replica(replica_id,
+                                                          flat_weights)
+
+        if self._num_cores_per_replica:
+          tpu_devices = self._strategy.extended._tpu_devices[replica_id]  # pylint: disable=protected-access
+        else:
+          tpu_devices = [self._strategy.extended.worker_devices[replica_id]]
+        # TPU device strings look like
+        # /job:worker/replica:0/task:0/device:TPU:0; the device ordinal is
+        # the last number.
+
+        for core_id in range(self._num_cores_per_replica or 1):
+          tpu_device = tpu_devices[core_id]
+          device_ordinal = (
+              tf_device.DeviceSpec.from_string(tpu_device).device_index)
+
+          with ops.device(device_util.get_host_for_device(tpu_device)):
+            enqueue_op = self._generate_enqueue_op(
+                _maybe_split(replica_inputs, core_id),
+                _maybe_split(replica_weights, core_id),
+                flat_features,
+                device_ordinal=device_ordinal, mode_override=mode_override)
+
+            # Apply the name tag to the op.
+            if name is not None:
+              _add_key_attr(enqueue_op, name)
+            enqueue_ops.append(enqueue_op)
+    else:
+      mode_override = "train" if training else "inference"
+      device_spec = tf_device.DeviceSpec.from_string(device)
+      if device_spec.device_type != "TPU":
+        raise ValueError(
+            "Non-TPU device {} passed to enqueue.".format(device))
+
+      with ops.device(device_util.get_host_for_device(device)):
+        enqueue_op = self._generate_enqueue_op(
+            flat_inputs, flat_weights, flat_features,
+            device_ordinal=device_spec.device_index,
+            mode_override=mode_override)
+
+        # Apply the name tag to the op.
+        if name is not None:
+          _add_key_attr(enqueue_op, name)
+
+  def _get_input_shapes(
+      self, tensors, per_replica: bool, in_tpu_context: bool
+  ) -> List[TensorShape]:
+    """Get the input shapes from the input tensors."""
+    input_shapes = []
+    for (path, maybe_tensor), feature in zip(
+        nest.flatten_with_joined_string_paths(tensors),
+        nest.flatten(self._feature_config)):
+      if not in_tpu_context:
+        tensor = distribute_utils.select_replica(0, maybe_tensor)
+      else:
+        tensor = maybe_tensor
+
+      if isinstance(tensor, tensor_lib.Tensor):
+        input_shapes.append(
+            self._get_input_shape_for_tensor(tensor, feature, per_replica,
+                                             path)
+        )
+      elif isinstance(tensor, sparse_tensor.SparseTensor):
+        input_shapes.append(
+            self._get_input_shape_for_sparse_tensor(
+                tensor, feature, per_replica, path
+            )
+        )
+      elif isinstance(tensor, ragged_tensor.RaggedTensor):
+        input_shapes.append(
+            self._get_input_shape_for_ragged_tensor(
+                tensor, feature, per_replica, path
+            )
+        )
+    return input_shapes
+
+  def _get_input_shape_for_tensor(
+      self, tensor, feature, per_replica, path
+  ) -> TensorShape:
+    """Get the input shape for the dense tensor."""
+    shape = tensor.shape.as_list()
+    if len(shape) < 1:
+      raise ValueError("Only dense tensors of rank 1 and above are "
+                       "supported; found rank {} dense tensor for input "
+                       "{}".format(len(shape), path))
+    if len(shape) > 1 and shape[-1] != 1:
+      raise ValueError(
+          "A dense tensor of rank 2 or above should have 1 as its last "
+          "dimension, as the last dimension is always reduced. "
+          "Instead got a dense tensor of shape {}".format(shape))
+
+    if self._num_cores_per_replica and per_replica:
+      shape[0] = shape[0] // self._num_cores_per_replica
+
+    return TensorShape(shape)
+
+  def _get_input_shape_for_sparse_tensor(
+      self, tensor, feature, per_replica, path
+  ) -> TensorShape:
+    """Get the input shape for the sparse tensor."""
+    shape = tensor.shape.as_list()
+    # Only sparse tensors of rank 2 and above are supported.
+    if len(shape) < 2:
+      raise ValueError("Only sparse tensors of rank 2 and above are "
+                       "supported; found rank {} sparse tensor for input "
+                       "{}".format(len(shape), path))
+    if not feature.output_shape and feature.max_sequence_length > 0:
+      # If max_sequence_length is set and the output shape for FeatureConfig
+      # is not set, we modify the shape of the input feature. Only rank 2
+      # feature output shapes are modified.
+      if len(shape) == 2:
+        # If the sparse tensor is 2D and max_sequence_length is set,
+        # we need to add one dimension to the input feature.
+        shape.insert(len(shape) - 1, feature.max_sequence_length)
+
+    if self._num_cores_per_replica and per_replica and shape[0]:
+      shape[0] = shape[0] // self._num_cores_per_replica
+
+    return TensorShape(shape)
+
+  def _get_input_shape_for_ragged_tensor(
+      self, tensor, feature, per_replica, path
+  ) -> TensorShape:
+    """Get the input shape for the ragged tensor."""
+    del per_replica  # unused.
+    shape = tensor.shape.as_list()
+    # Only rank 2 ragged tensors are supported.
+    if len(shape) != 2:
+      raise ValueError("Only rank 2 ragged tensors are supported; found "
+                       "rank {} ragged tensor for input {}".format(
+                           len(shape), path))
+    if not feature.output_shape and feature.max_sequence_length > 0:
+      # If max_sequence_length is set and the output shape for FeatureConfig
+      # is not set, add the sequence length as the second to last dimension
+      # of the ragged tensor.
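+      # e.g. a [batch_size, None] ragged input yields an input shape of
+      # [batch_size, max_sequence_length, None] after the insert below.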
+      shape.insert(len(shape) - 1, feature.max_sequence_length)
+
+    return TensorShape(shape)
+
+  def _update_output_shapes(self, incoming_output_shapes: List[TensorShape]):
+    """Update the existing output shapes based on the new output shapes.
+
+    The existing output shapes always have higher priority than the new
+    incoming output shapes.
+
+    Args:
+      incoming_output_shapes: nested structure of TensorShape to override the
+        existing output shapes.
+    """
+    nest.assert_same_structure(self._output_shapes, incoming_output_shapes)
+    updated_output_shapes = []
+    for old_output_shape, incoming_output_shape in zip(self._output_shapes,
+                                                       incoming_output_shapes):
+      if old_output_shape:
+        updated_output_shapes.append(old_output_shape)
+      else:
+        updated_output_shapes.append(incoming_output_shape)
+    self._output_shapes = updated_output_shapes
+
+  def _check_output_shapes(self, incoming_output_shapes: List[TensorShape]):
+    """Check the incoming output shapes against the output shapes stored."""
+    # The incoming output shapes should have the same structure as the
+    # existing output shapes.
+    nest.assert_same_structure(self._output_shapes, incoming_output_shapes)
+
+    for (path, _), old_output_shape, incoming_output_shape in zip(
+        nest.flatten_with_joined_string_paths(self._feature_config),
+        self._output_shapes, incoming_output_shapes):
+      # First check if both shapes are not None.
+      if old_output_shape and incoming_output_shape:
+        # We skip the check when the incoming output shape is rank 1 or 2 and
+        # the rank of the old output shape is larger. This can happen for
+        # (sequence) ragged tensors; we push the check down to the enqueue op.
+        if (len(incoming_output_shape) == 1 or len(incoming_output_shape)
+            == 2) and len(old_output_shape) > len(incoming_output_shape):
+          continue
+        if len(old_output_shape) != len(
+            incoming_output_shape) or not self._is_tensor_shape_match(
+                old_output_shape, incoming_output_shape):
+          raise ValueError(
+              f"Inconsistent shape found for input feature {path}. "
+              f"The output shape was set to {old_output_shape}, "
+              f"but the incoming output shape is {incoming_output_shape}.")
+
+  def _check_output_shapes_fully_defined(self):
+    """Check that the output shapes are fully defined."""
+    for (path, _), output_shape in zip(
+        nest.flatten_with_joined_string_paths(self._feature_config),
+        self._output_shapes):
+      if not output_shape.is_fully_defined():
+        raise ValueError(
+            f"Input feature {path} has output shape set as "
+            f"{output_shape}, which is not fully defined. "
" + "Please specify the fully defined shape in either FeatureConfig " + "or for the build method.") + + def _is_tensor_shape_match(self, shape_a: TensorShape, + shape_b: TensorShape) -> bool: + """Check if shape b matches with shape a.""" + for s_a, s_b in zip(shape_a.as_list(), shape_b.as_list()): + if s_a and s_b and s_a != s_b: + return False + return True + + def _get_output_shapes_from_batch_size(self, per_replica_batch_size): + """Get the output shapes from the batch size.""" + output_shapes = [] + for feature in nest.flatten(self._feature_config): + if not feature.output_shape and feature.max_sequence_length > 0: + output_shapes.append( + TensorShape([per_replica_batch_size, feature.max_sequence_length])) + else: + output_shapes.append(TensorShape(per_replica_batch_size)) + return output_shapes + + def _create_copy_for_async_checkpoint( + self, feature_config, optimizer, pipeline_execution_with_tensor_core): + """Create a TPUEmbedding copy for checkpoint/async_checkpoint_helper.py.""" + return TPUEmbedding( + feature_config=feature_config, + optimizer=optimizer, + pipeline_execution_with_tensor_core=pipeline_execution_with_tensor_core) + + +@def_function.function +def _load_variables_impl( + config: Text, + hosts: List[Tuple[int, Text]], + variables: Dict[Text, Dict[Text, tf_variables.Variable]], + table_config: tpu_embedding_v2_utils.TableConfig): + """Load embedding tables to onto TPU for each table and host. + + Args: + config: A serialized TPUEmbeddingConfiguration proto. + hosts: A list of CPU devices, on per host. + variables: A dictionary of dictionaries of TPUEmbeddingVariables. First key + is the table name, second key is 'parameters' or the optimizer slot name. + table_config: A list of tf.tpu.experimental.embedding.TableConfig objects. + """ + def select_fn(host_id): + + def select_or_zeros(x): + if host_id >= len(x.variables): + # In the edge case where we have more hosts than variables, due to using + # a small number of rows, we load zeros for the later hosts. We copy + # the shape of the first host's variables, which we assume is defined + # because TableConfig guarantees at least one row. + return array_ops.zeros_like(x.variables[0]) + return x.variables[host_id] + + return select_or_zeros + + for host_id, host in enumerate(hosts): + with ops.device(host): + host_variables = nest.map_structure(select_fn(host_id), variables) + for table in table_config: + table.optimizer._load()( # pylint: disable=protected-access + table_name=table.name, + num_shards=len(hosts), + shard_id=host_id, + config=config, + **host_variables[table.name]) + # Ensure that only the first table/first host gets a config so that we + # don't bloat graph by attaching this large string to each op. + # We have num tables * num hosts of these so for models with a large + # number of tables training on a large slice, this can be an issue. + config = None + + +@def_function.function +def _retrieve_variables_impl( + config: Text, + hosts: List[Tuple[int, Text]], + variables: Dict[Text, Dict[Text, tf_variables.Variable]], + table_config: tpu_embedding_v2_utils.TableConfig): + """Retrieve embedding tables from TPU to host memory. + + Args: + config: A serialized TPUEmbeddingConfiguration proto. + hosts: A list of all the host CPU devices. + variables: A dictionary of dictionaries of TPUEmbeddingVariables. First key + is the table name, second key is 'parameters' or the optimizer slot name. + table_config: A list of tf.tpu.experimental.embedding.TableConfig objects. 
+ """ + for host_id, host in enumerate(hosts): + with ops.device(host): + for table in table_config: + retrieved = table.optimizer._retrieve()( # pylint: disable=protected-access + table_name=table.name, + num_shards=len(hosts), + shard_id=host_id, + config=config) + # When there are no slot variables (e.g with SGD) this returns a + # single tensor rather than a tuple. In this case we put the tensor in + # a list to make the following code easier to write. + if not isinstance(retrieved, tuple): + retrieved = (retrieved,) + + for i, slot in enumerate(["parameters"] + + table.optimizer._slot_names()): # pylint: disable=protected-access + # We must assign the CPU variables the values of tensors that were + # returned from the TPU. + sharded_var = variables[table.name][slot] + if host_id < len(sharded_var.variables): + # In the edge case where we have more hosts than variables, due to + # using a small number of rows, we skip the later hosts. + sharded_var.variables[host_id].assign(retrieved[i]) + # Ensure that only the first table/first host gets a config so that we + # don't bloat graph by attaching this large string to each op. + # We have num tables * num hosts of these so for models with a large + # number of tables training on a large slice, this can be an issue. + config = None + + +def _save_callback(trackables, **unused_kwargs): + for trackable in trackables.values(): + trackable._retrieve_variables() # pylint: disable=protected-access + return [] + + +def _restore_callback(trackables, **unused_kwargs): + for trackable in trackables.values(): + trackable._load_variables() # pylint: disable=protected-access + + +registration.register_tf_checkpoint_saver( + "TPUEmbeddingCallback", + predicate=lambda x: isinstance(x, TPUEmbedding), + save_fn=_save_callback, + restore_fn=_restore_callback, + # Set strict_predicate_restore to `False` to because the isinstance + # predicate check does not pass after a TPUEmbedding object is loaded from + # SavedModel. + strict_predicate_restore=False +) + + +def extract_variable_info( + kwargs) -> Tuple[Text, Tuple[int, ...], dtypes.DType, Callable[[], Any]]: + """Extracts the variable creation attributes from the kwargs. + + Args: + kwargs: a dict of keyword arguments that were passed to a variable creator + scope. + + Returns: + A tuple of variable name, shape, dtype, initialization function. + """ + if (isinstance(kwargs["initial_value"], functools.partial) and ( + "shape" in kwargs["initial_value"].keywords or + kwargs["initial_value"].args)): + # Sometimes shape is passed positionally, sometimes it's passed as a kwarg. + if "shape" in kwargs["initial_value"].keywords: + shape = kwargs["initial_value"].keywords["shape"] + else: + shape = kwargs["initial_value"].args[0] + return (kwargs["name"], shape, + kwargs["initial_value"].keywords.get("dtype", kwargs["dtype"]), + kwargs["initial_value"].func) + elif "shape" not in kwargs or kwargs["shape"] is None or not callable( + kwargs["initial_value"]): + raise ValueError( + "Unable to extract initializer function and shape from {}. Please " + "either pass a function that expects a shape and dtype as the " + "initial value for your variable or functools.partial object with " + "the shape and dtype kwargs set. 
This is needed so that we can " + "initialize the shards of the ShardedVariable locally.".format( + kwargs["initial_value"])) + else: + return (kwargs["name"], kwargs["shape"], kwargs["dtype"], + kwargs["initial_value"]) + + +def make_sharded_variable_creator( + hosts: List[Text]) -> Callable[..., TPUEmbeddingVariable]: + """Makes a sharded variable creator given a list of hosts. + + Args: + hosts: a list of tensorflow devices on which to shard the tensors. + + Returns: + A variable creator function. + """ + + def sharded_variable_creator( + next_creator: Callable[..., tf_variables.Variable], *args, **kwargs): + """The sharded variable creator.""" + kwargs["skip_mirrored_creator"] = True + + num_hosts = len(hosts) + name, shape, dtype, unwrapped_initial_value = extract_variable_info(kwargs) + initial_value = kwargs["initial_value"] + rows = shape[0] + cols = shape[1] + partial_partition = rows % num_hosts + full_rows_per_host = rows // num_hosts + # We partition as if we were using MOD sharding: at least + # `full_rows_per_host` rows to `num_hosts` hosts, where the first + # `partial_partition` hosts get an additional row when the number of rows + # is not cleanly divisible. Note that `full_rows_per_host` may be zero. + partitions = ( + [full_rows_per_host + 1] * partial_partition + + [full_rows_per_host] * (num_hosts - partial_partition)) + variables = [] + sharding_aware = "shard_info" in tf_inspect.getargspec(initial_value).args + + # Keep track of offset for sharding aware initializers. + offset = 0 + kwargs["dtype"] = dtype + for i, p in enumerate(partitions): + if p == 0: + # Skip variable creation for empty partitions, resulting from the edge + # case of 'rows < num_hosts'. This is safe because both load/restore + # can handle the missing values. + continue + with ops.device(hosts[i]): + kwargs["name"] = "{}_{}".format(name, i) + kwargs["shape"] = (p, cols) + if sharding_aware: + shard_info = base.ShardInfo(kwargs["shape"], (offset, 0)) + kwargs["initial_value"] = functools.partial( + initial_value, shard_info=shard_info) + offset += p + else: + kwargs["initial_value"] = functools.partial( + unwrapped_initial_value, kwargs["shape"], dtype=dtype) + variables.append(next_creator(*args, **kwargs)) + return TPUEmbeddingVariable(variables, name=name) + return sharded_variable_creator diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu_embedding_v3.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu_embedding_v3.py new file mode 100644 index 0000000000000000000000000000000000000000..c85af8c2478efb473b56455b01d705770cf5ebe3 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu_embedding_v3.py @@ -0,0 +1,2309 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Mid level API for TPU Embeddings With V2 Embedding Accelerator.""" + +import collections +import copy +import functools +import operator +from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union + +from absl import logging + +from tensorflow.core.framework import attr_value_pb2 +from tensorflow.python.checkpoint import saveable_compat +from tensorflow.python.distribute import device_util +from tensorflow.python.distribute import distribute_lib +from tensorflow.python.distribute import tpu_strategy +from tensorflow.python.distribute import tpu_util +from tensorflow.python.distribute import tpu_values +from tensorflow.python.distribute import values +from tensorflow.python.distribute import values_util +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import device as tf_device +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import sparse_tensor +from tensorflow.python.framework import tensor +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import cond +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import gen_collective_ops +from tensorflow.python.ops import gen_resource_variable_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import variable_scope +from tensorflow.python.ops import variables as tf_variables +from tensorflow.python.ops.ragged import ragged_tensor +from tensorflow.python.tpu import _pywrap_tpu_embedding +from tensorflow.python.tpu import tpu_embedding_base +from tensorflow.python.tpu import tpu_embedding_v2_utils +from tensorflow.python.tpu import tpu_replication +from tensorflow.python.tpu.ops import gen_xla_ops as xla_ops +from tensorflow.python.trackable import base +from tensorflow.python.training.saving import saveable_object +from tensorflow.python.util import compat +from tensorflow.python.util import nest +from tensorflow.python.util import tf_inspect +from tensorflow.python.util.tf_export import tf_export + +_PIPELINE_ATTRIBUTE = "_embedding_pipelining" +_PIPELINE_MODE_FORWARD = "forward" +_PIPELINE_MODE_BACKWARD = "backward" + + +class EmbeddingPipeliningContext(control_flow_ops.ControlFlowContext): + """Sets the _embedding_pipelining attribute on all ops created in the scope.""" + + def __init__(self, mode: str, enable: bool): + super().__init__() + self._name = "EmbeddingPipelinigContext" + self._mode = attr_value_pb2.AttrValue(s=compat.as_bytes(mode)) + self._enable = enable + + def to_control_flow_context_def( + self, context_def: Any, export_scope: Any = None + ): + # pylint: disable=useless-super-delegation + # The method is required by `ControlFlowContext`. 
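+    # Serialization needs nothing beyond the base class behavior; this
+    # context exists only to stamp the _embedding_pipelining attribute on
+    # ops via AddOp below.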
+ super().to_control_flow_context_def(context_def, export_scope) + + def AddOp(self, op: ops.Operation): + # pylint: disable=protected-access + if self._enable: + op._set_attr(_PIPELINE_ATTRIBUTE, self._mode) + if self._outer_context: + self._outer_context.AddOp(op) + + +class TPUEmbeddingShardedSaveable(saveable_object.SaveableObject): + """Defines how to save and restore a shard of TPUEmbedding sharded variable.""" + + def __init__( + self, + variable: tf_variables.Variable, + shard_id: int, + num_shards: int, + shard_dim: int, + name: str, + ): + """Init TPUEmbeddingShardedSaveable.""" + self._shard_id = shard_id + self._variable = variable + + var_offset = [0] * len(variable.shape) + # NOTE: always assume even sharding + var_offset[shard_dim] = shard_id * variable.shape[shard_dim] + fullshape = variable.shape.as_list() + fullshape[shard_dim] = num_shards * fullshape[shard_dim] + save_slice_info = tf_variables.Variable.SaveSliceInfo( + full_name=name, + full_shape=fullshape, + var_offset=var_offset, + var_shape=variable.shape.as_list(), + ) + + spec = saveable_object.SaveSpec( + tensor=variable.read_value, + slice_spec=save_slice_info.spec, + name=name, + dtype=variable.dtype, + device=variable.device, + ) + super().__init__(variable.read_value, [spec], name) + + def restore( + self, + restored_tensors: List[tensor.Tensor], + restored_shapes: List[tensor_shape.TensorShape], + ) -> Any: + del restored_shapes + restored_tensor = restored_tensors[0] + + return values_util.assign_on_device( + self._variable.device, self._variable, restored_tensor + ) + + +@saveable_compat.legacy_saveable_name("") +class TPUEmbeddingShardedVariable( + tpu_values.TPUVariableMixin, values.DistributedVariable +): + """A ShardedVariable class for Embedding tables on TPU.""" + + def _is_mirrored(self) -> bool: + return False + + # Only support sharding on the first dimension. + @property + def shard_dim(self) -> int: + return 0 + + @property + def shape(self) -> tensor_shape.TensorShape: + """Returns the shape of the embedding variable for the current context.""" + local_shape = self._values[0].shape + global_shape = local_shape.as_list() + global_shape[self.shard_dim] = global_shape[self.shard_dim] * len( + self.values + ) + return tensor_shape.TensorShape(global_shape) + + def _write_object_proto(self, proto, options): + super()._write_object_proto(proto, options) + # TODO(b/305882915): Reset the saved model shape to the local shape + # for backward compatibility of users that directly access the full + # variable shape as the shape of values. + proto.variable.shape.CopyFrom(self._values[0].shape.as_proto()) + + def _gather_saveables_for_checkpoint(self) -> Dict[str, Callable[..., Any]]: + """Overrides Trackable method. + + Returns: + A dictionary mapping attribute names to `SaveableObject` factories. 
+ """ + + def _saveable_factory(name=self._common_name): + saveables = [] + num_shards = len(self.values) + for shard_id in range(num_shards): + saveables.append( + TPUEmbeddingShardedSaveable( + self.values[shard_id], + shard_id, + num_shards, + self.shard_dim, + name, + ) + ) + return saveables + + return {base.VARIABLE_VALUE_KEY: _saveable_factory} + + def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False): + """Converts a variable to a tensor.""" + # pylint: disable=protected-access + if tpu_util.enclosing_tpu_context() is None: + return self._values[0].read_value() + else: + return self._read_variable_op() + + def read_value(self) -> Any: + if tpu_util.enclosing_tpu_context() is None: + raise NotImplementedError( + "Reading in cross replica mode is not yet supported" + "for TPUEmbeddingShardedVariable." + ) + else: + return self._read_variable_op() + + def assign( + self, + value: Any, + use_locking: bool = False, + name: Optional[Any] = None, + read_value: bool = True, + ) -> Any: + if tpu_util.enclosing_tpu_context() is None: + # Running in a host context + for device in self.distribute_strategy.extended.worker_devices: + with ops.device(device): + self.assign_on_device(device, value) + return tpu_util.make_raw_assign_fn( + gen_resource_variable_ops.assign_variable_op + )( + self, + value=value, + use_locking=use_locking, + name=name, + read_value=read_value, + ) + + def assign_on_device(self, device, value): + if self._packed_var is None: + raise NotImplementedError("Required packed variable support") + with ops.device(device): + gen_resource_variable_ops.assign_variable_op( + resource=self._packed_var.handle, value=value + ) + + def read_from_device(self, device): + if self._packed_var is None: + raise NotImplementedError("Required packed variable support") + with ops.device(device): + return gen_resource_variable_ops.read_variable_op( + resource=self._packed_var.handle, dtype=self.dtype + ) + + +# TODO(pineapplejuice233): Add debug string representation of the class. +PartitionedCsrFormatTensor = collections.namedtuple( + "PartitionedCsrFormatTensor", + [ + "row_pointers", + "sorted_sample_ids", + "sorted_token_ids", + "sorted_gains", + "sample_count", + "num_minibatches_per_physical_sparse_core", + ], +) + + +# TODO(b/233952762): Add tests of this version of the mid-level API. +@tf_export("tpu.experimental.embedding.TPUEmbeddingV2") +class TPUEmbeddingV2(tpu_embedding_base.TPUEmbeddingBase): + """The TPUEmbedding mid level API running on TPU with sparse core accelerator.""" + + def __init__( + self, + feature_config: Union[tpu_embedding_v2_utils.FeatureConfig, Iterable], # pylint:disable=g-bare-generic + optimizer: Optional[tpu_embedding_v2_utils._Optimizer] = None, # pylint:disable=protected-access + pipeline_execution_with_tensor_core: bool = False, + ): + """Creates the TPUEmbeddingV2 mid level API object. + + Args: + feature_config: A nested structure of + `tf.tpu.experimental.embedding.FeatureConfig` configs. + optimizer: An instance of one of `tf.tpu.experimental.embedding.SGD`, + `tf.tpu.experimental.embedding.Adagrad` or + `tf.tpu.experimental.embedding.Adam`. When not created under TPUStrategy + may be set to None to avoid the creation of the optimizer slot + variables, useful for optimizing memory consumption when exporting the + model for serving where slot variables aren't needed. + pipeline_execution_with_tensor_core: If True, the TPU embedding + computations will overlap with the TensorCore computations (and hence + will be one step old). 
Set to True for improved performance. + + Raises: + ValueError: If optimizer is not one of tf.tpu.experimental.embedding.(SGD, + Adam or Adagrad) or None when created under a TPUStrategy. + RuntimeError: If not created under TPUStrategy. + """ + # We do a clone on the feature_config here as we will alter settings in it + # and we don't want the user to see these. We can't just use clone here + # as we need to maintain some object relationships. + super().__init__(self._clone_feature_config(feature_config), optimizer) + self._strategy = distribute_lib.get_strategy() + if not isinstance( + self._strategy, (tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV2) + ): + raise RuntimeError( + "TPUEmbeddingV2 should be created under TPUStrategy but found {}." + .format(self._strategy) + ) + + self._num_sc_per_chip = ( + self._strategy.extended.tpu_hardware_feature.num_embedding_devices_per_chip + ) + if self._num_sc_per_chip == 0: + logging.warning( + "No embedding devices per chip info is found. Using 4 as the default" + " value for SparseCore." + ) + self._num_sc_per_chip = 4 + + self._num_sc_shards = ( + self._strategy.num_replicas_in_sync * self._num_sc_per_chip + ) + + # We need this in multiple places, so avoid flattening multiple times. + # This order will also be used when stacking features. + self._flat_features = nest.flatten_with_joined_string_paths( + self._feature_config + ) + + self._round_table_sizes() + self._stack_tables_with_same_table_dim_and_optimizer() + + # These hyperparameters will be provided by the FDO. Currently hardcode + # here just for testing. + # TODO(pineapplejuice233): Remove these hyperparameters. + self.max_ids_per_chip_per_sample = 64 + self.max_minibatches_per_sc = 64 + + self._pipelining = pipeline_execution_with_tensor_core + + def _clone_feature_config(self, feature_config): + old_to_new_table = {} + new_features = [] + + for old_feature in nest.flatten(feature_config): + feature = copy.copy(old_feature) + if feature.table not in old_to_new_table: + old_to_new_table[feature.table] = copy.copy(feature.table) + feature.table = old_to_new_table[feature.table] + new_features.append(feature) + + return nest.pack_sequence_as(feature_config, new_features) + + def _round_table_sizes(self): + num_shards = self._num_sc_shards * 8 + + self._table_to_padding_columns = {} + self._table_to_padding_rows = {} + + for table in self._table_config: + extra_rows = ( + num_shards - (table.vocabulary_size % num_shards) + ) % num_shards + extra_cols = (8 - (table.dim % 8)) % 8 + if extra_rows != 0: + if table.vocabulary_size < num_shards: + logging.warning( + "!!! Adding %d extra rows to a small table %s!!! 
Table had" + " %d rows before padding and %d rows after padding.", + extra_rows, + table.name, + table.vocabulary_size, + table.vocabulary_size + extra_rows, + ) + else: + logging.warning( + "Adding %d extra rows to table %s to get %d rows.", + extra_rows, + table.name, + table.vocabulary_size + extra_rows, + ) + if extra_cols != 0: + logging.warning( + "Adding %d extra columns to table %s to get %d columns.", + extra_cols, + table.name, + table.dim + extra_cols, + ) + self._table_to_padding_columns[table.name] = extra_cols + self._table_to_padding_rows[table.name] = extra_rows + table.vocabulary_size += extra_rows + table.dim += extra_cols + return + + @property + def embedding_tables( + self, + ) -> Dict[tpu_embedding_v2_utils.TableConfig, tf_variables.Variable]: + """Returns a dict of embedding tables, keyed by `TableConfig`.""" + self._maybe_build() + # Only return the tables and not the slot variables. + return { + stacked_table_name: self._variables[stacked_table_name]["parameters"] + for stacked_table_name in self._stacked_table_to_tables + } + + @property + def embedding_table_shards( + self, + ) -> Dict[tpu_embedding_v2_utils.TableConfig, List[tf_variables.Variable]]: + """Returns a dict of embedding tables, keyed by `TableConfig`.""" + self._maybe_build() + + # This reflects the device assignment used by the TPU Strategy. + ordered_devices = [] + for devices in self._strategy.extended._tpu_devices: # pylint: disable=protected-access + ordered_devices.extend(devices) + + table_shards = { + name: [ + (device, var.read_from_device(device)) for device in ordered_devices + ] + for name, var in self.embedding_tables.items() + } + + return table_shards + + @property + def variables( + self, + ) -> Dict[ + tpu_embedding_v2_utils.TableConfig, Dict[str, tf_variables.Variable] + ]: + """Returns a dict of variables, keyed by `TableConfig`, then by slot name.""" + self._maybe_build() + return self._variables + + def _create_variables( + self, + stacked_tables: List[tpu_embedding_v2_utils.TableConfig], + stacked_table_name: str, + ) -> Dict[str, tf_variables.Variable]: + """Create all variables including table variables and slot variables.""" + total_vocab_size = sum([table.vocabulary_size for table in stacked_tables]) + table_dim = stacked_tables[0].dim + variable_shape = (total_vocab_size, table_dim) + optimizer = stacked_tables[0].optimizer + + def table_initialize_fn(shape, dtype, shard_info=None): + # Concat all the tables along the first axis. + table_tensors = [] + # Temporary patch, we need to initialize tables with the SC level + # sharding. Note that we need to ensure that the vocab size is divisible + # by the global number of SC. + for i in range(self._num_sc_per_chip): + # Each underlying table has column lookups rotated by 1 to avoid hot + # spots on core 0 for id=0. We shift the initializer as well to help + # with comparisons against CPU. + full_tables = {} + for table in stacked_tables: + shift = self._table_to_stacked_table_offset[table.name][2] + arg_spec = tf_inspect.getfullargspec(table.initializer) + sharding_aware = ( + "shard_info" in arg_spec.args + or "shard_info" in arg_spec.kwonlyargs + ) + + # If the user-initializer is not sharding aware, use it to construct + # the full initial table and then slice out the individual shards. 
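+          # The full table is materialized at most once per table name and
+          # cached in `full_tables`, so later SparseCore shards reuse it
+          # rather than re-running the initializer.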
+ if shard_info and not sharding_aware: + if table.name not in full_tables: + full_tables[table.name] = table.initializer( + shape=(table.vocabulary_size, table.dim), + dtype=dtype, + ) + if shard_info is not None: + # A partition contains all of the tables. + partition_shape = shard_info.shape + partition_offset = shard_info.offset + # Scale the partition to get sizes for the current table, + # then select this sc shard. + sc_shard_size = ( + table.vocabulary_size + * partition_shape[0] + // total_vocab_size + // self._num_sc_per_chip + ) + sc_shard_offset = ( + table.vocabulary_size * partition_offset[0] // total_vocab_size + ) + i * sc_shard_size + sc_shard_info = base.ShardInfo( + [sc_shard_size, table.dim], [sc_shard_offset, 0] + ) + if sharding_aware: + sc_shard = table.initializer( + shape=(table.vocabulary_size, table.dim), + dtype=dtype, + shard_info=sc_shard_info, + ) + else: + shard_index = sc_shard_info.offset[0] // sc_shard_info.shape[0] + # Rotate the shards. + shard_index = (shard_index - shift) % self._num_sc_shards + tpu_devices = self._strategy.extended._tpu_devices # pylint:disable=protected-access + num_replicas, num_cores_per_replica = tpu_devices.shape + num_sc = ( + num_replicas * num_cores_per_replica * self._num_sc_per_chip + ) + sc_shard = full_tables[table.name][shard_index::num_sc, :] + else: + sc_shard = table.initializer( + shape=( + (table.vocabulary_size * shape[0]) + // total_vocab_size + // self._num_sc_per_chip, + table.dim, + ), + dtype=dtype, + ) + table_tensors.append(sc_shard) + return array_ops.concat(table_tensors, axis=0) + + def getter(name, shape, dtype, initializer, trainable): + del shape + initial_value = functools.partial( + initializer, shape=variable_shape, dtype=dtype + ) + # _add_variable_with_custom_getter clears the shape sometimes, so we + # take the global shape from outside the getter. + return tf_variables.Variable( + name=name, + initial_value=initial_value, + shape=variable_shape, + dtype=dtype, + trainable=trainable, + ) + + def variable_creator(name, initializer): + # Use add_variable_with_custom_getter here so that we take advantage of + # the checkpoint loading to allow restore before the variables get + # created which avoids double initialization. 
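+    # Unlike the v2 mid level API, these variables are created with
+    # trainable=False; updates are applied explicitly through the XLA
+    # SparseCore optimizer ops in apply_gradients.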
+ return self._add_variable_with_custom_getter( + name=name, + initializer=initializer, + shape=variable_shape, + dtype=dtypes.float32, + getter=getter, + trainable=False, + ) + + with variable_scope.variable_creator_scope( + make_sharded_variable_creator(self._strategy) + ): + parameters = variable_creator(stacked_table_name, table_initialize_fn) + + def slot_creator(name, initializer): + return variable_creator(stacked_table_name + "/" + name, initializer) + + if optimizer is not None: + with variable_scope.variable_creator_scope( + make_sharded_variable_creator(self._strategy) + ): + slot_vars = optimizer._create_slots(parameters, slot_creator) # pylint: disable=protected-access + else: + slot_vars = {} + slot_vars["parameters"] = parameters + return slot_vars + + def _stack_tables_with_same_table_dim_and_optimizer(self): + """Stack tables with the same table dim and optimizer.""" + logging.info( + "Number of tables before stacking is %d", len(self._table_config) + ) + + table_names = [] + table_widths = [] + table_heights = [] + table_num_samples = [] + table_groups = [] + + table_data_to_group = {} + table_to_num_samples = {table.name: 0 for table in self._table_config} + table_name_to_table = {} + for _, feature in self._flat_features: + table_to_num_samples[feature.table.name] += functools.reduce( + operator.mul, feature.output_shape + ) + + for table in self._table_config: + table_name_to_table[table.name] = table + key = ( + table.dim, + table.optimizer, + repr(table.quantization_config) + if table.quantization_config + else None, + ) + if key not in table_data_to_group: + table_data_to_group[key] = len(table_data_to_group) + table_groups.append(table_data_to_group[key]) + table_names.append(table.name) + table_widths.append(table.dim) + table_heights.append(table.vocabulary_size) + table_num_samples.append(table_to_num_samples[table.name]) + + table_stacks_by_name = _pywrap_tpu_embedding.stack_tables( + table_heights, + table_widths, + table_num_samples, + table_groups, + table_names, + self._strategy.num_replicas_in_sync, + ) + + table_stacks = [ + [table_name_to_table[table_name] for table_name in stack_by_name] + for stack_by_name in table_stacks_by_name + ] + + # Store the mapping between stacked table names to the actual tableConfigs. + self._stacked_table_to_tables = {} + # Store the mapping between table to name of the stacked table which + # contains the table and its offset. 
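+    # Each entry maps a table name to a tuple of (stacked table name, row
+    # offset within the stacked table, shard rotation index, i.e.
+    # num_sc_per_chip * position in the stack).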
+ self._table_to_stacked_table_offset = {} + # Save Quantization Config per stacked tables + self._quantization_configs = {} + for tables in table_stacks: + stacked_table_name = "_".join(map(lambda table: table.name, tables)) + if stacked_table_name in self._stacked_table_to_tables: + raise ValueError(f"{stacked_table_name} already exists!") + self._stacked_table_to_tables[stacked_table_name] = tables + self._quantization_configs[stacked_table_name] = tables[ + 0 + ].quantization_config + + current_offset = 0 + current_index = 0 + for table in tables: + self._table_to_stacked_table_offset[table.name] = ( + stacked_table_name, + current_offset, + self._num_sc_per_chip * current_index, + ) + current_offset += table.vocabulary_size + current_index += 1 + + logging.info( + "Number of tables after stacking is %d.", + len(self._stacked_table_to_tables), + ) + + self._feature_to_sample_offset = {} + self._table_to_sample_count = { + table_name: 0 for table_name in self._stacked_table_to_tables + } + for feature_path, feature in self._flat_features: + stacked_table_name = self._table_to_stacked_table_offset[ + feature.table.name + ][0] + self._feature_to_sample_offset[feature_path] = ( + self._table_to_sample_count[stacked_table_name] + ) + self._table_to_sample_count[stacked_table_name] += functools.reduce( + operator.mul, feature.output_shape + ) + + def _create_variables_and_slots( + self, + ) -> Dict[str, Dict[str, tf_variables.Variable]]: + """Create variables for TPU embeddings. + + Returns: + A dict of dicts. The outer dict is keyed by the table names and the inner + dicts are keyed by 'parameters' and the slot variable names. + """ + variables = {} + for stacked_table_name, tables in self._stacked_table_to_tables.items(): + variables[stacked_table_name] = self._create_variables( + tables, stacked_table_name=stacked_table_name + ) + return variables + + def _maybe_build(self): + if not self._built: + # This can be called while tracing a function, so we wrap the + # initialization code with init_scope so it runs eagerly, this means that + # it will not be included in the function graph generated by tracing so + # that we can be sure that we only initialize the TPU for embeddings + # exactly once. + with ops.init_scope(): + self.build() + + def build(self): + """Create variables and slots variables for TPU embeddings.""" + if self._built: + return + self._variables = self._create_variables_and_slots() + self._built = True + + def apply_gradients( + self, + gradients: Any, + preserved_outputs: Dict[str, PartitionedCsrFormatTensor], + ): + """Applies the gradient update to the embedding tables. + + If a gradient of `None` is passed in any position of the nested structure, + then a gradient update with a zero gradient is applied for that feature. + For optimizers like SGD or Adagrad, this is the same as applying no update + at all. For lazy Adam and other sparsely applied optimizers with decay, + ensure you understand the effect of applying a zero gradient. + + Args: + gradients: A nested structure of gradients, with structure matching the + `feature_config` passed to this object. + preserved_outputs: A dicts of PartitionedCsrFormatTensor, coming from the + second output of the embedding lookup call. + + Raises: + RuntimeError: if not built. + ValueError: If a non-`tf.Tensor` non-`None` gradient is passed in, or a + `tf.Tensor` of the incorrect shape is passed in. Also if + the size of any sequence in `gradients` does not match corresponding + sequence in `feature_config`. 
+      TypeError: If the type of any sequence in `gradients` does not match
+        corresponding sequence in `feature_config`.
+    """
+    if not self._built:
+      raise RuntimeError(
+          "apply_gradients called on unbuilt TPUEmbeddingV2 object. Please"
+          " either call the embedding lookup method first or manually call the"
+          " build method."
+      )
+    nest.assert_same_structure(self._feature_config, gradients)
+
+    # Note that stacking gradients is placed on the core of the training step
+    # to reduce the number of input/output arguments of the training loop
+    # during pipelining.
+    gradients = self._stack_gradients(gradients)
+
+    context = EmbeddingPipeliningContext(
+        _PIPELINE_MODE_BACKWARD, self._pipelining
+    )
+    context.Enter()
+
+    def _wrap_param(param, dtype=dtypes.float32):
+      if callable(param):
+        param = math_ops.cast(param(), dtype=dtype)
+      return ops.convert_to_tensor(param, dtype=dtype)
+
+    # Take num_minibatches_per_physical_sparse_core from any table as
+    # they are the same across tables.
+    num_minibatches_per_physical_sparse_core = list(preserved_outputs.values())[
+        0
+    ].num_minibatches_per_physical_sparse_core
+
+    for table_name in self._stacked_table_to_tables:
+      gradient = gradients[table_name]
+      partitioned_tensor = preserved_outputs[table_name]
+
+      table = self.variables[table_name]["parameters"]
+      optimizer = self._stacked_table_to_tables[table_name][0].optimizer
+      if isinstance(optimizer, tpu_embedding_v2_utils.SGD):
+        updated_embedding_table = xla_ops.xla_sparse_dense_matmul_grad_with_sgd_and_csr_input(
+            row_pointers=partitioned_tensor.row_pointers,
+            sorted_sample_ids=partitioned_tensor.sorted_sample_ids,
+            sorted_token_ids=partitioned_tensor.sorted_token_ids,
+            sorted_gains=partitioned_tensor.sorted_gains,
+            activation_gradients=gradient,
+            learning_rate=_wrap_param(optimizer.learning_rate),
+            embedding_table=table.read_value(),
+            num_minibatches_per_physical_sparse_core=num_minibatches_per_physical_sparse_core,
+            table_name=table_name,
+        )
+        table.assign(updated_embedding_table)
+      elif isinstance(optimizer, tpu_embedding_v2_utils.Adagrad):
+        accumulators = self.variables[table_name]["accumulators"]
+        updated_embedding_table, updated_accumulator = (
+            xla_ops.xla_sparse_dense_matmul_grad_with_adagrad_and_csr_input(
+                row_pointers=partitioned_tensor.row_pointers,
+                sorted_sample_ids=partitioned_tensor.sorted_sample_ids,
+                sorted_token_ids=partitioned_tensor.sorted_token_ids,
+                sorted_gains=partitioned_tensor.sorted_gains,
+                activation_gradients=gradient,
+                learning_rate=_wrap_param(optimizer.learning_rate),
+                embedding_table=table.read_value(),
+                accumulator=accumulators.read_value(),
+                num_minibatches_per_physical_sparse_core=num_minibatches_per_physical_sparse_core,
+                table_name=table_name,
+            )
+        )
+        accumulators.assign(updated_accumulator)
+        table.assign(updated_embedding_table)
+      elif isinstance(optimizer, tpu_embedding_v2_utils.AdagradMomentum):
+        accumulators = self.variables[table_name]["accumulators"]
+        momenta = self.variables[table_name]["momenta"]
+        updated_embedding_table, updated_accumulator, updated_momenta = (
+            xla_ops.xla_sparse_dense_matmul_grad_with_adagrad_momentum_and_csr_input(
+                row_pointers=partitioned_tensor.row_pointers,
+                sorted_sample_ids=partitioned_tensor.sorted_sample_ids,
+                sorted_token_ids=partitioned_tensor.sorted_token_ids,
+                sorted_gains=partitioned_tensor.sorted_gains,
+                activation_gradients=gradient,
+                learning_rate=_wrap_param(optimizer.learning_rate),
+                embedding_table=table.read_value(),
+                accumulator=accumulators.read_value(),
+
momenta=momenta.read_value(), + num_minibatches_per_physical_sparse_core=num_minibatches_per_physical_sparse_core, + use_nesterov=optimizer.use_nesterov, + exponent=optimizer.exponent, + beta1=optimizer.momentum, + beta2=optimizer.beta2, + epsilon=optimizer.epsilon, + table_name=table_name, + ) + ) + momenta.assign(updated_momenta) + accumulators.assign(updated_accumulator) + table.assign(updated_embedding_table) + elif isinstance(optimizer, tpu_embedding_v2_utils.Adam): + momenta = self.variables[table_name]["momenta"] + velocity = self.variables[table_name]["velocities"] + updated_embedding_table, updated_momenta, updated_velocity = ( + xla_ops.xla_sparse_dense_matmul_grad_with_adam_and_csr_input( + row_pointers=partitioned_tensor.row_pointers, + sorted_sample_ids=partitioned_tensor.sorted_sample_ids, + sorted_token_ids=partitioned_tensor.sorted_token_ids, + sorted_gains=partitioned_tensor.sorted_gains, + activation_gradients=gradient, + learning_rate=_wrap_param(optimizer.learning_rate), + embedding_table=table.read_value(), + momenta=momenta.read_value(), + velocity=velocity.read_value(), + num_minibatches_per_physical_sparse_core=num_minibatches_per_physical_sparse_core, + use_sum_inside_sqrt=optimizer.sum_inside_sqrt, + beta1=optimizer.beta_1, + beta2=optimizer.beta_2, + epsilon=optimizer.epsilon, + table_name=table_name, + ) + ) + velocity.assign(updated_velocity) + momenta.assign(updated_momenta) + table.assign(updated_embedding_table) + elif isinstance(optimizer, tpu_embedding_v2_utils.FTRL): + accumulators = self.variables[table_name]["accumulators"] + linears = self.variables[table_name]["linears"] + (updated_table_tensor, updated_accum_tensor, updated_linear_tensor) = ( + xla_ops.xla_sparse_dense_matmul_grad_with_ftrl_and_csr_input( + row_pointers=partitioned_tensor.row_pointers, + sorted_sample_ids=partitioned_tensor.sorted_sample_ids, + sorted_token_ids=partitioned_tensor.sorted_token_ids, + sorted_gains=partitioned_tensor.sorted_gains, + activation_gradients=gradient, + learning_rate=_wrap_param(optimizer.learning_rate), + embedding_table=table.read_value(), + accumulator=accumulators.read_value(), + linear=linears.read_value(), + num_minibatches_per_physical_sparse_core=num_minibatches_per_physical_sparse_core, + multiply_linear_by_learning_rate=optimizer.multiply_linear_by_learning_rate, + beta=optimizer.beta, + learning_rate_power=optimizer.learning_rate_power, + l1_regularization_strength=optimizer.l1_regularization_strength, + l2_regularization_strength=optimizer.l2_regularization_strength, + table_name=table_name, + ) + ) + linears.assign(updated_linear_tensor) + accumulators.assign(updated_accum_tensor) + table.assign(updated_table_tensor) + else: + raise ValueError("Unsupported optimizer in minibatching mode.") + + context.Exit() + + def _stack_gradients(self, gradients): + """Stack the incoming gradients to per table gradients.""" + + # Gradients are stacked in a particular order. That order is the order + # features appear in the self._flat_features. + table_to_gradient_list = { + table_name: [] for table_name in self._stacked_table_to_tables + } + flattend_gradients = nest.flatten(gradients) + for gradient, (path, feature) in zip( + flattend_gradients, self._flat_features + ): + sample_count = functools.reduce(operator.mul, feature.output_shape) + if gradient is not None and not isinstance(gradient, tensor.Tensor): + raise ValueError( + f"found non-tensor type: {type(gradient)} at path {path}." 
+        )
+      if gradient is None:
+        # TODO(bfontain): In the case that an entire table's gradient is gone
+        # then maybe we can just omit the update altogether?
+        logging.warning(
+            (
+                "No gradient passed for feature %s, sending zero "
+                "gradient. This may not be correct behavior for certain "
+                "optimizers like Adam."
+            ),
+            path,
+        )
+        gradient = array_ops.zeros(
+            (sample_count, feature.table.dim), dtype=dtypes.float32
+        )
+      table_name = self._table_to_stacked_table_offset[feature.table.name][0]
+      extra_cols = self._table_to_padding_columns[feature.table.name]
+      gradient = array_ops.reshape(
+          gradient, [-1, feature.table.dim - extra_cols]
+      )
+      if extra_cols != 0:
+        gradient = array_ops.pad(gradient, [[0, 0], [0, extra_cols]])
+      # Ensure static shape after padding.
+      gradient.set_shape([sample_count, feature.table.dim])
+      table_to_gradient_list[table_name].append(gradient)
+
+    return {
+        table_name: array_ops.concat(table_to_gradient_list[table_name], axis=0)
+        for table_name in table_to_gradient_list
+    }
+
+  def _unstack_activations(self, activations: Dict[str, tensor.Tensor]):
+    """Unstack the incoming per-table activations into per-feature tensors."""
+
+    # Activations are stacked in a particular order. That order is the order
+    # features appear in the self._flat_features.
+
+    flattened_activations = []
+    table_to_current_offset = {
+        table_name: 0 for table_name in self._stacked_table_to_tables
+    }
+    for _, feature in self._flat_features:
+      sample_count = functools.reduce(operator.mul, feature.output_shape)
+      table_name = self._table_to_stacked_table_offset[feature.table.name][0]
+      extra_cols = self._table_to_padding_columns[feature.table.name]
+      activation = array_ops.slice(
+          activations[table_name],
+          [table_to_current_offset[table_name], 0],
+          [sample_count, feature.table.dim - extra_cols],
+      )
+
+      # Reshape to follow the user's requested output shape.
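+      # For example, output_shape (8, 4) on a 16-wide table with no padding
+      # slices a (32, 16) block and reshapes it to (8, 4, 16); the sizes here
+      # are illustrative only.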
+ activation = array_ops.reshape( + activation, + list(feature.output_shape) + [feature.table.dim - extra_cols], + ) + flattened_activations.append(activation) + table_to_current_offset[table_name] += sample_count + + return nest.pack_sequence_as(self._feature_config, flattened_activations) + + def __call__( + self, features: Any, weights: Optional[Any] = None + ) -> Tuple[Any, Dict[str, PartitionedCsrFormatTensor]]: + """Call the mid level api to do embedding lookup.""" + return self.embedding_lookup(features, weights) + + @staticmethod + def _convert_input_feature_to_coo( + input_feature: Union[ + tensor.Tensor, sparse_tensor.SparseTensor, ragged_tensor.RaggedTensor + ], + weight: Optional[tensor.Tensor], + feature_config: tpu_embedding_v2_utils.FeatureConfig, + row_offset: int, + col_offset: int, + col_shift: int, + vocab_size: int, + num_sc_shards: int, + ) -> Any: + """Convert any of the expected input types to a COO format.""" + sample_count = functools.reduce(operator.mul, feature_config.output_shape) + if isinstance(input_feature, tensor.Tensor): + input_feature = array_ops.reshape(input_feature, [-1]) + if weight is None: + weight = array_ops.ones_like(input_feature, dtype=dtypes.float32) + elif isinstance(weight, tensor.Tensor): + weight = array_ops.reshape(weight, [-1]) + else: + raise ValueError( + f"Expect weight to be Tensor type but got {type(weight)}" + ) + row_ids, col_ids, gains = xla_ops.convert_to_coo_tensor( + indices_or_row_splits=array_ops.zeros((0,), dtype=dtypes.int32), + values=math_ops.cast(input_feature, dtype=dtypes.int32), + weights=math_ops.cast(weight, dtypes.float32), + sample_count=sample_count, + combiner=feature_config.table.combiner, + ) + elif isinstance(input_feature, sparse_tensor.SparseTensor): + if weight is None: + weight = array_ops.ones_like(input_feature.values, dtype=dtypes.float32) + elif isinstance(weight, sparse_tensor.SparseTensor): + weight = weight.values + else: + raise ValueError( + f"Expect weight to be SparseTensor type but got {type(weight)}" + ) + row_ids, col_ids, gains = xla_ops.convert_to_coo_tensor( + indices_or_row_splits=math_ops.cast( + input_feature.indices, dtype=dtypes.int32 + ), + values=math_ops.cast(input_feature.values, dtype=dtypes.int32), + weights=math_ops.cast(weight, dtypes.float32), + sample_count=sample_count, + combiner=feature_config.table.combiner, + ) + elif isinstance(input_feature, ragged_tensor.RaggedTensor): + if not weight: + weight = array_ops.ones_like(input_feature.values, dtype=dtypes.float32) + elif isinstance(weight, ragged_tensor.RaggedTensor): + weight = weight.values + else: + raise ValueError( + f"Expect weight to be RaggedTensor type but got {type(weight)}" + ) + row_ids, col_ids, gains = xla_ops.convert_to_coo_tensor( + indices_or_row_splits=math_ops.cast( + input_feature.row_splits, dtype=dtypes.int32 + ), + values=math_ops.cast(input_feature.values, dtype=dtypes.int32), + weights=math_ops.cast(weight, dtypes.float32), + sample_count=sample_count, + combiner=feature_config.table.combiner, + ) + else: + raise ValueError( + f"Input of unknown type {type(input_feature)}. Please only pass " + "Tensor, SparseTensor or RaggedTensor as input to embedding " + "lookup." 
+ ) + return ( + row_ids + row_offset, + ( + (col_ids + col_shift) % num_sc_shards + + (col_ids // num_sc_shards * num_sc_shards) + + col_offset + ), + gains, + ) + + @staticmethod + def _preprocess_inputs_and_weights_to_coo_tensor( + flat_inputs: Any, + flat_weights: Any, + flat_features: Any, + stacked_table_to_tables: Dict[str, Any], + table_to_stacked_table_offset: Dict[str, Tuple[str, int, int]], + feature_to_sample_offset: Dict[str, int], + num_sc_shards: int, + ) -> Dict[str, Any]: + """Convert the raw inputs into coo tensor.""" + table_to_list_of_coos = { + table_name: ([], [], []) for table_name in stacked_table_to_tables + } + for inp, weight, (feature_path, feature) in zip( + flat_inputs, flat_weights, flat_features + ): + table_name, col_offset, col_shift = table_to_stacked_table_offset[ + feature.table.name + ] + row_offset = feature_to_sample_offset[feature_path] + # Consider making this into one op per table rather than per feature? + row_ids, col_ids, gains = TPUEmbeddingV2._convert_input_feature_to_coo( + inp, + weight, + feature, + row_offset, + col_offset, + col_shift, + feature.table.vocabulary_size, + num_sc_shards, + ) + table_to_list_of_coos[table_name][0].append(row_ids) + table_to_list_of_coos[table_name][1].append(col_ids) + table_to_list_of_coos[table_name][2].append(gains) + + return table_to_list_of_coos + + @staticmethod + def _get_minibatch_splits_from_coo_tensor( + num_replicas_in_sync: int, + table_to_list_of_coos: Dict[str, Any], + stacked_table_to_tables: Dict[str, Any], + table_to_sample_count: Dict[str, int], + num_sc_per_chip: int, + ) -> Tuple[Dict[str, Any], List[tensor.Tensor]]: + """Compute minibatch splits from the coo tensor.""" + table_to_sorted_coo_tensor = {} + per_replica_table_splits = [] + for table_name in stacked_table_to_tables: + row_ids = array_ops.concat(table_to_list_of_coos[table_name][0], axis=0) + col_ids = array_ops.concat(table_to_list_of_coos[table_name][1], axis=0) + gains = array_ops.concat(table_to_list_of_coos[table_name][2], axis=0) + + # Feature width are the same across stacked tables. 
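+      # Tables are only grouped into a stack when their embedding dimension
+      # and optimizer match, so the first table's dim is representative.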
+ feature_width = stacked_table_to_tables[table_name][0].dim + + total_vocab_size = sum( + [ + table.vocabulary_size + for table in stacked_table_to_tables[table_name] + ] + ) + + ( + sorted_row_ids, + sorted_col_ids, + sorted_gains, + splits, + id_counts, + unused_max_ids, + unused_max_uniques, + ) = xla_ops.get_minibatch_splits_with_physical_replica( + program_key=constant_op.constant([""]), + row_ids=row_ids, + col_ids=col_ids, + gains=gains, + sample_count=table_to_sample_count[table_name], + num_replica=num_replicas_in_sync, + table_vocab_size=total_vocab_size, + feature_width=feature_width, + num_sc_per_chip=num_sc_per_chip, + table_name=table_name, + mini_batch_splits="", + ) + + table_to_sorted_coo_tensor[table_name] = ( + sorted_row_ids, + sorted_col_ids, + sorted_gains, + id_counts, + ) + + per_replica_table_splits.append(splits) + + return (table_to_sorted_coo_tensor, per_replica_table_splits) + + @staticmethod + def _get_minibatches_from_sorted_coo_tensor( + num_replicas_in_sync: int, + max_ids_per_chip_per_sample: int, + max_minibatches_per_sc: int, + table_to_sorted_coo_tensor: Dict[str, Any], + cross_replica_table_splits: tensor.Tensor, + stacked_table_to_tables: Dict[str, Any], + table_to_sample_count: Dict[str, int], + num_sc_per_chip: int, + ) -> Any: + """Partition the sorted coo tensor into minibatches.""" + table_to_csr_format_tensor = {} + for table_name in stacked_table_to_tables: + sorted_row_ids, sorted_col_ids, sorted_gains, id_counts = ( + table_to_sorted_coo_tensor[table_name] + ) + + # Feature width are the same across stacked tables. + feature_width = stacked_table_to_tables[table_name][0].dim + + total_vocab_size = sum( + [ + table.vocabulary_size + for table in stacked_table_to_tables[table_name] + ] + ) + ( + row_pointers, + sorted_sample_ids, + sorted_token_ids, + sorted_gains, + row_pointers_unpadded_size, + ids_unpadded_size, + num_minibatches_per_physical_sparse_core, + ) = xla_ops.get_minibatches_in_csr_with_physical_replica( + program_key=constant_op.constant([""]), + row_ids=sorted_row_ids, + col_ids=sorted_col_ids, + gains=sorted_gains, + splits=cross_replica_table_splits, + id_counts=id_counts, + sample_count=table_to_sample_count[table_name], + num_replica=num_replicas_in_sync, + max_minibatches_per_sc=max_minibatches_per_sc, + max_ids_per_chip_per_sample=max_ids_per_chip_per_sample, + table_vocab_size=total_vocab_size, + feature_width=feature_width, + num_sc_per_chip=num_sc_per_chip, + table_name=table_name, + mini_batch_in_csr="", + ) + table_to_csr_format_tensor[table_name] = ( + PartitionedCsrFormatTensor( + row_pointers=row_pointers, + sorted_sample_ids=sorted_sample_ids, + sorted_token_ids=sorted_token_ids, + sorted_gains=sorted_gains, + sample_count=table_to_sample_count[table_name], + num_minibatches_per_physical_sparse_core=num_minibatches_per_physical_sparse_core, + ), + row_pointers_unpadded_size, + ids_unpadded_size, + ) + return table_to_csr_format_tensor + + # TODO(pineapplejuice233): Duplicated helper function from tpu_embedding_v2.py. Remove + # this once this file is open souced. + def _raise_error_for_incorrect_control_flow_context(self): + """Raises an error if we are not in the TPUReplicateContext.""" + # Do not allow any XLA control flow (i.e. control flow in between a + # TPUStrategy's run call and the call to this function), as we can't + # extract the enqueue from the head when in XLA control flow. 
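+    # Walk outward through the nested graphs (e.g. tf.functions) looking for
+    # a TPUReplicateContext, which indicates we are under `strategy.run`.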
+ graph = ops.get_default_graph() + in_tpu_ctx = False + while graph is not None: + ctx = graph._get_control_flow_context() # pylint: disable=protected-access + while ctx is not None: + if isinstance(ctx, tpu_replication.TPUReplicateContext): + in_tpu_ctx = True + break + ctx = ctx.outer_context + if in_tpu_ctx: + break + graph = getattr(graph, "outer_graph", None) + if graph != ops.get_default_graph() and in_tpu_ctx: + raise RuntimeError( + "Current graph {} does not match graph which contains " + "TPUReplicateContext {}. This is most likely due to the fact that " + "enqueueing embedding data is called inside control flow or a " + "tf.function inside `strategy.run`. This is not supported because " + "outside compilation fails to extract the enqueue ops as the head of " + "a computation.".format(ops.get_default_graph(), graph) + ) + return in_tpu_ctx + + @staticmethod + def preprocess_features( + num_replicas_in_sync: int, + max_ids_per_chip_per_sample: int, + max_minibatches_per_sc: int, + num_sc_per_chip: int, + num_sc_shards: int, + stacked_table_to_tables: Dict[str, Any], + table_to_stacked_table_offset: Dict[str, Tuple[str, int, int]], + table_to_sample_count: Dict[str, int], + feature_to_sample_offset: Dict[str, int], + flat_features: Any, + flat_inputs: Any, + flat_weights: Optional[Any] = None, + ) -> Any: + """Function to preprocess features.""" + # Preprocess the inputs into COO tensor. + table_to_list_of_coos = ( + TPUEmbeddingV2._preprocess_inputs_and_weights_to_coo_tensor( + flat_inputs, + flat_weights, + flat_features, + stacked_table_to_tables, + table_to_stacked_table_offset, + feature_to_sample_offset, + num_sc_shards, + ) + ) + + # Get minibatch splits from the COO tensor. + table_to_sorted_coo_tensor, per_replica_table_splits = ( + TPUEmbeddingV2._get_minibatch_splits_from_coo_tensor( + num_replicas_in_sync, + table_to_list_of_coos, + stacked_table_to_tables, + table_to_sample_count, + num_sc_per_chip, + ) + ) + + # Collective all gather across replicas to get final splits. + cross_replica_table_splits = gen_collective_ops.collective_gather_v2( + input=per_replica_table_splits, + group_size=num_replicas_in_sync, + group_key=0, + instance_key=math_ops.cast(xla_ops.global_iter_id(), dtypes.int32), + ordering_token=[], + ) + + # Use the final splits to convert COO tensors into CSR formatted tensor. + table_to_csr_format_tensor = ( + TPUEmbeddingV2._get_minibatches_from_sorted_coo_tensor( + num_replicas_in_sync, + max_ids_per_chip_per_sample, + max_minibatches_per_sc, + table_to_sorted_coo_tensor, + cross_replica_table_splits, + stacked_table_to_tables, + table_to_sample_count, + num_sc_per_chip, + ) + ) + + return table_to_csr_format_tensor + + def enqueue( + self, + features: Any, + weights: Optional[Any] = None, + device: Optional[str] = None, + ) -> Any: + """Preprocessing the features on host.""" + nest.assert_same_structure(self._feature_config, features) + + flat_inputs = nest.flatten(features) + flat_weights = [None] * len(flat_inputs) + if weights is not None: + nest.assert_same_structure(self._feature_config, weights) + flat_weights = nest.flatten(weights) + + in_tpu_context = self._raise_error_for_incorrect_control_flow_context() + + if in_tpu_context: + # Automatically apply outside compilation if we are in tpu context. 
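+      # Outside compilation runs the sparse preprocessing on the host while
+      # the surrounding replicated TPU computation stays on device.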
+ return tpu_replication.outside_compilation( + TPUEmbeddingV2.preprocess_features, + num_replicas_in_sync=self._strategy.num_replicas_in_sync, + max_ids_per_chip_per_sample=self.max_ids_per_chip_per_sample, + max_minibatches_per_sc=self.max_minibatches_per_sc, + num_sc_per_chip=self._num_sc_per_chip, + num_sc_shards=self._num_sc_shards, + stacked_table_to_tables=self._stacked_table_to_tables, + table_to_stacked_table_offset=self._table_to_stacked_table_offset, + table_to_sample_count=self._table_to_sample_count, + feature_to_sample_offset=self._feature_to_sample_offset, + flat_features=self._flat_features, + flat_inputs=flat_inputs, + flat_weights=flat_weights, + ) + elif device is None: + # This is used by keras function tracing. Use any of the TPU devices + # and trace once for a single device. + tpu_devices = self._strategy.extended._tpu_devices # pylint:disable=protected-access + + with ops.device(device_util.get_host_for_device(tpu_devices[0][0])): + return TPUEmbeddingV2.preprocess_features( + num_replicas_in_sync=self._strategy.num_replicas_in_sync, + max_ids_per_chip_per_sample=self.max_ids_per_chip_per_sample, + max_minibatches_per_sc=self.max_minibatches_per_sc, + num_sc_per_chip=self._num_sc_per_chip, + num_sc_shards=self._num_sc_shards, + stacked_table_to_tables=self._stacked_table_to_tables, + table_to_stacked_table_offset=self._table_to_stacked_table_offset, + table_to_sample_count=self._table_to_sample_count, + feature_to_sample_offset=self._feature_to_sample_offset, + flat_features=self._flat_features, + flat_inputs=flat_inputs, + flat_weights=flat_weights, + ) + else: + device_spec = tf_device.DeviceSpec.from_string(device) + if device_spec.device_type != "TPU": + raise ValueError("Non-TPU device {} passed to enqueue.".format(device)) + + with ops.device(device_util.get_host_for_device(device)): + return TPUEmbeddingV2.preprocess_features( + num_replicas_in_sync=self._strategy.num_replicas_in_sync, + max_ids_per_chip_per_sample=self.max_ids_per_chip_per_sample, + max_minibatches_per_sc=self.max_minibatches_per_sc, + num_sc_per_chip=self._num_sc_per_chip, + num_sc_shards=self._num_sc_shards, + stacked_table_to_tables=self._stacked_table_to_tables, + table_to_stacked_table_offset=self._table_to_stacked_table_offset, + table_to_sample_count=self._table_to_sample_count, + feature_to_sample_offset=self._feature_to_sample_offset, + flat_features=self._flat_features, + flat_inputs=flat_inputs, + flat_weights=flat_weights, + ) + + def _copy_tensors_to_device( + self, + partitioned_tensors: Dict[str, Any], + ) -> Any: + """Copy tensors to device.""" + partitioned_device_tensors = {} + for table_name in partitioned_tensors: + partitioned_tensor = partitioned_tensors[table_name][0] + row_pointers_unpadded_size = partitioned_tensors[table_name][1] + ids_unpadded_size = partitioned_tensors[table_name][2] + + row_pointers, sorted_sample_ids, sorted_token_ids, sorted_gains = ( + xla_ops.tpu_copy_with_dynamic_shape( + [ + partitioned_tensor.row_pointers, + partitioned_tensor.sorted_sample_ids, + partitioned_tensor.sorted_token_ids, + partitioned_tensor.sorted_gains, + ], + [ + row_pointers_unpadded_size, + ids_unpadded_size, + ids_unpadded_size, + ids_unpadded_size, + ], + ) + ) + + # Placeholder Op for pipelining. 
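+      # The annotation below marks the copied tensors as dynamically shaped
+      # so that the embedding pipeliner can identify and rewrite them later.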
+      row_pointers, sorted_sample_ids, sorted_token_ids, sorted_gains = (
+          xla_ops.tpu_annotate_tensors_with_dynamic_shape([
+              row_pointers,
+              sorted_sample_ids,
+              sorted_token_ids,
+              sorted_gains,
+          ])
+      )
+
+      partitioned_device_tensors[table_name] = PartitionedCsrFormatTensor(
+          row_pointers=row_pointers,
+          sorted_sample_ids=sorted_sample_ids,
+          sorted_token_ids=sorted_token_ids,
+          sorted_gains=sorted_gains,
+          sample_count=partitioned_tensor.sample_count,
+          num_minibatches_per_physical_sparse_core=(
+              partitioned_tensor.num_minibatches_per_physical_sparse_core
+          ),
+      )
+    return partitioned_device_tensors
+
+  def dequeue(
+      self,
+      partitioned_tensors: Tuple[
+          Dict[str, PartitionedCsrFormatTensor], int, int
+      ],
+  ) -> Tuple[Any, Dict[str, PartitionedCsrFormatTensor]]:
+    """Perform embedding lookup."""
+    # We expect this dequeue function will always run inside tpu context.
+    context = EmbeddingPipeliningContext(
+        _PIPELINE_MODE_FORWARD, self._pipelining
+    )
+    context.Enter()
+    # TODO(pineapplejuice233): Add the virtual infeed dequeue here to get the
+    # tensors rather than getting them from arguments.
+    partitioned_tensors = tpu_replication.outside_compilation(
+        self._copy_tensors_to_device,
+        partitioned_tensors=partitioned_tensors,
+    )
+
+    activations = {}
+    # Take num_minibatches_per_physical_sparse_core from any table as
+    # they are the same across tables.
+    num_minibatches_per_physical_sparse_core = list(
+        partitioned_tensors.values()
+    )[0].num_minibatches_per_physical_sparse_core
+
+    for table_name in self._stacked_table_to_tables:
+      partitioned_tensor = partitioned_tensors[table_name]
+
+      table = self.variables[table_name]["parameters"]
+      quantization_config = self._quantization_configs[table_name]
+      if not isinstance(partitioned_tensor, PartitionedCsrFormatTensor):
+        raise ValueError(
+            "Expect PartitionedCsrFormatTensor but got"
+            f" {type(partitioned_tensor)}."
+        )
+      activation = xla_ops.xla_sparse_dense_matmul_with_csr_input(
+          row_pointers=partitioned_tensor.row_pointers,
+          sorted_sample_ids=partitioned_tensor.sorted_sample_ids,
+          sorted_token_ids=partitioned_tensor.sorted_token_ids,
+          sorted_gains=partitioned_tensor.sorted_gains,
+          input_size=self._table_to_sample_count[table_name],
+          embedding_table=table,
+          num_minibatches_per_physical_sparse_core=num_minibatches_per_physical_sparse_core,
+          quantization_config_low=(
+              quantization_config.lower if quantization_config else 0
+          ),
+          quantization_config_high=(
+              quantization_config.upper if quantization_config else 0
+          ),
+          quantization_config_num_buckets=(
+              quantization_config.num_buckets if quantization_config else 0
+          ),
+          table_name=table_name,
+      )
+
+      activations[table_name] = activation
+
+    context.Exit()
+    # Note that unstacking activations is placed on the core of the training
+    # step to reduce the number of input/output arguments of the training loop
+    # during pipelining.
+    activations = self._unstack_activations(activations)
+
+    return (activations, partitioned_tensors)
+
+  def embedding_lookup(
+      self, features: Any, weights: Optional[Any] = None
+  ) -> Tuple[Any, Dict[str, PartitionedCsrFormatTensor]]:
+    """Perform embedding lookup on the input feature.
+
+    Args:
+      features: A nested structure of `tf.Tensor`s, `tf.SparseTensor`s or
+        `tf.RaggedTensor`s, with the same structure as `feature_config`. Inputs
+        will be downcast to `tf.int32`. Only one type out of `tf.SparseTensor`
+        or `tf.RaggedTensor` is supported per call.
+ weights: If not `None`, a nested structure of `tf.Tensor`s, + `tf.SparseTensor`s or `tf.RaggedTensor`s, matching the above, except + that the tensors should be of float type (and they will be downcast to + `tf.float32`). For `tf.SparseTensor`s we assume the `indices` are the + same for the parallel entries from `features` and similarly for + `tf.RaggedTensor`s we assume the row_splits are the same. + + Raises: + ValueError: If the input feature is not one of the Tensor, SparseTensor or + RaggedTensor type. + TypeError: If the type of any sequence in `features` does not match + corresponding sequence in `feature_config`. Similarly for `weights`, if + not `None`. + + Returns: + packed_activations: Embedding lookup results packed as the same sequence + of the input feature. + packed_output: A dict of PartitionedCsrFormatTensors. + """ + if not self._built: + self._maybe_build() + + context = EmbeddingPipeliningContext( + _PIPELINE_MODE_FORWARD, self._pipelining + ) + context.Enter() + + partitioned_tensors = self.enqueue(features, weights) + + context.Exit() + + result = self.dequeue(partitioned_tensors) + + return result + + # TODO(pineapplejuice233): Enable these methods later if needed. Don't open source these + # methods as these ops are not available yet. + @staticmethod + def _experimental_preprocess_features( + num_replicas_in_sync: int, + max_ids_per_chip_per_sample: int, + max_minibatches_per_sc: int, + num_sc_per_chip: int, + num_sc_shards: int, + stacked_table_to_tables: Dict[str, Any], + table_to_stacked_table_offset: Dict[str, Tuple[str, int, int]], + table_to_sample_count: Dict[str, int], + feature_to_sample_offset: Dict[str, int], + flat_features: Any, + flat_inputs: Any, + flat_weights: Optional[Any] = None, + ) -> Any: + """Function to preprocess features.""" + # Preprocess the inputs into list of COO tensor. + table_to_list_of_coos = TPUEmbeddingV2._experimental_preprocess_inputs_and_weights_to_list_of_coo_tensors( + flat_inputs, + flat_weights, + flat_features, + stacked_table_to_tables, + table_to_stacked_table_offset, + feature_to_sample_offset, + num_sc_per_chip, + table_to_sample_count, + num_sc_shards, + ) + + # Sort the COO tensors and compute whether minibatching is needed. + table_to_sorted_coo_tensor, is_minibatching_needed_per_replica = ( + TPUEmbeddingV2._experimental_sort_list_of_coo_tensors( + num_replicas_in_sync, + table_to_list_of_coos, + stacked_table_to_tables, + num_sc_per_chip, + ) + ) + + # Collective all gather across replicas to determine whether minibatching + # is needed. 
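+    # Every replica sees the same gathered flags, so all replicas take the
+    # same branch of the cond below and stay in lockstep.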
+ is_minibatching_needed_cross_replica = ( + gen_collective_ops.collective_gather_v2( + input=is_minibatching_needed_per_replica, + group_size=num_replicas_in_sync, + group_key=0, + instance_key=math_ops.cast(xla_ops.global_iter_id(), dtypes.int32), + ordering_token=[], + ) + ) + + table_to_csr_format_tensor = cond.cond( + math_ops.equal( + math_ops.reduce_sum(is_minibatching_needed_cross_replica), 0 + ), + lambda: TPUEmbeddingV2._experimental_get_single_minibatch_from_sorted_coo_tensor( # pylint: disable=g-long-lambda + num_replicas_in_sync, + max_ids_per_chip_per_sample, + max_minibatches_per_sc, + table_to_sorted_coo_tensor, + stacked_table_to_tables, + table_to_sample_count, + num_sc_per_chip, + ), + lambda: TPUEmbeddingV2._experimental_get_multiple_minibatches_from_sorted_coo_tensor( # pylint: disable=g-long-lambda + num_replicas_in_sync, + max_ids_per_chip_per_sample, + max_minibatches_per_sc, + table_to_sorted_coo_tensor, + stacked_table_to_tables, + table_to_sample_count, + num_sc_per_chip, + ), + strict=True, + ) + + return table_to_csr_format_tensor + + # TODO(pineapplejuice233): Do not use it as they are experimental. + @staticmethod + def _experimental_convert_input_feature_to_list_of_coo_tensors( + input_feature: Union[ + tensor.Tensor, sparse_tensor.SparseTensor, ragged_tensor.RaggedTensor + ], + weight: Optional[tensor.Tensor], + feature_config: tpu_embedding_v2_utils.FeatureConfig, + row_offset: int, + col_offset: int, + col_shift: int, + vocab_size: int, + num_sc_per_chip: int, + num_sc_shards: int, + stacked_table_sample_count: int, + ) -> Any: + """Convert any of the expected input types to a COO format.""" + sample_count = functools.reduce(operator.mul, feature_config.output_shape) + if isinstance(input_feature, tensor.Tensor): + input_feature = array_ops.reshape(input_feature, [-1]) + if weight is None: + weight = array_ops.ones_like(input_feature, dtype=dtypes.float32) + elif isinstance(weight, tensor.Tensor): + weight = array_ops.reshape(weight, [-1]) + else: + raise ValueError( + f"Expect weight to be Tensor type but got {type(weight)}" + ) + row_ids_list, col_ids_list, gains_list = ( + xla_ops.convert_to_list_of_coo_tensors( + indices_or_row_splits=array_ops.zeros((0,), dtype=dtypes.int32), + values=math_ops.cast(input_feature, dtype=dtypes.int32), + weights=math_ops.cast(weight, dtypes.float32), + sample_count=sample_count, + combiner=feature_config.table.combiner, + num_sc_per_chip=num_sc_per_chip, + ) + ) + elif isinstance(input_feature, sparse_tensor.SparseTensor): + if weight is None: + weight = array_ops.ones_like(input_feature.values, dtype=dtypes.float32) + elif isinstance(weight, sparse_tensor.SparseTensor): + weight = weight.values + else: + raise ValueError( + f"Expect weight to be SparseTensor type but got {type(weight)}" + ) + row_ids_list, col_ids_list, gains_list = ( + xla_ops.convert_to_list_of_coo_tensors( + indices_or_row_splits=math_ops.cast( + input_feature.indices, dtype=dtypes.int32 + ), + values=math_ops.cast(input_feature.values, dtype=dtypes.int32), + weights=math_ops.cast(weight, dtypes.float32), + sample_count=sample_count, + combiner=feature_config.table.combiner, + num_sc_per_chip=num_sc_per_chip, + ) + ) + elif isinstance(input_feature, ragged_tensor.RaggedTensor): + if not weight: + weight = array_ops.ones_like(input_feature.values, dtype=dtypes.float32) + elif isinstance(weight, ragged_tensor.RaggedTensor): + weight = weight.values + else: + raise ValueError( + f"Expect weight to be RaggedTensor type but got {type(weight)}" + ) + 
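+      # For ragged inputs, the row splits mark per-sample boundaries and are
+      # passed in place of sparse indices.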
row_ids_list, col_ids_list, gains_list = ( + xla_ops.convert_to_list_of_coo_tensors( + indices_or_row_splits=math_ops.cast( + input_feature.row_splits, dtype=dtypes.int32 + ), + values=math_ops.cast(input_feature.values, dtype=dtypes.int32), + weights=math_ops.cast(weight, dtypes.float32), + sample_count=sample_count, + combiner=feature_config.table.combiner, + num_sc_per_chip=num_sc_per_chip, + ) + ) + else: + raise ValueError( + f"Input of unknown type {type(input_feature)}. Please only pass " + "Tensor, SparseTensor or RaggedTensor as input to embedding " + "lookup." + ) + for i in range(num_sc_per_chip): + row_ids_list[i] = ( + row_ids_list[i] % (sample_count // num_sc_per_chip) + + int(row_offset // num_sc_per_chip) + + int(stacked_table_sample_count // num_sc_per_chip) * i + ) + col_ids_list[i] = ( + (col_ids_list[i] + col_shift) % num_sc_shards + + (col_ids_list[i] // num_sc_shards * num_sc_shards) + + col_offset + ) + return row_ids_list, col_ids_list, gains_list, sample_count + + # TODO(pineapplejuice233): Do not use it as they are experimental. + @staticmethod + def _experimental_preprocess_inputs_and_weights_to_list_of_coo_tensors( + flat_inputs: Any, + flat_weights: Any, + flat_features: Any, + stacked_table_to_tables: Dict[str, Any], + table_to_stacked_table_offset: Dict[str, Tuple[str, int, int]], + feature_to_sample_offset: Dict[str, int], + num_sc_per_chip: int, + stacked_table_to_sample_count: Dict[str, int], + num_sc_shards: int, + ) -> Dict[str, Any]: + """Convert the raw inputs into list of coo tensors.""" + table_to_list_of_coos = { # pylint: disable=g-complex-comprehension + table_name: ( + [[], [], [], []], + [[], [], [], []], + [[], [], [], []], + [], + [], + ) + for table_name in stacked_table_to_tables + } + for inp, weight, (feature_path, feature) in zip( + flat_inputs, flat_weights, flat_features + ): + table_name, col_offset, col_shift = table_to_stacked_table_offset[ + feature.table.name + ] + stacked_table_sample_count = stacked_table_to_sample_count[table_name] + row_offset = feature_to_sample_offset[feature_path] + # Consider making this into one op per table rather than per feature? + row_ids_list, col_ids_list, gains_list, sample_count = ( + TPUEmbeddingV2._experimental_convert_input_feature_to_list_of_coo_tensors( + inp, + weight, + feature, + row_offset, + col_offset, + col_shift, + feature.table.vocabulary_size, + num_sc_per_chip, + num_sc_shards, + stacked_table_sample_count, + ) + ) + for i in range(num_sc_per_chip): + table_to_list_of_coos[table_name][0][i].append(row_ids_list[i]) + table_to_list_of_coos[table_name][1][i].append(col_ids_list[i]) + table_to_list_of_coos[table_name][2][i].append(gains_list[i]) + table_to_list_of_coos[table_name][3].append( + sample_count // num_sc_per_chip + ) + table_to_list_of_coos[table_name][4].append(col_offset) + return table_to_list_of_coos + + # TODO(pineapplejuice233): Do not use it as they are experimental. + @staticmethod + def _experimental_sort_list_of_coo_tensors( + num_replicas_in_sync: int, + table_to_list_of_coos: Dict[str, Any], + stacked_table_to_tables: Dict[str, Any], + num_sc_per_chip: int, + ) -> Tuple[Dict[str, Any], List[tensor.Tensor]]: + """Sort the coo tensors by replica.""" + table_to_sorted_coo_tensor = { + table_name: ([], [], [], []) for table_name in stacked_table_to_tables + } + is_minibatching_needed_per_table = [] + for table_name in stacked_table_to_tables: + # Feature width are the same across stacked tables. 
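+      # The stacked vocabulary size below is the sum over member tables,
+      # matching the row offsets assigned during stacking.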
+ feature_width = stacked_table_to_tables[table_name][0].dim + + total_vocab_size = sum( + [ + table.vocabulary_size + for table in stacked_table_to_tables[table_name] + ] + ) + for i in range(num_sc_per_chip): + row_ids_list = table_to_list_of_coos[table_name][0][i] + col_ids_list = table_to_list_of_coos[table_name][1][i] + gains_list = table_to_list_of_coos[table_name][2][i] + sample_count_list = table_to_list_of_coos[table_name][3] + col_offset_list = table_to_list_of_coos[table_name][4] + + ( + sorted_row_ids, + sorted_col_ids, + sorted_gains, + id_counts, + is_minibatch_needed, + ) = xla_ops.sort_list_of_coo_tensors_with_physical_replica( + row_ids_list=row_ids_list, + col_ids_list=col_ids_list, + gains_list=gains_list, + sample_count_list=sample_count_list, + col_offset_list=col_offset_list, + num_replica=num_replicas_in_sync, + table_vocab_size=total_vocab_size, + feature_width=feature_width, + num_sc_per_chip=num_sc_per_chip, + table_name=table_name, + ) + + table_to_sorted_coo_tensor[table_name][0].append(sorted_row_ids) + table_to_sorted_coo_tensor[table_name][1].append(sorted_col_ids) + table_to_sorted_coo_tensor[table_name][2].append(sorted_gains) + table_to_sorted_coo_tensor[table_name][3].append(id_counts) + + is_minibatching_needed_per_table.append( + math_ops.cast(is_minibatch_needed, dtypes.int32) + ) + + return (table_to_sorted_coo_tensor, is_minibatching_needed_per_table) + + # TODO(pineapplejuice233): Do not use it as they are experimental. + @staticmethod + def _experimental_get_minibatch_splits_from_sorted_coo_tensor( + num_replicas_in_sync: int, + table_to_sorted_coo_tensor: Dict[str, Any], + stacked_table_to_tables: Dict[str, Any], + table_to_sample_count: Dict[str, int], + num_sc_per_chip: int, + ) -> Tuple[Dict[str, Any], List[tensor.Tensor]]: + """Compute minibatch splits from the sorted coo tensor.""" + table_to_sorted_coo_tensor_with_minibatch = { + table_name: ([], [], [], []) for table_name in stacked_table_to_tables + } + per_replica_table_splits = [] + for table_name in stacked_table_to_tables: + # Feature width are the same across stacked tables. + feature_width = stacked_table_to_tables[table_name][0].dim + + total_vocab_size = sum( + [ + table.vocabulary_size + for table in stacked_table_to_tables[table_name] + ] + ) + + ( + sorted_row_ids_list, + sorted_col_ids_list, + sorted_gains_list, + id_counts_list, + ) = table_to_sorted_coo_tensor[table_name] + + for i in range(num_sc_per_chip): + (sorted_row_ids, sorted_col_ids, sorted_gains, id_counts, splits) = ( + xla_ops.get_multiple_minibatches_splits_with_physical_replica( + sorted_row_ids=sorted_row_ids_list[i], + sorted_col_ids=sorted_col_ids_list[i], + sorted_gains=sorted_gains_list[i], + id_counts=id_counts_list[i], + num_replica=num_replicas_in_sync, + sample_count_per_sc=table_to_sample_count[table_name] + // num_sc_per_chip, + table_vocab_size=total_vocab_size, + feature_width=feature_width, + num_sc_per_chip=num_sc_per_chip, + table_name=table_name, + ) + ) + + table_to_sorted_coo_tensor_with_minibatch[table_name][0].append( + sorted_row_ids + ) + table_to_sorted_coo_tensor_with_minibatch[table_name][1].append( + sorted_col_ids + ) + table_to_sorted_coo_tensor_with_minibatch[table_name][2].append( + sorted_gains + ) + table_to_sorted_coo_tensor_with_minibatch[table_name][3].append( + id_counts + ) + per_replica_table_splits.append(splits) + + return (table_to_sorted_coo_tensor_with_minibatch, per_replica_table_splits) + + # TODO(pineapplejuice233): Do not use it as they are experimental. 
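+  # Combines the per-SparseCore sorted ids with the gathered cross-replica
+  # splits into CSR minibatches; this is the multi-minibatch branch of the
+  # cond in _experimental_preprocess_features.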
+ @staticmethod + def _experimental_get_multiple_minibatches_from_sorted_coo_tensor( + num_replicas_in_sync: int, + max_ids_per_chip_per_sample: int, + max_minibatches_per_sc: int, + table_to_sorted_coo_tensor: Dict[str, Any], + stacked_table_to_tables: Dict[str, Any], + table_to_sample_count: Dict[str, int], + num_sc_per_chip: int, + ) -> Any: + """Get multiple minibatches from the sorted coo tensor.""" + + table_to_sorted_coo_tensor_with_minibatch, per_replica_table_splits = ( + TPUEmbeddingV2._experimental_get_minibatch_splits_from_sorted_coo_tensor( + num_replicas_in_sync, + table_to_sorted_coo_tensor, + stacked_table_to_tables, + table_to_sample_count, + num_sc_per_chip, + ) + ) + + # Collective all gather across replicas to get final splits. + cross_replica_table_splits = gen_collective_ops.collective_gather_v2( + input=per_replica_table_splits, + group_size=num_replicas_in_sync, + group_key=1, + instance_key=math_ops.cast(xla_ops.global_iter_id(), dtypes.int32), + ordering_token=[], + ) + + table_to_csr_format_tensor = {} + for table_name in stacked_table_to_tables: + ( + sorted_row_ids_list, + sorted_col_ids_list, + sorted_gains_list, + id_counts_list, + ) = table_to_sorted_coo_tensor_with_minibatch[table_name] + + # Feature width are the same across stacked tables. + feature_width = stacked_table_to_tables[table_name][0].dim + + total_vocab_size = sum( + [ + table.vocabulary_size + for table in stacked_table_to_tables[table_name] + ] + ) + ( + row_pointers, + sorted_sample_ids, + sorted_token_ids, + sorted_gains, + row_pointers_unpadded_size, + ids_unpadded_size, + num_minibatches_per_physical_sparse_core, + ) = xla_ops.convert_to_csr_wrapped_coo_with_physical_replica( + sorted_row_ids_list=sorted_row_ids_list, + sorted_col_ids_list=sorted_col_ids_list, + sorted_gains_list=sorted_gains_list, + id_counts_list=id_counts_list, + splits=cross_replica_table_splits, + sample_count_per_sc=table_to_sample_count[table_name] + // num_sc_per_chip, + num_replica=num_replicas_in_sync, + max_minibatches_per_sc=max_minibatches_per_sc, + max_ids_per_chip_per_sample=max_ids_per_chip_per_sample, + table_vocab_size=total_vocab_size, + feature_width=feature_width, + table_name=table_name, + ) + table_to_csr_format_tensor[table_name] = ( + PartitionedCsrFormatTensor( + row_pointers=row_pointers, + sorted_sample_ids=sorted_sample_ids, + sorted_token_ids=sorted_token_ids, + sorted_gains=sorted_gains, + sample_count=table_to_sample_count[table_name], + num_minibatches_per_physical_sparse_core=num_minibatches_per_physical_sparse_core, + ), + row_pointers_unpadded_size, + ids_unpadded_size, + ) + return table_to_csr_format_tensor + + # TODO(pineapplejuice233): Do not use it as they are experimental. + @staticmethod + def _experimental_get_single_minibatch_from_sorted_coo_tensor( + num_replicas_in_sync: int, + max_ids_per_chip_per_sample: int, + max_minibatches_per_sc: int, + table_to_sorted_coo_tensor: Dict[str, Any], + stacked_table_to_tables: Dict[str, Any], + table_to_sample_count: Dict[str, int], + num_sc_per_chip: int, + ) -> Any: + """Get a single minibatch from the sorted coo tensor.""" + table_to_csr_format_tensor = {} + for table_name in stacked_table_to_tables: + ( + sorted_row_ids_list, + sorted_col_ids_list, + sorted_gains_list, + id_counts_list, + ) = table_to_sorted_coo_tensor[table_name] + + # Feature width are the same across stacked tables. 
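+      # A zero `splits` constant is passed below because the single-minibatch
+      # path has no cross-replica split points to honor.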
+      feature_width = stacked_table_to_tables[table_name][0].dim
+
+      total_vocab_size = sum(
+          [
+              table.vocabulary_size
+              for table in stacked_table_to_tables[table_name]
+          ]
+      )
+      (
+          row_pointers,
+          sorted_sample_ids,
+          sorted_token_ids,
+          sorted_gains,
+          row_pointers_unpadded_size,
+          ids_unpadded_size,
+          num_minibatches_per_physical_sparse_core,
+      ) = xla_ops.convert_to_csr_wrapped_coo_with_physical_replica(
+          sorted_row_ids_list=sorted_row_ids_list,
+          sorted_col_ids_list=sorted_col_ids_list,
+          sorted_gains_list=sorted_gains_list,
+          id_counts_list=id_counts_list,
+          splits=constant_op.constant(
+              0, dtype=dtypes.int64
+          ),  # no splits are needed.
+          sample_count_per_sc=table_to_sample_count[table_name]
+          // num_sc_per_chip,
+          num_replica=num_replicas_in_sync,
+          max_minibatches_per_sc=max_minibatches_per_sc,
+          max_ids_per_chip_per_sample=max_ids_per_chip_per_sample,
+          table_vocab_size=total_vocab_size,
+          feature_width=feature_width,
+          table_name=table_name,
+      )
+      table_to_csr_format_tensor[table_name] = (
+          PartitionedCsrFormatTensor(
+              row_pointers=row_pointers,
+              sorted_sample_ids=sorted_sample_ids,
+              sorted_token_ids=sorted_token_ids,
+              sorted_gains=sorted_gains,
+              sample_count=table_to_sample_count[table_name],
+              num_minibatches_per_physical_sparse_core=num_minibatches_per_physical_sparse_core,
+          ),
+          row_pointers_unpadded_size,
+          ids_unpadded_size,
+      )
+    return table_to_csr_format_tensor
+
+  # TODO(pineapplejuice233): Do not use it as they are experimental.
+  def _experimental_unstack_activations(
+      self, activations: Dict[str, tensor.Tensor]
+  ):
+    """Unstack the incoming per-table activations into per-feature tensors."""
+
+    # Activations are stacked in a particular order. That order is the order
+    # features appear in the self._flat_features.
+    flattened_activations = []
+    table_to_current_offset = {
+        table_name: 0 for table_name in self._stacked_table_to_tables
+    }
+    for table_name in self._stacked_table_to_tables:
+      activation_shape = activations[table_name].shape
+      activations[table_name] = array_ops.reshape(
+          activations[table_name],
+          [self._num_sc_per_chip, -1, activation_shape[-1]],
+      )
+    for _, feature in self._flat_features:
+      sample_count = functools.reduce(operator.mul, feature.output_shape)
+      table_name = self._table_to_stacked_table_offset[feature.table.name][0]
+      extra_cols = self._table_to_padding_columns[feature.table.name]
+      activation = array_ops.slice(
+          activations[table_name],
+          [0, table_to_current_offset[table_name], 0],
+          [
+              self._num_sc_per_chip,
+              sample_count // self._num_sc_per_chip,
+              feature.table.dim - extra_cols,
+          ],
+      )
+      # Reshape to follow the user's requested output shape.
+      activation = array_ops.reshape(
+          activation,
+          list(feature.output_shape) + [feature.table.dim - extra_cols],
+      )
+      flattened_activations.append(activation)
+      table_to_current_offset[table_name] += (
+          sample_count // self._num_sc_per_chip
+      )
+
+    return nest.pack_sequence_as(self._feature_config, flattened_activations)
+
+  # TODO(pineapplejuice233): Do not use it as they are experimental.
+  def _experimental_stack_gradients(self, gradients):
+    """Stack the incoming gradients to per table gradients."""
+
+    # Gradients are stacked in a particular order. That order is the order
+    # features appear in the self._flat_features.
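+    # One gradient bucket per SparseCore on the chip, so the final concat can
+    # lay out the per-core slices contiguously in device order.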
+ table_to_gradient_list = { + table_name: [[], [], [], []] + for table_name in self._stacked_table_to_tables + } + flattend_gradients = nest.flatten(gradients) + for gradient, (path, feature) in zip( + flattend_gradients, self._flat_features + ): + sample_count = functools.reduce(operator.mul, feature.output_shape) + if gradient is not None and not isinstance(gradient, tensor.Tensor): + raise ValueError( + f"found non-tensor type: {type(gradient)} at path {path}." + ) + if gradient is None: + # TODO(bfontain): In the case that an entire table's gradient is gone + # then maybe we can just omit the update all together? + logging.warning( + ( + "No gradient passed for feature %s, sending zero " + "gradient. This may not be correct behavior for certain " + "optimizers like Adam." + ), + path, + ) + gradient = array_ops.zeros( + (sample_count, feature.table.dim), dtype=dtypes.float32 + ) + table_name = self._table_to_stacked_table_offset[feature.table.name][0] + extra_cols = self._table_to_padding_columns[feature.table.name] + gradient = array_ops.reshape( + gradient, [-1, feature.table.dim - extra_cols] + ) + if extra_cols != 0: + gradient = array_ops.pad(gradient, [[0, 0], [0, extra_cols]]) + # Ensure static shape after padding. + gradient.set_shape([sample_count, feature.table.dim]) + per_sc_sample_count = sample_count // self._num_sc_per_chip + for i in range(self._num_sc_per_chip): + table_to_gradient_list[table_name][i].append( + array_ops.slice( + gradient, + [i * per_sc_sample_count, 0], + [ + per_sc_sample_count, + feature.table.dim, + ], + ) + ) + for table_name in table_to_gradient_list: + table_to_gradient_list[table_name] = array_ops.concat( + [ + array_ops.concat(table_to_gradient_list[table_name][i], axis=0) + for i in range(self._num_sc_per_chip) + ], + axis=0, + ) + return table_to_gradient_list + + +# TODO(pineapplejuice233): Merge this function with the one in tpu_embeding_v2.py once +# this file is OSSed. +def extract_variable_info( + kwargs: Any, +) -> Tuple[str, Tuple[int, ...], dtypes.DType, Callable[[], Any]]: + """Extracts the variable creation attributes from the kwargs. + + Args: + kwargs: a dict of keyword arguments that were passed to a variable creator + scope. + + Returns: + A tuple of variable name, shape, dtype, initialization function. + """ + if isinstance(kwargs["initial_value"], functools.partial) and ( + "shape" in kwargs["initial_value"].keywords + or kwargs["initial_value"].args + ): + # Sometimes shape is passed positionally, sometimes it's passed as a kwarg. + if "shape" in kwargs["initial_value"].keywords: + shape = kwargs["initial_value"].keywords["shape"] + else: + shape = kwargs["initial_value"].args[0] + return ( + kwargs["name"], + shape, + kwargs["initial_value"].keywords.get("dtype", kwargs["dtype"]), + kwargs["initial_value"].func, + ) + elif ( + "shape" not in kwargs + or kwargs["shape"] is None + or not callable(kwargs["initial_value"]) + ): + raise ValueError( + "Unable to extract initializer function and shape from {}. Please " + "either pass a function that expects a shape and dtype as the " + "initial value for your variable or functools.partial object with " + "the shape and dtype kwargs set. 
This is needed so that we can " + "initialize the shards of the ShardedVariable locally.".format( + kwargs["initial_value"] + ) + ) + else: + return ( + kwargs["name"], + kwargs["shape"], + kwargs["dtype"], + kwargs["initial_value"], + ) + + +def is_checkpoint_initial_value(initial_value: Any) -> bool: + """Whether the initial value is from checkpoint.""" + return ( + isinstance(initial_value, base.CheckpointInitialValue) + or isinstance(initial_value, base.CheckpointInitialValueCallable) + or ( + isinstance(initial_value, functools.partial) + and isinstance( + initial_value.func, base.CheckpointInitialValueCallable + ) + ) + ) + + +def make_sharded_variable_creator( + strategy: distribute_lib.Strategy, +) -> Callable[..., Any]: + """Create a variable creator which shards across all the tpu device. + + Args: + strategy: a TPUStrategy object. + + Returns: + The sharded variable creator. + """ + tpu_devices = strategy.extended._tpu_devices # pylint:disable=protected-access + + def _create_sharded_variable(next_creator, *args, **kwargs): + """Create a TPUEmbeddingShardedVariable.""" + # Avoid the default mirror variable creator. + kwargs["skip_mirrored_creator"] = True + + # Only support sharding on the first dimension. + shard_dim = 0 + + num_replicas, num_cores_per_replica = tpu_devices.shape + + is_ckpt_init_value = is_checkpoint_initial_value(kwargs["initial_value"]) + + arg_spec = tf_inspect.getfullargspec(kwargs["initial_value"]) + if ( + is_ckpt_init_value + and "shard_info" not in arg_spec.args + and "shard_info" not in arg_spec.kwonlyargs + ): + raise ValueError( + "When a sharded variable is initialized from a checkpoint, " + "shard_info must be in arguments of the init function." + ) + + name, shape, dtype, unwrapped_initial_value = extract_variable_info(kwargs) + + shape = ops.tensor_shape.TensorShape(shape) + num_devices = num_replicas * num_cores_per_replica + # NOTE: only support sharding variables evenly across devices. + if shape[shard_dim] % num_devices != 0: + raise ValueError( + "Only evenly sharding across devices is currently supported. " + "Got shape {} and {} devices".format(shape, num_devices) + ) + + partition_shape = shape.as_list() + partition_shape[shard_dim] = partition_shape[shard_dim] // num_devices + + unwrapped_arg_spec = tf_inspect.getargspec(unwrapped_initial_value) + sharding_aware = "shard_info" in unwrapped_arg_spec.args + + variables = [] + # Keep track of offset for sharding aware initializers. + partition_offset = [0] * len(shape) + for replica_id in range(num_replicas): + for logic_core_id in range(num_cores_per_replica): + with ops.device(tpu_devices[replica_id][logic_core_id]): + kwargs["name"] = f"{name}/{replica_id}" + kwargs["shape"] = partition_shape + if sharding_aware: + # TODO(pineapplejuice233): Change this to use MOD sharding logic. 
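+            # With the current contiguous offsets, shard i owns rows
+            # [i * rows_per_shard, (i + 1) * rows_per_shard); MOD sharding
+            # would instead assign row r to shard r % num_devices.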
+            shard_info = base.ShardInfo(
+                tensor_shape.as_shape(partition_shape),
+                copy.deepcopy(partition_offset),
+            )
+            kwargs["initial_value"] = functools.partial(
+                kwargs["initial_value"], shard_info=shard_info
+            )
+            partition_offset[shard_dim] += partition_shape[shard_dim]
+          else:
+            kwargs["initial_value"] = functools.partial(
+                unwrapped_initial_value, shape=partition_shape, dtype=dtype
+            )
+          variables.append(next_creator(*args, **kwargs))
+
+    result = TPUEmbeddingShardedVariable(
+        strategy, variables, tf_variables.VariableAggregation.NONE, None
+    )
+    return result
+
+  return _create_sharded_variable
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu_feed.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu_feed.py
new file mode 100644
index 0000000000000000000000000000000000000000..23441ccaaad413bc60b3aeaa181907beabe282f1
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu_feed.py
@@ -0,0 +1,956 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ===================================================================
+
+"""Helper library for handling infeed between hosts and TPUs."""
+
+import itertools
+
+import numpy as np
+
+from tensorflow.python.compiler.xla.experimental import xla_sharding
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import tensor_shape
+from tensorflow.python.ops import array_ops
+from tensorflow.python.tpu import tpu_name_util
+from tensorflow.python.tpu import tpu_sharding
+from tensorflow.python.tpu.ops import tpu_ops
+
+from tensorflow.python.util import nest
+
+
+def partition_or_replicate_on_host(tensor, dims):
+  """Partitions or replicates the input tensor.
+
+  The ops inside this function are placed on the host side.
+
+  Args:
+    tensor: The input tensor which will be partitioned or replicated.
+    dims: A list of integers describing how to partition the input tensor.
+
+  Returns:
+    An iterator of `Tensor`s or a list of partitioned tensors.
+  """
+  if dims is None:
+    return itertools.repeat(tensor)
+  dims = np.array(dims)
+  output = [tensor]
+  shape_list = np.array(tensor.shape.as_list())
+  quotients, remainders = np.divmod(shape_list, dims)
+  for axis, (quotient, remainder, dim, original_size) in enumerate(
+      zip(quotients, remainders, dims, shape_list)):
+    if dim <= 1:
+      continue
+    if remainder > 0:
+      # For each dimension that cannot be evenly partitioned, XLA assumes
+      # tensors are partitioned in a greedy manner by using
+      # ceil_ratio(size/dim) first. E.g. for a 2D tensor with shape (5, 14)
+      # and dims (2, 4): since 5 % 2 = 1 and 14 % 4 = 2, [5, 14] =>
+      #   [[(3, 4), (3, 4), (3, 4), (3, 2)],
+      #    [(2, 4), (2, 4), (2, 4), (2, 2)]]
+      ceil_ratio = quotient + 1
+      num_full_slots, left_over = np.divmod(original_size, ceil_ratio)
+      num_or_size_splits = [ceil_ratio] * num_full_slots + [left_over]
+      if len(num_or_size_splits) < dim:
+        num_or_size_splits += [0] * (dim - len(num_or_size_splits))
+      new_output = []
+      for x in output:
+        new_output.append(
+            array_ops.split(
+                x, num_or_size_splits=num_or_size_splits, axis=axis))
+      output = new_output
+    else:
+      output = [array_ops.split(x, int(dim), axis=axis) for x in output]
+    output = nest.flatten(output)
+  return output
+
+
+def _tag_sharding_attribute_for_dequeued_tensor(tensor, dims):
+  """Tags the appropriate XLA sharding attribute onto the dequeued tensor.
+
+  The sharding attribute of the dequeued tensor will be a tuple.
+
+  Args:
+    tensor: The dequeued tensor on TPU.
+    dims: A list of integers describing how the tensor is partitioned.
+
+  Returns:
+    The same tensor with the xla_sharding attribute.
+  """
+  if dims is None:
+    return xla_sharding.replicate(tensor, assign_tuple_sharding=True)
+  elif np.prod(dims) == 1:
+    return xla_sharding.assign_device(tensor, 0, assign_tuple_sharding=True)
+  else:
+    tile_assignment = np.arange(np.prod(dims)).reshape(dims)
+    return xla_sharding.tile(
+        tensor=tensor,
+        tile_assignment=tile_assignment,
+        assign_tuple_sharding=True)
+
+
+def tag_sharding_attribute_for_dequeued_tensors(dequeues, dims):
+  """Tags the appropriate XLA sharding attribute onto the dequeued tensors.
+
+  Args:
+    dequeues: A list of dequeued tensors on TPU.
+    dims: A list of integers describing how each tensor is partitioned.
+
+  Returns:
+    The same dequeues with the appropriate xla_sharding attribute.
+  """
+  nest.assert_shallow_structure(dequeues, dims)
+  return nest.map_structure_up_to(
+      dequeues, _tag_sharding_attribute_for_dequeued_tensor, dequeues, dims)
+
+
+class InfeedQueue(object):
+  """A helper object to build a device infeed queue.
+
+  The InfeedQueue builds the host-side and device-side Ops to enqueue and
+  dequeue elements, respectively, and ensures that their types and
+  shapes match.
+  """
+
+  def __init__(self,
+               number_of_tuple_elements=None,
+               tuple_types=None,
+               tuple_shapes=None,
+               shard_dimensions=None,
+               number_of_partitions=None,
+               name=None):
+    """Creates a new InfeedQueue with the given configuration.
+
+    The configuration need not be fully specified at creation since it
+    can be modified subsequently by methods that set the values
+    explicitly or infer them from the shapes of inputs.
+
+    Args:
+      number_of_tuple_elements: the number of Tensors fed atomically through
+        the queue, must be present unless it can be inferred from other
+        arguments.
+      tuple_types: if not None, a list of types of the elements of the queue.
+      tuple_shapes: if not None, a list of shapes of the elements of the queue.
+      shard_dimensions: if not None, a list of dimensions on which the
+        elements of the queue should be sharded during automatic
+        parallelization.
+      number_of_partitions: if > 1, the infeed dequeue shape will contain
+        the full shape that includes all partitions, and the corresponding
+        XLA annotation will be added to the infeed dequeue op. In this case
+        the infeed is still data parallel, feeding a per-core batch size to
+        each core, while the XLA computation may be partitioned. Because XLA
+        requires the infeed dequeue shape to be the per-replica shape, we
+        need number_of_partitions to calculate the per-replica unpartitioned
+        shape.
+      name: the name of the queue.
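+
+    Example (an illustrative sketch; the dtypes and shapes here are
+    hypothetical, not from the original docs):
+
+      queue = InfeedQueue(
+          tuple_types=[dtypes.float32, dtypes.int32],
+          tuple_shapes=[(128, 224), (128,)])
+
+    builds a two-element queue whose types and shapes are already fully
+    specified, so number_of_tuple_elements is inferred as 2.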
+ + Raises: + ValueError: if number_of_tuple_elements <= 0; or + number_of_tuple_arguments, tuple_types, tuple_shapes, and + shard_dimensions are all None; or the length of tuple_types, + tuple_shapes, or shard_dimensions is not equal to + number_of_tuple_elements; or any element of shard_dimensions + can't be converted to a Dimension. + TypeError: if any element of tuple_types or tuple_shapes can't + be converted to a dtype or TensorShape, respectively. + """ + self._frozen = False + self._generated_enqueue_ops = False + self._generated_dequeue_op = False + self._name = "InfeedQueue" if name is None else name + if number_of_partitions is None: + self._number_of_partitions = 1 + else: + self._number_of_partitions = number_of_partitions + if number_of_tuple_elements is None: + if tuple_types is not None: + number_of_tuple_elements = len(tuple_types) + elif tuple_shapes is not None: + number_of_tuple_elements = len(tuple_shapes) + elif shard_dimensions is not None: + number_of_tuple_elements = len(shard_dimensions) + else: + raise ValueError( + "number of tuple elements cannot be inferred from InfeedQueue " + "constructor") + if number_of_tuple_elements <= 0: + raise ValueError(f"number_of_tuple_elements {number_of_tuple_elements} " + "must be > 0") + # Make an empty sharding policy for each tuple element. + self._sharding_policies = [ + tpu_sharding.ShardingPolicy() for _ in range(number_of_tuple_elements) + ] + if tuple_types is not None: + self.set_tuple_types(tuple_types) + else: + self._tuple_types = None + if tuple_shapes is not None: + self.set_tuple_shapes(tuple_shapes) + else: + self._tuple_shapes = None + if shard_dimensions is not None: + self.set_shard_dimensions(shard_dimensions) + self._validate() + + def _validate(self): + """Checks that the configuration is self-consistent. + + Raises: + ValueError: if the shapes and sharding policies don't match. + """ + if self.tuple_shapes is not None: + for (policy, shape) in zip(self._sharding_policies, self._tuple_shapes): + # Raise an error if the policy is incompatible with the shape. + _ = policy.get_sharded_shape(shape) + + @property + def number_of_tuple_elements(self): + """Returns the number of InfeedQueue tuple elements.""" + return len(self._sharding_policies) + + @property + def tuple_types(self): + """Returns the types of the InfeedQueue tuple elements.""" + return self._tuple_types + + def set_tuple_types(self, tuple_types): + """Sets the type of each element of the queue. + + tuple_types must be a list of length + self.number_of_tuple_elements, and each element must be + convertible to a dtype. + + Args: + tuple_types: the types of each queue element. + + Raises: + ValueError: if tuple_types is not of length + self.number_of_tuple_elements. + TypeError: if an element of tuple_types cannot be converted to a + dtype. + """ + if len(tuple_types) != self.number_of_tuple_elements: + raise ValueError( + f"tuple_types is {str(tuple_types)}, but must be a list of " + f"length {self.number_of_tuple_elements}" + ) + if self._frozen: + for (frozen, updated) in zip(self._tuple_types, tuple_types): + if frozen != updated: + raise ValueError( + "Trying to update InfeedQueue with frozen configuration with an " + f"incompatible type. 
Frozen types are {str(self._tuple_types)}, " + f"updated types are {str(tuple_types)}") + else: + try: + self._tuple_types = [dtypes.as_dtype(t) for t in tuple_types] + except (TypeError) as e: + raise TypeError( + f"tuple_types is {str(tuple_types)}, but must be a list of " + f"elements each convertible to dtype: got error {str(e)}") from e + + @property + def tuple_shapes(self): + """Returns the shapes of the InfeedQueue tuple elements.""" + return self._tuple_shapes + + def set_tuple_shapes(self, tuple_shapes): + """Sets the shape of each element of the queue. + + tuple_shapes must be a list of length + self.number_of_tuple_elements, and each element must be + convertible to a TensorShape. + + Args: + tuple_shapes: the shapes of each queue element. + + Raises: + ValueError: if tuple_shapes is not of length + self.number_of_tuple_elements. + TypeError: if an element of tuple_shapes cannot be converted to + a TensorShape. + """ + if len(tuple_shapes) != self.number_of_tuple_elements: + raise ValueError( + f"tuple_shapes is {str(tuple_shapes)}, but must be a list of " + f"length {self.number_of_tuple_elements}" + ) + try: + tuple_shapes = [tensor_shape.as_shape(shape) for shape in tuple_shapes] + except (ValueError, TypeError) as e: + raise TypeError( + f"tuple_shapes is {str(tuple_shapes)}, but must be a list of " + "elements each convertible to TensorShape: got error " + f"{str(e)}") from e + if self._frozen: + for (frozen, updated) in zip(self._tuple_shapes, tuple_shapes): + if frozen != updated: + raise ValueError( + "Trying to update InfeedQueue with frozen configuration with an " + "incompatible shape. Frozen shapes are " + f"{str(self._tuple_shapes)}, updated shapes are " + f"{str(tuple_shapes)}") + + else: + self._tuple_shapes = tuple_shapes + self._validate() + + @property + def sharding_policies(self): + """Returns the sharding policies of the InfeedQueue tuple elements.""" + return self._sharding_policies + + @property + def shard_dimensions(self): + """Gets the shard dimension of each tuple element. + + Returns: + A list of length number_of_tuple_elements, where each list entry + is the shard dimension of that tuple element or None if the + shard dimension has not been set. + """ + # The number of shards is always the same for all the policies. + return [policy.shard_dimension for policy in self._sharding_policies] + + def set_shard_dimensions(self, shard_dimensions): + """Sets the shard_dimension of each element of the queue. + + shard_dimensions must be a list of length + self.number_of_tuple_elements, and each element must be + convertible to a Dimension compatible with self.tuple_shapes. + + Args: + shard_dimensions: the dimensions of each queue element. + + Raises: + ValueError: if shard_dimensions is not of length + self.number_of_tuple_elements; or an element of + shard_dimensions cannot be converted to a Dimension; or an + element of shard_dimensions is a Dimension that is out of + range for the corresponding tuple element shape. + """ + if len(shard_dimensions) != self.number_of_tuple_elements: + raise ValueError(f"shard_dimensions is {str(shard_dimensions)}, but must " + f"be a list of length {self.number_of_tuple_elements}") + for (policy, dimension) in zip(self._sharding_policies, shard_dimensions): + policy.set_shard_dimension(dimension) + self._validate() + + @property + def number_of_shards(self): + """Gets the number of shards to use for the InfeedQueue. + + Returns: + Number of shards or None if the number of shards has not been set. 
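+
+    For example (illustrative), this returns 2 after
+    set_number_of_shards(2) has been called, and None before any
+    sharding configuration has been set.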
+ """ + # The number of shards is always the same for all the policies. + return self._sharding_policies[0].number_of_shards + + def set_number_of_shards(self, number_of_shards): + """Sets the number of shards to use for the InfeedQueue. + + Args: + number_of_shards: number of ways to shard the InfeedQueue. + + Raises: + ValueError: if number_of_shards is not > 0; or the policies have + been frozen and number_of_shards was already set to something + else. + """ + for policy in self._sharding_policies: + policy.set_number_of_shards(number_of_shards) + policy.set_number_of_partitions(self._number_of_partitions) + self._validate() + + def set_configuration_from_input_tensors(self, input_tensors): + """Sets the shapes and types of the queue tuple elements. + + input_tensors is a list of Tensors whose types and shapes are used + to set the queue configuration. + + Args: + input_tensors: list of Tensors of the same types and shapes as + the desired queue Tuple. + + Raises: + ValueError: if input_tensors is not a list of length + self.number_of_tuple_elements + """ + if len(input_tensors) != self.number_of_tuple_elements: + raise ValueError(f"input_tensors is {str(input_tensors)}, but should be " + f"a list of {self.number_of_tuple_elements} Tensors") + self.set_tuple_shapes([t.shape for t in input_tensors]) + self.set_tuple_types([t.dtype for t in input_tensors]) + + def set_configuration_from_sharded_input_tensors(self, input_tensors): + """Sets the shapes and types of the queue tuple elements. + + input_tensors is a list of lists of Tensors whose types and shapes are used + to set the queue configuration. The length of the outer list is the number + of shards required, and each inner list is the tuple of Tensors to use to + determine the types and shapes of the corresponding shard. This method + depends on the shard dimension, and calling it freezes the shard policy. + + Args: + input_tensors: list of lists of Tensors. The outer list length corresponds + to the desired number of shards, and each inner list is the size + and shape of the desired configuration of the corresponding shard. + + Raises: + ValueError: if any inner list is not a list of length + self.number_of_tuple_elements; or the inner lists do not combine to + form a consistent unsharded shape. + TypeError: if the types of the Tensors in the inner lists do not match. + """ + if not self._frozen: + # Unset the tuple shapes in case the configuration becomes + # transiently inconsistent. + self._tuple_shapes = None + number_of_shards = len(input_tensors) + self.set_number_of_shards(number_of_shards) + for t in input_tensors: + if len(t) != self.number_of_tuple_elements: + raise ValueError( + f"input_tensors is {str(input_tensors)} but must be a list of " + "lists, where each inner list has length " + f"number_of_tuple_elements={self.number_of_tuple_elements}") + # Transpose the inputs to make a list of shard shapes for each tuple + # element. + sharded_shapes = [[t[i].shape + for t in input_tensors] + for i in range(self.number_of_tuple_elements)] + # For each tuple, get the unsharded shape using that tuple's policy. 
+ unsharded_shapes = [ + policy.get_unsharded_shape(s) + for (policy, s) in zip(self._sharding_policies, sharded_shapes) + ] + self.set_tuple_shapes(unsharded_shapes) + for i in range(1, self.number_of_shards): + for (t1, t2) in zip(input_tensors[0], input_tensors[i]): + if t1.dtype != t2.dtype: + raise TypeError( + "types of the tuple elements of input_tensors " + f"{str(input_tensors)} are not consistent") + self.set_tuple_types([t.dtype for t in input_tensors[0]]) + + def freeze(self): + """Freezes the InfeedQueue so it can no longer be modified. + + The configuration is implicitly frozen before any host-side or + device-side Ops are generated. The configuration cannot be frozen + until the types and shapes of the tuple elements have been set. + + Raises: + ValueError: if the types or shapes of the tuple elements have not been + set. + """ + self._frozen = True + if self._tuple_types is None: + raise ValueError( + "Can't freeze an InfeedQueue without setting all tuple types.") + if self._tuple_shapes is None: + raise ValueError( + "Can't freeze an InfeedQueue without setting all tuple shapes.") + for shape in self._tuple_shapes: + if shape.dims is None: + raise ValueError( + "Can't freeze an InfeedQueue without setting all tuple shapes.") + for policy in self._sharding_policies: + policy.freeze() + self._validate() + + def generate_dequeue_op(self, tpu_device=0): + """Generates the device-side Op to dequeue a tuple from the queue. + + Implicitly freezes the queue configuration if it is not already + frozen, which will raise errors if the shapes and types have not + been fully specified. + + Args: + tpu_device: The TPU device ordinal where the infeed instruction should be + placed. If None, no explicit placement will be performed, and it is up + to the user to call this API from within a proper TPU device scope. + The XLA code will fail if the TPU dequeue instruction is not bound to + any device. + + Returns: + A list of Outputs corresponding to a shard of infeed dequeued + into XLA, suitable for use within a replicated block. + + Raises: + ValueError: if the types or shapes of the tuple elements have not been + set; or if a dequeue op has already been generated. + """ + self.freeze() + if self._generated_dequeue_op and not ops.inside_function(): + raise ValueError("Can't generate two dequeue Ops from the same queue") + self._generated_dequeue_op = True + full_name = "%s/dequeue" % self._name + sharded_shapes = [ + policy.get_unpartitioned_shape(policy.get_sharded_shape(shape)) + for (shape, policy) in zip(self._tuple_shapes, self._sharding_policies) + ] + if tpu_device is not None: + with ops.device(tpu_name_util.core(tpu_device)): + dequeue_op = tpu_ops.infeed_dequeue_tuple( + dtypes=self._tuple_types, shapes=sharded_shapes, name=full_name) + else: + dequeue_op = tpu_ops.infeed_dequeue_tuple( + dtypes=self._tuple_types, shapes=sharded_shapes, name=full_name) + if self._number_of_partitions <= 1: + return dequeue_op + partitions = [ + policy.get_unpartitioned_shape([1] * shape.ndims).as_list() + for (shape, policy) in zip(self._tuple_shapes, self._sharding_policies) + ] + return tag_sharding_attribute_for_dequeued_tensors(dequeue_op, partitions) + + def _generate_enqueue_op(self, + inputs, + name_prefix, + index, + device=None, + tpu_ordinal=-1): + """Generate a host-side Op to enqueue a tuple to the queue. + + If device is None the inputs are all required to have the same + device specification, and the enqueue Op is colocated with + inputs[0]. 
Otherwise the enqueue Op is placed on 'device'. + + Args: + inputs: a list of Tensors with the types and shapes of the tuple elements. + name_prefix: the base name for the Op. + index: the shard index, used to uniquify the Op name. + device: device to place the Op on, or None if it should be + colocated with the inputs. + tpu_ordinal: ordinal of the TPU device on the host to use for + infeed if device is a CPU device. Should be set to -1 if device + is a TPU device. + + Returns: + An Op corresponding to a shard of infeed enqueued at the host, + suitable for use within a replicated block. + + Raises: + ValueError: if device is None and inputs do not all have the + same device specification. + """ + full_name = "%s/%d" % (name_prefix, index) + shapes = [t.shape for t in inputs] + if device is None: + devices = [t.device for t in inputs] + for i in range(1, self.number_of_tuple_elements): + if devices[0] != devices[i]: + raise ValueError( + f"input devices for shard {index} are {str(devices)}, but should " + "all be the same") + with ops.colocate_with(inputs[0]): + return tpu_ops.infeed_enqueue_tuple( + inputs=inputs, + shapes=shapes, + name=full_name, + device_ordinal=tpu_ordinal) + else: + with ops.device(device): + return tpu_ops.infeed_enqueue_tuple( + inputs=inputs, + shapes=shapes, + name=full_name, + device_ordinal=tpu_ordinal) + + def generate_enqueue_ops(self, + sharded_inputs, + tpu_ordinal_function=None, + placement_function=None): + """Generates the host-side Ops to enqueue the shards of a tuple. + + sharded_inputs is a list, one for each shard, of lists of + Tensors. sharded_inputs[i] is the tuple of Tensors to use to feed + shard i of the queue. Returns the host-side Ops that must be run to + enqueue the sharded tuple. The Op for shard i is colocated with the inputs + for shard i. + + Implicitly freezes the queue configuration if it is not already + frozen. If the configuration has already been frozen, and is not + compatible with the types and shapes of sharded_inputs, an error + will be raised. + + Args: + sharded_inputs: a list of lists of Tensors. The length of the outer list + determines the number of shards. Each inner list indicates the types + and shapes of the tuples in the corresponding shard. + tpu_ordinal_function: if not None, a function that takes the + shard index as input and returns the ordinal of the TPU device + the shard's infeed should be placed on. tpu_ordinal_function must be + set if the inputs are placed on CPU devices. + placement_function: if not None, a function that takes the shard index as + input and returns the host device where the enqueue op should be placed + on. + + Returns: + A list of host-side Ops, one for each shard, that when executed together + will enqueue a full-size element of infeed. + + Raises: + ValueError: if the queue configuration has previously been frozen and the + shapes of the elements of sharded_inputs are not compatible with the + frozen configuration; or if the shapes of the elements of sharded_inputs + don't form a consistent unsharded tuple; or if the elements of a tuple + have different device constraints. + TypeError: if the queue configuration has previously been frozen and the + types of the elements of sharded_inputs are not compatible with the + frozen configuration; or if the types of the elements of sharded_inputs + don't form a consistent unsharded tuple. 
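+
+    Example (an illustrative sketch; the tensor names are hypothetical):
+    for a two-shard queue fed from CPU devices,
+
+      enqueue_ops = queue.generate_enqueue_ops(
+          [[images_0, labels_0], [images_1, labels_1]],
+          tpu_ordinal_function=lambda shard_index: shard_index)
+
+    returns one op per shard; running both ops together enqueues one
+    full-size element of infeed.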
+ """ + self.set_configuration_from_sharded_input_tensors(sharded_inputs) + self.freeze() + if self._generated_enqueue_ops and not ops.inside_function(): + raise ValueError("Can't generate two enqueue Ops from the same queue") + self._generated_enqueue_ops = True + if tpu_ordinal_function is None: + tpu_ordinal_function = lambda index: -1 + name_prefix = "%s/enqueue" % self._name + return [ + self._generate_enqueue_op( + shard, + name_prefix, + index, + tpu_ordinal=tpu_ordinal_function(index), + device=placement_function(index) if placement_function else None) + for (shard, index) in zip(sharded_inputs, range(self.number_of_shards)) + ] + + # TODO(misard) Generalize this to the case of systems that don't + # have 8 devices per host, and figure out what to do with + # model-parallelism. + def _default_placement_function(self, index): + return "/task:%d/device:CPU:0" % (index / 8) + + def _default_ordinal_function(self, index): + return index % 8 + + # TODO(b/36470756) remove this from tutorials once we have a better story + # for automatic placement of input pipelines. + def split_inputs_and_generate_enqueue_ops(self, + inputs, + device_assignment=None, + placement_function=None, + tpu_ordinal_function=None): + """POORLY-PERFORMING ON MULTI-HOST SYSTEMS. + + Generates the host-side Ops to enqueue a tuple. + + This method performs poorly because it takes an entire input on a single + host, splits it, and distributes it to all of the cores. It is present only + to simplify tutorial examples. + + inputs is a list of Tensors to use to feed the queue. Each input is split + into self.number_of_shards shards. Returns an Op for each shard to enqueue + the shard. The Op for shard i is placed on device placement_function(i). + + Implicitly freezes the queue configuration if it is not already + frozen. If the configuration has already been frozen, and is not + compatible with the types and shapes of inputs, an error + will be raised. + + Args: + inputs: a list of Tensors which indicates the types and shapes of the + queue tuple. + device_assignment: if not `None`, a TPU `DeviceAssignment`. If + device_assignment is not `None`, but `placement_function` and + `ordinal_function` are None, then `device_assignment` will be used to + place infeeds on the first k TPU shards, where k is the number of shards + in the queue. If all three are `None`, then default placement and + ordinal functions are used. + placement_function: if not None, a function that takes the shard + index as input and returns a device string indicating which + device the shard's infeed should be placed on. If placement_function + and tpu_ordinal_function are None, inputs are sharded round-robin + across the devices in the system. + tpu_ordinal_function: if not None, a function that takes the + shard index as input and returns the ordinal of the TPU device + the shard's infeed should be placed on. If placement_function + and tpu_ordinal_function are None, inputs are sharded round-robin + across the devices in the system. + + Returns: + A list of host-side Ops, one for each shard, that when executed together + will enqueue a full-size element of infeed. + + Raises: + ValueError: if the queue configuration has previously been frozen and the + shapes of the elements of inputs are not compatible with the frozen + configuration. + TypeError: if the queue configuration has previously been frozen and the + types of the elements of inputs are not compatible with the frozen + configuration. 
+ """ + if device_assignment is None: + if placement_function is None: + placement_function = self._default_placement_function + if tpu_ordinal_function is None: + tpu_ordinal_function = self._default_ordinal_function + else: + + def _placement_function_from_map(index): + return device_assignment.host_device(replica=index) + + def _ordinal_function_from_map(index): + return device_assignment.tpu_ordinal(replica=index) + + if placement_function is None: + placement_function = _placement_function_from_map + if tpu_ordinal_function is None: + tpu_ordinal_function = _ordinal_function_from_map + self.set_configuration_from_input_tensors(inputs) + self.freeze() + if self._generated_enqueue_ops and not ops.inside_function(): + raise ValueError("Can't generate two enqueue Ops from the same queue") + self._generated_enqueue_ops = True + split_name_prefix = "%s/split" % self._name + if self.number_of_shards == 1: + transposed_sharded_inputs = [[inp] for inp in inputs] + else: + + def split_fn(inp, num_shards, axis, name): + with ops.colocate_with(inp): + return array_ops.split(inp, num_shards, axis=axis, name=name) + + transposed_sharded_inputs = [ + split_fn( + inp, + self.number_of_shards, + axis=policy.shard_dimension, + name="%s/%d" % (split_name_prefix, index)) + for (inp, policy, index) in zip(inputs, self._sharding_policies, + range(self.number_of_tuple_elements)) + ] + sharded_inputs = [[shard[i] + for shard in transposed_sharded_inputs] + for i in range(self.number_of_shards)] + name_prefix = "%s/enqueue" % self._name + return [ + self._generate_enqueue_op( + shard, + name_prefix, + index, + device=placement_function(index), + tpu_ordinal=tpu_ordinal_function(index)) + for (shard, index) in zip(sharded_inputs, range(self.number_of_shards)) + ] + + +class _PartitionedInfeedQueue(InfeedQueue): + """A helper object to build a device infeed queue with input partition. + + Args: + number_of_tuple_elements: the number of Tensors fed atomically through the + queue, must be present unless it can be inferred from other arguments. + device_assignment: A TPU `DeviceAssignment` which is used to place all the + partitions to different TPU infeed queues. + host_id: The id of the host machine. + input_partition_dims: A nested list/tuple of integers. Each inner + list/tuple describes how to partition the corresponding input tensor. + tuple_types: If not None, a list of types of the elements of the queue. + tuple_shapes: If not None, a list of shapes of the elements of the queue. + name: The name of the queue. + """ + + def __init__(self, + number_of_tuple_elements, + device_assignment, + host_id, + input_partition_dims=None, + tuple_types=None, + tuple_shapes=None, + name=None): + super(_PartitionedInfeedQueue, self).__init__( + number_of_tuple_elements=number_of_tuple_elements, + tuple_types=tuple_types, + tuple_shapes=None, + shard_dimensions=None, + name="PartitionedInfeedQueue" if name is None else name) + self._input_partition_dims = input_partition_dims + self._host_id = host_id + self._device_assignment = device_assignment + + def generate_dequeue_op(self, tpu_device=0): + """Generate TPU dequeue ops. + + Args: + tpu_device: The TPU device ordinal where the infeed instruction should be + placed. + + Returns: + A list of Outputs corresponding to a partition of infeed dequeued + into XLA, suitable for use within a replicated block. + + Raises: + ValueError: if the types or shapes of the tuple elements have not been + set; or if a dequeue op has already been generated. 
+ """ + self.freeze() + if self._generated_dequeue_op and not ops.inside_function(): + raise ValueError("Can't generate two dequeue Ops from the same queue") + self._generated_dequeue_op = True + full_name = "%s/dequeue" % self._name + sharded_shapes = [ + policy.get_sharded_shape(shape) + for (shape, policy) in zip(self._tuple_shapes, self._sharding_policies) + ] + with ops.device(tpu_name_util.core(tpu_device)): + values = tpu_ops.infeed_dequeue_tuple( + dtypes=self._tuple_types, shapes=sharded_shapes, name=full_name) + return tag_sharding_attribute_for_dequeued_tensors( + values, self._input_partition_dims) + + def generate_enqueue_ops(self, sharded_inputs): # pytype: disable=signature-mismatch # overriding-parameter-count-checks + """Generates the host-side Ops to enqueue the partitioned inputs. + + sharded_inputs is a list, one for each replica, of lists of + Tensors. sharded_inputs[i] is the tuple of Tensors to use to feed + replica i. + sharded_inputs[i][j] is partitioned by self._input_partition_dims[j]. + + For example, if sharded_inputs[i][j] is a 2-D Tensor: + [[A, B, C, D], + [E ,F, G, H]] + self._input_partition_dims[j] is [2, 4]. + + sharded_inputs[i][j] will be partitioned and flattened into: + [A, B, C, D, E, F, G, H] and fed into the logical core ids: + [0, 1, 2, 3, 4, 5, 6, 7] respectively. + + Args: + sharded_inputs: a list of lists of Tensors. The length of the + outer list determines the number of shards. Each inner list indicates + the types and shapes of the tuples in the corresponding shard. + + Returns: + A list of host-side Ops, one for each shard, that when executed together + will enqueue a full-size element of infeed. + + Raises: + ValueError: if the queue configuration has previously been frozen and the + shapes of the elements of sharded_inputs are not compatible with the + frozen configuration; or if the shapes of the elements of sharded_inputs + don't form a consistent unsharded tuple; or if the elements of a tuple + have different device constraints; or if the partition dims are invalid. + TypeError: if the queue configuration has previously been frozen and the + types of the elements of sharded_inputs are not compatible with the + frozen configuration; or if the types of the elements of sharded_inputs + don't form a consistent unsharded tuple. + """ + self.set_configuration_from_sharded_input_tensors(sharded_inputs) + number_of_replicas = len(sharded_inputs) + number_of_tuple_elements = len(sharded_inputs[0]) + + assert len(self._input_partition_dims) == number_of_tuple_elements + enqueue_ops = [] + + for replica_index in range(number_of_replicas): + flattened_inputs = sharded_inputs[replica_index] + inputs_part_dims_flat = nest.flatten_up_to(flattened_inputs, + self._input_partition_dims) + inputs_parted_iters = [ + iter(self._check_dims_and_partition_or_replicate_on_host(x, dims)) + for x, dims in zip(sharded_inputs[replica_index], + inputs_part_dims_flat) + ] + + # Find the replica_id of the host's logical core 0. + # The self._host_id is guaranteed to contain the logical core 0, + # even when num_cores_per_replica > num_cores_per_host -- the function + # caller makes sure that this host_id will must be receiving data (calls + # input_fn). + replica_id = self._device_assignment.lookup_replicas( + task_id=self._host_id, logical_core=0)[replica_index] + for logical_core in range(self._device_assignment.num_cores_per_replica): + # Places different partitions to different logic cores. 
+        # Since there can be multiple hosts per replica, we need to find
+        # the actual host (device) of this logical core.
+        device = self._device_assignment.host_device(
+            replica=replica_id, logical_core=logical_core)
+
+        with ops.device(device):
+          ordinal = self._device_assignment.tpu_ordinal(
+              replica=replica_id, logical_core=logical_core)
+          infeed_inputs = []
+          for it in inputs_parted_iters:
+            input_for_device = next(it, None)
+            if input_for_device is not None:
+              infeed_inputs.append(input_for_device)
+
+          if infeed_inputs:
+            enqueue_ops.append(
+                tpu_ops.infeed_enqueue_tuple(
+                    inputs=infeed_inputs,
+                    shapes=[x.shape for x in infeed_inputs],
+                    name="enqueue/replica_{0}/input_{1}".format(
+                        replica_index, logical_core),
+                    device_ordinal=ordinal))
+    return enqueue_ops
+
+  def _check_input_partition_dims(self, tensor, dims):
+    """Checks that the input partition dims are valid for the `Tensor`.
+
+    Args:
+      tensor: Input tensor for partitioning.
+      dims: A list of integers describing how to partition the input tensor.
+
+    Raises:
+      ValueError: If the tensor can't be partitioned by dims, or if
+        num_cores_per_replica doesn't match the number of partitions
+        (dims.prod()).
+    """
+    # No partitioning specified, so don't perform further checks.
+    if dims is None:
+      return
+
+    dims = np.array(dims)
+
+    if (dims < 1).any():
+      raise ValueError("All input partition dims must be >= 1.")
+
+    # No partitioning, so don't perform further checks.
+    if dims.prod() == 1:
+      return
+
+    if dims.prod() != self._device_assignment.num_cores_per_replica:
+      raise ValueError(
+          "The product of each input partition dim should equal "
+          "num_cores_per_replica. (dim = {}, num_cores_per_replica "
+          "= {})".format(dims, self._device_assignment.num_cores_per_replica))
+    if dims.shape[0] != tensor.shape.ndims:
+      raise ValueError(
+          "Input partition dims must have the same number of dimensions "
+          "as the `Tensor` to be partitioned. (tensor shape = {}, input "
+          "partition dims = {}).".format(tensor.shape.as_list(), dims))
+
+    tensor.shape.assert_is_fully_defined()
+
+  def _check_dims_and_partition_or_replicate_on_host(self, tensor, dims):
+    """Checks dims and partitions or replicates the input tensor.
+
+    The ops inside this function are placed on the host side.
+
+    Args:
+      tensor: The input tensor which will be partitioned or replicated.
+      dims: A list of integers describing how to partition the input tensor.
+
+    Returns:
+      An iterator of `Tensor`s or a list of partitioned tensors.
+    """
+    self._check_input_partition_dims(tensor, dims)
+    return partition_or_replicate_on_host(tensor, dims)
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu_function.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu_function.py
new file mode 100644
index 0000000000000000000000000000000000000000..136c35f4fa42987d126dfd049ccef7bd5d3fd678
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu_function.py
@@ -0,0 +1,64 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+
+"""Helper library for functions used during TPU compilation."""
+
+import contextlib
+import threading
+
+
+class TpuContext(threading.local):
+  """A context object holding state about the TPU computation being built."""
+
+  def __init__(self):
+    """Creates a new TpuContext."""
+    self._number_of_shards = None
+
+  @property
+  def number_of_shards(self):
+    return self._number_of_shards
+
+  def set_number_of_shards(self, number_of_shards):
+    self._number_of_shards = number_of_shards
+
+
+# The TPU context holds the number of shards when a sharded computation is
+# being built, or None if no computation is being built.
+_current_tpu_context = TpuContext()
+
+
+@contextlib.contextmanager
+def tpu_shard_context(number_of_shards):
+  """A context manager setting the current number of shards."""
+  if _current_tpu_context.number_of_shards is not None:
+    raise NotImplementedError("tpu_shard_context cannot be nested")
+  try:
+    _current_tpu_context.set_number_of_shards(number_of_shards)
+    yield
+  finally:
+    _current_tpu_context.set_number_of_shards(None)
+
+
+def get_tpu_context():
+  return _current_tpu_context
+
+
+# Decorator for a TPU computation func that was passed to tpu.rewrite().
+# If there is an embedded training loop in this func, trace tools will
+# generate step markers for each iteration.
+def on_device_training_loop(func):
+  # The value for this attribute comes from
+  # tensorflow.compiler.xla.DebugOptions.StepMarkerLocation.
+  setattr(func, "step_marker_location", "STEP_MARK_AT_TOP_LEVEL_WHILE_LOOP")
+  return func
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu_hardware_feature.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu_hardware_feature.py
new file mode 100644
index 0000000000000000000000000000000000000000..23ff5658e1d27253f1377f18697c8e670dd1a076
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu_hardware_feature.py
@@ -0,0 +1,81 @@
+# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""TPU hardware feature info."""
+import enum
+from tensorflow.core.protobuf.tpu import topology_pb2
+from tensorflow.python.util.tf_export import tf_export
+
+
+@tf_export("tpu.experimental.HardwareFeature")
+class HardwareFeature(object):
+  """Class holding all the feature info about the TPU."""
+
+  def __init__(self, tpu_hardware_feature_proto):
+    """Stores TPU hardware feature info.
+
+    Args:
+      tpu_hardware_feature_proto: protobuf which describes the TPU hardware
+        feature.
+    """
+    self.tpu_hardware_feature_proto = tpu_hardware_feature_proto
+
+  class EmbeddingFeature(enum.Enum):
+    """Embedding feature flag strings.
+
+    UNSUPPORTED: No embedding lookup accelerator is available on the TPU.
+    V1: Embedding lookup accelerator V1. The embedding lookup operation can
+      only be placed at the beginning of the computation, and only one
+      instance of the embedding lookup layer is allowed.
+    V2: Embedding lookup accelerator V2. The embedding lookup operation can
+      be placed anywhere in the computation, and multiple instances of the
+      embedding lookup layer are allowed.
+    """
+    UNSUPPORTED = "UNSUPPORTED"
+    V1 = "V1"
+    V2 = "V2"
+
+  @classmethod
+  def _embedding_feature_proto_to_string(cls, embedding_feature_proto):
+    """Converts the embedding feature proto to an enum value."""
+    embedding_feature_proto_to_string_map = {
+        topology_pb2.TPUHardwareFeature.EmbeddingFeature.UNSUPPORTED:
+            HardwareFeature.EmbeddingFeature.UNSUPPORTED,
+        topology_pb2.TPUHardwareFeature.EmbeddingFeature.V1:
+            HardwareFeature.EmbeddingFeature.V1,
+        topology_pb2.TPUHardwareFeature.EmbeddingFeature.V2:
+            HardwareFeature.EmbeddingFeature.V2
+    }
+    return embedding_feature_proto_to_string_map.get(
+        embedding_feature_proto, HardwareFeature.EmbeddingFeature.UNSUPPORTED)
+
+  @property
+  def embedding_feature(self):
+    """TPU embedding feature.
+
+    Returns:
+      An EmbeddingFeature enum.
+    """
+    return HardwareFeature._embedding_feature_proto_to_string(
+        self.tpu_hardware_feature_proto.embedding_feature)
+
+  @property
+  def num_embedding_devices_per_chip(self):
+    """Number of embedding accelerator devices per chip.
+
+    Returns:
+      Number of embedding devices per chip.
+    """
+    return self.tpu_hardware_feature_proto.num_embedding_devices_per_chip
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu_name_util.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu_name_util.py
new file mode 100644
index 0000000000000000000000000000000000000000..a201fe2d20830180b80fdafbbb2f0a861fecc86a
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu_name_util.py
@@ -0,0 +1,31 @@
+# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ======================================
+"""Helper functions for TPU device names."""
+
+from typing import Text
+from tensorflow.python.util.tf_export import tf_export
+
+
+@tf_export(v1=["tpu.core"])
+def core(num: int) -> Text:
+  """Returns the device name for a core in a replicated TPU computation.
+
+  Args:
+    num: the virtual core number within each replica to which operators should
+      be assigned.
+  Returns:
+    A device name, suitable for passing to `tf.device()`.
+  """
+  return "device:TPU_REPLICATED_CORE:{}".format(num)
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu_optimizer.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu_optimizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..07ad03e05a0450e2679fe2a9f5aedf3be43c2f4f
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu_optimizer.py
@@ -0,0 +1,222 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+
+"""Optimizer that implements cross-shard gradient reduction for TPU."""
+
+
+from tensorflow.python.framework import ops
+from tensorflow.python.ops.losses import losses
+from tensorflow.python.platform import tf_logging as logging
+from tensorflow.python.tpu import tpu_function
+from tensorflow.python.tpu.ops import tpu_ops
+from tensorflow.python.training import optimizer
+from tensorflow.python.util.tf_export import tf_export
+
+
+@tf_export(v1=["tpu.CrossShardOptimizer"])
+class CrossShardOptimizer(optimizer.Optimizer):
+  """An optimizer that averages gradients across TPU shards."""
+
+  def __init__(self,
+               opt,
+               reduction=losses.Reduction.MEAN,
+               name="CrossShardOptimizer",
+               group_assignment=None):
+    """Constructs a new cross-shard optimizer.
+
+    Args:
+      opt: An existing `Optimizer` to encapsulate.
+      reduction: The reduction to apply to the shard losses.
+      name: Optional name prefix for the operations created when applying
+        gradients. Defaults to "CrossShardOptimizer".
+      group_assignment: Optional 2D int32 lists with shape
+        [num_groups, num_replicas_per_group] which describes how to apply
+        the optimizer to subgroups.
+
+    Raises:
+      ValueError: If reduction is not a valid cross-shard reduction.
+    """
+    accepted_reductions = (losses.Reduction.SUM, losses.Reduction.MEAN)
+    if reduction not in accepted_reductions:
+      raise ValueError(
+          f"Argument `reduction` should be one of {accepted_reductions}. "
+          f"Received: {reduction}")
+    if not isinstance(opt, optimizer.Optimizer):
+      raise TypeError(
+          "CrossShardOptimizer only works with tf.training.Optimizer and not "
+          f"Keras Optimizer. Received: {opt}. "
+          "If you are using TPUStrategy, "
+          "Keras Optimizer will sum gradients across replicas. "
+          "If you want to average your gradients, rescale your loss with: "
+          "`loss /= global_batch_size`")
+
+    super(CrossShardOptimizer, self).__init__(False, name)
+    self._opt = opt
+    self._reduction = reduction
+    self._group_assignment = group_assignment
+
+  def _verify_and_get_subgroup_size(self, group_assignment, num_shards):
+    """Verifies group_assignment and gets the subgroup size.
+
+    Args:
+      group_assignment: list of group ids for applying the optimizer
+        to subgroups.
+      num_shards: The number of TPU shards.
+
+    Returns:
+      The size of one subgroup in group_assignment.
+
+    Raises:
+      ValueError: If group_assignment is invalid.
+    """
+    if not group_assignment:
+      return None
+    if not (isinstance(group_assignment, list) and
+            all(isinstance(i, list) for i in group_assignment)):
+      raise ValueError(
+          f"Argument `group_assignment` must be a list of lists. "
+          f"Received: {group_assignment}")
+
+    replica_ids = set()
+    for g in group_assignment:
+      for i in g:
+        replica_ids.add(i)
+
+    if set(range(num_shards)) != replica_ids:
+      raise ValueError(
+          f"Argument `group_assignment` must be a permutation of "
+          f"range({num_shards}). 
Received: {group_assignment}") + + subgroup_size_list = [len(group) for group in group_assignment] + if all(subgroup_size_list[0] == size for size in subgroup_size_list): + return subgroup_size_list[0] + else: + raise ValueError("The size of each subgroup in `group_assignment` must " + f"be equal. Received: {group_assignment}") + + def compute_gradients(self, loss, var_list=None, **kwargs): + """Compute gradients of "loss" for the variables in "var_list". + + This simply wraps `compute_gradients()` from the real optimizer. The + gradients will be aggregated in `apply_gradients()` so that user can + modify the gradients like clipping with per replica global norm if needed. + The global norm with aggregated gradients can be bad as one replica's huge + gradients can hurt the gradients from other replicas. + + When the CrossShardOptimizer is constructed with + `reduction == losses.Reduction.MEAN` (default), this function scales the + loss by `1.0 / num_shards` before computing the gradients. Assuming the + optimizer uses the default implementation of `compute_gradients()`, the + gradients of the scaled loss are scaled by `1.0 / num_shards` compared to + the gradients of the original loss. This scaling factor is important because + `apply_gradients()` sums gradients across shards, rather than averaging + them. However, the scaling factor must be taken into account when clipping + the norm of the gradients or performing other postprocessing. + + Args: + loss: A Tensor containing the value to minimize. + var_list: Optional list or tuple of `tf.Variable` to update to minimize + `loss`. Defaults to the list of variables collected in the graph + under the key `GraphKey.TRAINABLE_VARIABLES`. + **kwargs: Keyword arguments for compute_gradients(). + + Returns: + A list of (gradient, variable) pairs. + + Raises: + ValueError: If not within a tpu_shard_context or group_assignment is + invalid. + """ + num_shards = tpu_function.get_tpu_context().number_of_shards + if num_shards is None: + logging.warning( + "CrossShardOptimizer should be used within a tpu_shard_context, but " + "got unset number_of_shards. Assuming 1.") + num_shards = 1 + + subgroup_size = self._verify_and_get_subgroup_size(self._group_assignment, + num_shards) + + if num_shards > 1 and self._reduction == losses.Reduction.MEAN: + if self._group_assignment: + scale = 1.0 / subgroup_size + else: + scale = 1.0 / num_shards + loss *= scale + + return self._opt.compute_gradients(loss, var_list=var_list, **kwargs) + + def apply_gradients(self, grads_and_vars, global_step=None, name=None): + """Apply gradients to variables. + + Calls tpu_ops.cross_replica_sum() to sum gradient contributions across + replicas, and then applies the real optimizer. + + Args: + grads_and_vars: List of (gradient, variable) pairs as returned by + compute_gradients(). + global_step: Optional Variable to increment by one after the + variables have been updated. + name: Optional name for the returned operation. Default to the + name passed to the Optimizer constructor. + + Returns: + An `Operation` that applies the gradients. If `global_step` was not None, + that operation also increments `global_step`. + + Raises: + ValueError: If the grads_and_vars is malformed. 
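+
+    Example (an illustrative sketch; assumes a TF1-style training setup
+    where `loss` and `global_step` already exist):
+
+      opt = CrossShardOptimizer(
+          tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.1))
+      grads_and_vars = opt.compute_gradients(loss)
+      train_op = opt.apply_gradients(grads_and_vars, global_step)
+
+    Each non-None gradient is summed across replicas with
+    cross_replica_sum before the wrapped optimizer applies it.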
+ """ + summed_grads_and_vars = [] + for (grad, var) in grads_and_vars: + if grad is None: + summed_grads_and_vars.append((grad, var)) + else: + with ops.colocate_with(grad): + summed_grads_and_vars.append((tpu_ops.cross_replica_sum( + grad, self._group_assignment), var)) + return self._opt.apply_gradients(summed_grads_and_vars, global_step, name) + + def get_slot(self, *args, **kwargs): + """Return a slot named "name" created for "var" by the Optimizer. + + This simply wraps the get_slot() from the actual optimizer. + + Args: + *args: Arguments for get_slot(). + **kwargs: Keyword arguments for get_slot(). + + Returns: + The `Variable` for the slot if it was created, `None` otherwise. + """ + return self._opt.get_slot(*args, **kwargs) + + def get_slot_names(self, *args, **kwargs): + """Return a list of the names of slots created by the `Optimizer`. + + This simply wraps the get_slot_names() from the actual optimizer. + + Args: + *args: Arguments for get_slot(). + **kwargs: Keyword arguments for get_slot(). + + Returns: + A list of strings. + """ + return self._opt.get_slot_names(*args, **kwargs) + + def variables(self): + """Forwarding the variables from the underlying optimizer.""" + return self._opt.variables() diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu_sharding.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu_sharding.py new file mode 100644 index 0000000000000000000000000000000000000000..3a78f45e5292931042b4fc1faae7c36006979bce --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu_sharding.py @@ -0,0 +1,302 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= +"""Helper library for sharding during TPU compilation.""" + + +from tensorflow.python.framework import tensor_shape + +_DEFAULT_NUMBER_OF_SHARDS = 1 +_DEFAULT_SHARD_DIMENSION = 0 + + +# TODO(b/36777903) change other parts of tpu.py to use this class. +class ShardingPolicy(object): + """An object use to hold the sharding policy for a Tensor.""" + + def __init__(self): + self._number_of_shards = None + self._number_of_partitions = 1 + self._shard_dimension = None + self._frozen = False + + def __str__(self): + if self.number_of_shards is None or self.shard_dimension is None: + return "ShardingPolicy(unset)" + else: + return ("ShardingPolicy(%d shards dimension %d)" % + (self.number_of_shards, self.shard_dimension)) + + def _fill_default_values(self): + if self._number_of_shards is None: + self._number_of_shards = _DEFAULT_NUMBER_OF_SHARDS + if self._shard_dimension is None: + self._shard_dimension = tensor_shape.as_dimension( + _DEFAULT_SHARD_DIMENSION) + + def freeze(self): + """Prevents further modification to the sharding policy. + + Any values that have not been set when freeze is called are set to + defaults. If the ShardingPolicy is already frozen, this is a NoOp. 
+ """ + if not self._frozen: + self._fill_default_values() + self._frozen = True + + @property + def number_of_shards(self): + """Returns the number of shards in the policy or None if unspecified.""" + return self._number_of_shards + + def set_number_of_shards(self, number_of_shards): + """Sets the number of shards for the current policy. + + If the policy has been frozen then number_of_shards must match the + existing setting. + + Args: + number_of_shards: The number of shards to use in the policy. + + Raises: + ValueError: If the policy has been frozen and number_of_shards + differs from the frozen value; or number_of_shards <= 0. + """ + if self._frozen: + if self._number_of_shards != number_of_shards: + raise ValueError( + f"Can't set sharding policy to use {number_of_shards} shards since " + f"it has been frozen to use {self._number_of_shards}") + else: + if number_of_shards > 0: + self._number_of_shards = number_of_shards + else: + raise ValueError( + f"Can't set sharding policy to use {number_of_shards} shards; " + "value must be > 0") + + @property + def number_of_partitions(self): + """Returns the number of partitions of the policy or None if unspecified.""" + return self._number_of_partitions + + def set_number_of_partitions(self, number_of_partitions): + """Sets the number of partitions for the current policy. + + If the policy has been frozen then shard_dimension must match the + existing setting. + + Args: + number_of_partitions: The number of partitions to use in the policy. + + Raises: + ValueError: If the policy has been frozen and shard_dimension + differs from the frozen value. + """ + if self._frozen: + if self._number_of_partitions != number_of_partitions: + raise ValueError( + f"Can't set number_of_partitions to {number_of_partitions} since " + f"it has been frozen to use {self._number_of_partitions}.") + else: + self._number_of_partitions = number_of_partitions + + @property + def shard_dimension(self): + """Returns the shard dimension of the policy or None if unspecified.""" + return self._shard_dimension + + def set_shard_dimension(self, shard_dimension): + """Sets the shard dimension for the current policy. + + If the policy has been frozen then shard_dimension must match the + existing setting. + + Args: + shard_dimension: The shard dimension to use in the policy. + + Raises: + ValueError: If the policy has been frozen and shard_dimension + differs from the frozen value, or shard_dimension can't be + interpreted as a Dimension. + """ + if self._frozen: + if self._shard_dimension != shard_dimension: + raise ValueError( + "Can't set shard dimension to %d since it has been frozen to " + "use %d." % (shard_dimension, self._shard_dimension)) + else: + self._shard_dimension = tensor_shape.as_dimension(shard_dimension) + + def merge(self, other): + """Merges the policy of another policy into the current policy. + + Args: + other: The policy to merge into this one. + + Raises: + ValueError: If this policy has been frozen and the merge conflicts with + the frozen policy. + """ + if other.number_of_shards is not None: + self.set_number_of_shards(other.number_of_shards) + if other.shard_dimension is not None: + self.set_shard_dimension(other.shard_dimension) + + def get_unpartitioned_shape(self, shape): + """Returns the shape of an unpartitioned Tensor. + + When given the shape of a 'sharded-size' Tensor, returns the shape + of the full shape of its unpartitioned Tensor. + + Args: + shape: The shape of the sharded Tensor. 
+ + Returns: + The shape of the unpartitioned version of the Tensor. + + Raises: + ValueError: if shape has unknown sharded dimension + """ + shape = tensor_shape.as_shape(shape) + dims = shape.as_list() + if (self._shard_dimension is None or self._number_of_partitions is None or + not dims): + return None + if dims[self._shard_dimension] is None: + raise ValueError(f"Shape {shape.as_list()} must have a fixed size for " + f"dimension {self._shard_dimension} that is known. ") + if self._number_of_partitions > 1: + dims[self._shard_dimension] *= self._number_of_partitions + return tensor_shape.as_shape(dims) + + def get_sharded_shape(self, shape, shard_index=None): + """Returns the shape of a shard of a full Tensor. + + When given the shape of a 'full-size' Tensor, returns the shape of + the sub-Tensor after it has been sharded. Freezes the policy if it + has not yet been frozen. + + Args: + shape: The shape of the full-size Tensor to be sharded. + shard_index: The index of the shard whose shape should be returned. + shard_index can be None for sharding policies that use the same shape + for every shard. + + Returns: + The shape of the sharded version of the Tensor. + + Raises: + ValueError: If shard_index is None when shards are of different + shapes; or shard_index is not None and + !(0<=shard_index<number_of_shards). + """ + self.freeze() + if shard_index is not None: + if shard_index < 0 or shard_index >= self.number_of_shards: + raise ValueError( + f"Requested shard_index {shard_index}, but shard_index must be in " + f"[0,{self._number_of_shards}).") + shape = tensor_shape.as_shape(shape) + if self._number_of_shards == 1: + # Don't do anything when there's only one shard. + return shape + ndims = shape.ndims + if ndims is None: + raise ValueError(f"Shape {shape} must be a known shape.") + if ndims <= self._shard_dimension: + raise ValueError( + f"Shape {shape.as_list()} does not contain shard_dimension " + f"{self._shard_dimension}") + dims = shape.as_list() + if dims[self._shard_dimension] is None: + raise ValueError( + f"Shape {shape.as_list()} must have a fixed size for dimension " + f"{self._shard_dimension} that is known at construction time.") + if (dims[self._shard_dimension] % self._number_of_shards) != 0: + raise ValueError( + f"Shape {shape.as_list()} cannot be sharded {self._number_of_shards} " + f"ways along dimension {self._shard_dimension}") + dims[self._shard_dimension] //= self._number_of_shards + return tensor_shape.TensorShape(dims) + + def _unshard_shape(self, shape): + """Return the unsharded shape that would generate a given sharded shape. + + Args: + shape: the sharded shape to unshard + + Returns: + The unsharded shape. + + Raises: + ValueError: if shape is unknown or does not contain + self.shard_dimension + TypeError: if shape is not convertible to a TensorShape + """ + shape = tensor_shape.as_shape(shape) + if self._number_of_shards == 1: + # Don't do anything when there's only one shard. + return shape + ndims = shape.ndims + if ndims is None: + raise ValueError(f"Shape {shape} must be statically known.") + if ndims <= self._shard_dimension: + raise ValueError(f"Shape {shape.as_list()} does not contain " + f"shard_dimension {self._shard_dimension}. " + f"Rank is too small.") + dims = shape.as_list() + dims[self._shard_dimension] *= self._number_of_shards + return tensor_shape.TensorShape(dims) + + def get_unsharded_shape(self, shapes): + """Returns the shape of an unsharded Tensor given a list of shards. + + When given a list of shapes of shards, returns the shape of the + unsharded Tensor that would generate the shards.
Sets defaults for the + policy if number_of_shards or shard_dimension is None. + + Args: + shapes: The shapes of the Tensor shards to be combined. + + Returns: + The shape of the unsharded version of the Tensor. + + Raises: + ValueError: if shapes is not a list of length + self.number_of_shards; or any element of shapes is not a valid + shape consistent with the sharding policy; or the list of + shapes is not a valid sharding of a full shape. + TypeError: if an element of shapes is not convertible to a + TensorShape + """ + self._fill_default_values() + if len(shapes) != self.number_of_shards: + raise ValueError( + f"Shapes {shapes} is length {len(shapes)} but must be a list of " + f"length number_of_shards={self.number_of_shards}") + unsharded_shapes = [self._unshard_shape(s) for s in shapes] + for i in range(self.number_of_shards - 1): + if not unsharded_shapes[i].is_compatible_with( + unsharded_shapes[self.number_of_shards - 1]): + raise ValueError( + f"Sharded shapes {shapes} are not consistent shards of a full shape " + f"sharded {self.number_of_shards} ways along " + f"dimension {self.shard_dimension}.") + return unsharded_shapes[0] diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu_strategy_util.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu_strategy_util.py new file mode 100644 index 0000000000000000000000000000000000000000..e122d4816ae6b1182d6e590bc961c5a3aa886849 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu_strategy_util.py @@ -0,0 +1,305 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""TPU specific APIs to be used in conjunction with TPU Strategy.""" + +import gc + +from tensorflow.core.protobuf import config_pb2 +from tensorflow.python.client import session as session_lib +from tensorflow.python.distribute.cluster_resolver import cluster_resolver as cluster_resolver_lib +from tensorflow.python.eager import context +from tensorflow.python.eager import def_function +from tensorflow.python.eager import monitoring +from tensorflow.python.framework import device +from tensorflow.python.framework import errors +from tensorflow.python.framework import ops +from tensorflow.python.platform import tf_logging as logging +from tensorflow.python.tpu import topology +from tensorflow.python.tpu import tpu +from tensorflow.python.util import compat + + +_INITIALIZED_TPU_SYSTEMS = {} +_LOCAL_MASTERS = ("", "local") + + +_tpu_worker_address = monitoring.StringGauge( + "/tensorflow/tpu/worker_address", + "The worker address that the coordinator/client connects to.", "address") + + +def initialize_tpu_system_impl(cluster_resolver, tpu_cluster_resolver_cls): + """Implementation for tpu.experimental.initialize_tpu_system. + + Kept separate to avoid tpu_oss code duplication. + + Initialize the TPU devices. 
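+ + Example (illustrative; assumes a reachable TPU and uses the public wrapper + rather than this implementation function): + + resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu="") + topology = tf.tpu.experimental.initialize_tpu_system(resolver) + num_tpus = topology.num_tasks * topology.num_tpus_per_task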
+ + Args: + cluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver, + which provides information about the TPU cluster. + tpu_cluster_resolver_cls: a reference to + tf.distribute.cluster_resolver.TPUClusterResolver so that an instance + of it can be initialized if cluster_resolver is None. + Returns: + The tf.tpu.Topology object for the topology of the TPU cluster. If called + inside tf.function, it returns the serialized topology object instead. + + Raises: + RuntimeError: If running inside a tf.function. + NotFoundError: If no TPU devices found in eager mode. + TypeError: If tpu_cluster_resolver_cls is + not tf.distribute.cluster_resolver.TPUClusterResolver. + """ + # check that tpu_cluster_resolver_cls is a + # tf.distribute.cluster_resolver.TPUClusterResolver + if tpu_cluster_resolver_cls is None or not issubclass( + tpu_cluster_resolver_cls, cluster_resolver_lib.ClusterResolver + ) or not hasattr(tpu_cluster_resolver_cls, "tpu_hardware_feature"): + raise TypeError( + "tpu_cluster_resolver_cls is not" + " tf.distribute.cluster_resolver.TPUClusterResolver.") + # Deallocate all TPU buffers by clearing out eager context caches and + # triggering garbage collection, to avoid keeping invalid TPU buffers + # around after the TPU system is reinitialized. + logging.info("Deallocate tpu buffers before initializing tpu system.") + context.context()._clear_caches() # pylint: disable=protected-access + context.context().clear_kernel_cache() + gc.collect() + + job = None + if cluster_resolver is None: + # If no cluster resolver is specified, and running eagerly, execute the init + # ops in the current device scope. + if context.executing_eagerly(): + curr_device = device.DeviceSpec.from_string(context.context().device_name) + if curr_device.job is not None: + job = "{}/replica:0/task:0".format(curr_device.job) + + cluster_resolver = tpu_cluster_resolver_cls("") + assert isinstance(cluster_resolver, tpu_cluster_resolver_cls) + + tpu_name = compat.as_text(cluster_resolver._tpu) # pylint: disable=protected-access + if tpu_name in _INITIALIZED_TPU_SYSTEMS: + logging.warning( + "TPU system %s has already been initialized. " + "Reinitializing the TPU can cause previously created " + "variables on TPU to be lost.", tpu_name) + + logging.info("Initializing the TPU system: %s", tpu_name) + + # This function is structured the way it is for the following non-intuitive + # reasons. + # tpu.initialize_system creates a dummy op whose sole purpose is to trigger + # DistributedTPURewritePass. This pass actually adds real ops that + # initialize the TPU system. Thus, we can't simply run tpu.initialize_system + # eagerly. We need to wrap it in defun and trigger the rewrite passes on it. + if tpu_name not in _LOCAL_MASTERS: + # Explicitly place the tpu.initialize_system in the first worker to + # avoid an error from the output node matching multiple devices. + job = "{}/replica:0/task:0".format(cluster_resolver.get_job_name()) + + if context.executing_eagerly(): + @def_function.function(autograph=False) + def _tpu_init_fn(): + # In TF1, we usually close chips when compilation fails to clear the data + # in infeed. In TF2, we don't need to do this because infeed is no longer + # used, so users can recover from TPU compilation failures more smoothly. + # The same holds for the cancellation of a TPU execution.
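+ # Keeping the chips open also means that a failed compilation or a + # cancelled step leaves the system initialized, so the next call can + # proceed without a full re-initialization.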
+ return tpu.initialize_system( + job=job, + compilation_failure_closes_chips=False, + tpu_cancellation_closes_chips=False) + + # The TPU_SYSTEM device must match the device used in tpu.initialize_system + # exactly, otherwise you can get errors if there are multiple TPU_SYSTEM + # devices available. + run_eagerly = def_function.functions_run_eagerly() + if run_eagerly: + logging.warning( + "It looks like tf.function behavior was disabled, perhaps using" + " tf.config.run_functions_eagerly." + " tf.tpu.experimental.initialize_tpu_system requires tf.function to" + " work. This primitive will override the disable." + ) + def_function.run_functions_eagerly(False) + try: + with ops.device(tpu._tpu_system_device_name(job)): # pylint: disable=protected-access + output = _tpu_init_fn() + context.async_wait() + except errors.InvalidArgumentError as e: + raise errors.NotFoundError( + None, None, + "TPUs not found in the cluster. Failed in initialization: " + + str(e)) + finally: + if run_eagerly is not None: + def_function.run_functions_eagerly(run_eagerly) + # Clear out the eager context caches since the memory is invalid now. + context.context()._initialize_logical_devices() # pylint: disable=protected-access + + serialized_topology = output.numpy() + elif not ops.executing_eagerly_outside_functions(): + master = cluster_resolver.master() + cluster_spec = cluster_resolver.cluster_spec() + + session_config = config_pb2.ConfigProto(allow_soft_placement=True) + if cluster_spec: + session_config.cluster_def.CopyFrom(cluster_spec.as_cluster_def()) + + with ops.Graph().as_default(): + with session_lib.Session(config=session_config, target=master) as sess: + serialized_topology = sess.run(tpu.initialize_system()) + else: + with ops.device(tpu._tpu_system_device_name(job)): # pylint: disable=protected-access + serialized_topology = tpu.initialize_system( + job=job, compilation_failure_closes_chips=False) + # If initialize_tpu_system is called inside tf.function, we only return + # the serialized topology object as the tf.tpu.Topology object has to be + # constructed in eager mode. + return serialized_topology + + logging.info("Finished initializing TPU system.") + tpu_topology = topology.Topology(serialized=serialized_topology) + cluster_resolver.set_tpu_topology(serialized_topology) + _INITIALIZED_TPU_SYSTEMS[tpu_name] = tpu_topology + + # Record the address of the TPU worker-0 that the coordinator connects to. + # This can be used to associate the TPU worker with the right coordinator when + # aggregating the metrics for the application. An example of the address: + # /bns/mb/borg/mb/bns/chienchunh/chienchunh_group_49640234.1.tfm_train_tpu_worker/0 + _tpu_worker_address.get_cell("address").set(cluster_resolver.get_master()) + + return tpu_topology + + +def get_initialized_tpu_systems(): + """Returns all currently initialized tpu systems. + + Returns: + A dictionary, with tpu name as the key and the tpu topology as the value. + """ + return _INITIALIZED_TPU_SYSTEMS.copy() + + +def shutdown_tpu_system_impl(cluster_resolver, tpu_cluster_resolver_cls): + """Implementation for tpu.experimental.shutdown_tpu_system. + + Kept separate to avoid tpu_oss code duplication. + + Shuts down the TPU devices. + + This will clear all caches, even those that are maintained through sequential + calls to tf.tpu.experimental.initialize_tpu_system, such as the compilation + cache. + + Args: + cluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver, + which provides information about the TPU cluster. 
+ tpu_cluster_resolver_cls: a reference to + tf.distribute.cluster_resolver.TPUClusterResolver so that an instance + of it can be initialized if cluster_resolver is None. + + Raises: + RuntimeError: If no TPU devices found for eager execution or if run in a + tf.function. + TypeError: If tpu_cluster_resolver_cls is + not tf.distribute.cluster_resolver.TPUClusterResolver. + """ + # check that tpu_cluster_resolver_cls is a + # tf.distribute.cluster_resolver.TPUClusterResolver + if tpu_cluster_resolver_cls is None or not issubclass( + tpu_cluster_resolver_cls, cluster_resolver_lib.ClusterResolver + ) or not hasattr(tpu_cluster_resolver_cls, "tpu_hardware_feature"): + raise TypeError( + "tpu_cluster_resolver_cls is not" + " tf.distribute.cluster_resolver.TPUClusterResolver.") + + job = None + if cluster_resolver is None: + # If no cluster resolver is specified, and running eagerly, execute the + # shutdown ops in the current device scope. + if context.executing_eagerly(): + curr_device = device.DeviceSpec.from_string(context.context().device_name) + if curr_device.job is not None: + job = "{}/replica:0/task:0".format(curr_device.job) + + cluster_resolver = tpu_cluster_resolver_cls("") + assert isinstance(cluster_resolver, tpu_cluster_resolver_cls) + + tpu_name = compat.as_text(cluster_resolver._tpu) # pylint: disable=protected-access + if tpu_name not in _INITIALIZED_TPU_SYSTEMS: + logging.warning("You are shutting down a TPU system %s that has not been " + "initialized." % tpu_name) + + logging.info("Shutting down the TPU system: %s", tpu_name) + + if context.executing_eagerly(): + # This function is structured the way it is for the following non-intuitive + # reasons. + # tpu.shutdown_system creates a dummy op whose sole purpose is to trigger + # DistributedTPURewritePass. This pass actually adds real ops that + # shut down the TPU system. Thus, we can't simply run tpu.shutdown_system + # eagerly. We need to wrap it in defun and trigger the rewrite passes on it. + if tpu_name not in _LOCAL_MASTERS: + # Explicitly place the tpu.shutdown_system in the first worker to + # avoid an error from the output node matching multiple devices. + job = "{}/replica:0/task:0".format(cluster_resolver.get_job_name()) + + @def_function.function(autograph=False) + def _tpu_shutdown_fn(): + tpu.shutdown_system(job=job) + + # The TPU_SYSTEM device must match the device used in tpu.shutdown_system + # exactly, otherwise you can get errors if there are multiple TPU_SYSTEM + # devices available. + run_eagerly = def_function.functions_run_eagerly() + if run_eagerly: + logging.warning( + "It looks like tf.function behavior was disabled, perhaps using" + " tf.config.run_functions_eagerly." + " tf.tpu.experimental.shutdown_tpu_system requires tf.function to" + " work. This primitive will override the disable." + ) + def_function.run_functions_eagerly(False) + try: + with ops.device(tpu._tpu_system_device_name(job)): # pylint: disable=protected-access + _tpu_shutdown_fn() + finally: + if run_eagerly is not None: + def_function.run_functions_eagerly(run_eagerly) + + # Clear out the eager context caches since the memory is invalid now.
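+ # (This mirrors the cache clearing done when the system is initialized: + # tensors that lived on the TPU are dangling references once it is down.)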
+ logging.info("Clearing out eager caches") + context.context()._clear_caches() # pylint: disable=protected-access + context.context().clear_kernel_cache() + elif not ops.executing_eagerly_outside_functions(): + master = cluster_resolver.master() + cluster_spec = cluster_resolver.cluster_spec() + + session_config = config_pb2.ConfigProto(allow_soft_placement=True) + if cluster_spec: + session_config.cluster_def.CopyFrom(cluster_spec.as_cluster_def()) + + with ops.Graph().as_default(): + with session_lib.Session(config=session_config, target=master) as sess: + sess.run(tpu.shutdown_system()) + else: + raise RuntimeError( + "initialize_tpu_system is not supported within " + "tf.functions. You should call initialize_tpu_system outside of your tf.function. " + ) + + logging.info("Finished shutting down TPU system.") + if tpu_name in _INITIALIZED_TPU_SYSTEMS: + del _INITIALIZED_TPU_SYSTEMS[tpu_name] diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu_system_metadata.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu_system_metadata.py new file mode 100644 index 0000000000000000000000000000000000000000..82f906c6160262c0c8bdea7c13bd49232032f0fa --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tpu_system_metadata.py @@ -0,0 +1,227 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# =================================================================== +"""TPU system metadata and associated tooling.""" + +import collections + +from tensorflow.core.protobuf import config_pb2 +from tensorflow.python.client import session as session_lib +from tensorflow.python.distribute import device_util +from tensorflow.python.eager import context +from tensorflow.python.framework import config +from tensorflow.python.framework import device as tf_device +from tensorflow.python.framework import errors +from tensorflow.python.framework import ops +from tensorflow.python.platform import tf_logging as logging +from tensorflow.python.tpu import tpu +from tensorflow.python.util.tf_export import tf_export + +_PINGING_MASTER_TIMEOUT_IN_MS = 5 * 60 * 1000 # 10 min +_RETRY_TIMES = 12 * 24 # 1 day +_INITIAL_TPU_SYSTEM_TIMEOUT_IN_MS = 300 * 1000 # 5 mins + +_DEFAULT_JOB_NAME = 'tpu_worker' +_DEFAULT_COORDINATOR_JOB_NAME = 'coordinator' +_LOCAL_MASTERS = ('', 'local') + + +@tf_export('tpu.experimental.TPUSystemMetadata') +class TPUSystemMetadata( + collections.namedtuple('TPUSystemMetadata', [ + 'num_cores', + 'num_hosts', + 'num_of_cores_per_host', + 'topology', + 'devices', + ])): + """Describes some metadata about the TPU system. + + Attributes: + num_cores: interger. Total number of TPU cores in the TPU system. + num_hosts: interger. Total number of hosts (TPU workers) in the TPU system. + num_of_cores_per_host: interger. Number of TPU cores per host (TPU worker). + topology: an instance of `tf.tpu.experimental.Topology`, which describes the + physical topology of TPU system. 
+ devices: a tuple of strings, which describes all the TPU devices in the + system. + """ + + def __new__(cls, num_cores, num_hosts, num_of_cores_per_host, topology, + devices): + return super(TPUSystemMetadata, + cls).__new__(cls, num_cores, num_hosts, num_of_cores_per_host, + topology, devices) + + +def _query_tpu_system_metadata(master_address, cluster_def=None, + query_topology=False): + """Automatically detects the TPU system metadata in the system.""" + tpu_core_count = 0 + devices = [] + device_dict = collections.defaultdict(list) + + if context.executing_eagerly(): + logical_devices = config.list_logical_devices() + + # We want the output type to match in both eager and session mode + devices = [session_lib._DeviceAttributes(device_util.canonicalize(d.name), # pylint: disable=protected-access + d.device_type, 0, 0) + for d in logical_devices] + else: + # TODO(b/120564445): Replace with standard library for retries. + retry_count = 1 + while True: + logging.info('Querying Tensorflow master (%s) for TPU system metadata.', + master_address) + try: + with ops.Graph().as_default(): + with session_lib.Session( + master_address, + config=get_session_config_with_timeout( + _PINGING_MASTER_TIMEOUT_IN_MS, + cluster_def)) as sess: + devices = sess.list_devices() + break + except errors.DeadlineExceededError: + msg = ('Failed to connect to the Tensorflow master. The TPU worker may ' + 'not be ready (still scheduling) or the Tensorflow master ' + 'address is incorrect: got (%s).' % + (master_address)) + + # TODO(xiejw): For local or grpc master we might not need retry logic + # here. + if retry_count <= _RETRY_TIMES: + logging.warning('%s', msg) + logging.warning('Retrying (%d/%d).', retry_count, _RETRY_TIMES) + retry_count += 1 + else: + raise ValueError(msg) + + for device in devices: + spec = tf_device.DeviceSpec.from_string(device.name) + if spec.device_type == 'TPU': + device_dict[spec.task].append(spec.device_index) + tpu_core_count += 1 + + num_of_cores_per_host = 0 + if tpu_core_count: + num_cores_per_host_set = set( + [len(core_ids) for core_ids in device_dict.values()]) + if len(num_cores_per_host_set) != 1: + raise RuntimeError( + 'The number of TPU cores on each host is not the same. This should ' + 'not happen. devices: {}'.format(devices)) + num_of_cores_per_host = num_cores_per_host_set.pop() + + topology = None + if query_topology: + if not tpu_core_count: + raise RuntimeError( + 'Cannot find any TPU cores in the system (master address {}). ' + 'This usually means the master address is incorrect or the ' + 'TPU worker has some problems. Available devices: {}'.format( + master_address, devices)) + + topology = _obtain_topology(master_address, cluster_def) + + # We sort the metadata devices so that downstream users get a sorted list + # for creating mirrored variables correctly.
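+ # The key sorts lexicographically by (job, replica, task, device type, + # device index), so every client enumerates the devices in the same order.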
+ def _sort_key(device): + spec = tf_device.DeviceSpec.from_string(device.name) + return (spec.job, spec.replica, spec.task, spec.device_type, + spec.device_index) + devices = tuple(sorted(devices, key=_sort_key)) + + metadata = TPUSystemMetadata( + num_cores=tpu_core_count, + num_hosts=len(device_dict), + num_of_cores_per_host=num_of_cores_per_host, + topology=topology, + devices=devices) + + if tpu_core_count: + logging.info('Found TPU system:') + logging.info('*** Num TPU Cores: %d', metadata.num_cores) + logging.info('*** Num TPU Workers: %d', metadata.num_hosts) + logging.info('*** Num TPU Cores Per Worker: %d', + metadata.num_of_cores_per_host) + for device in metadata.devices: + logging.info('*** Available Device: %s', device) + else: + logging.info('Failed to find TPU: %s', metadata) + return metadata + + +def _obtain_topology(master_address, cluster_def): + """Obtains TPU fabric topology.""" + try: + logging.info('Initializing TPU system (master: %s) to fetch topology ' + 'for model parallelism. This might take a while.', + master_address) + with ops.Graph().as_default(): + session_config = get_session_config_with_timeout( + _INITIAL_TPU_SYSTEM_TIMEOUT_IN_MS, cluster_def) + with session_lib.Session( + master_address, config=session_config) as sess: + topology = sess.run(tpu.initialize_system()) + return topology + except errors.DeadlineExceededError: + raise ValueError( + 'Failed to initialize TPU system with master (%s). ' + 'Please double check the TPU system is functional.' % ( + master_address)) + + +def get_session_config_with_timeout(timeout_in_ms, cluster_def): + """Returns a session config given a timeout (in milliseconds) and a cluster configuration.""" + config_proto = config_pb2.ConfigProto( + operation_timeout_in_ms=timeout_in_ms, cluster_def=cluster_def) + return config_proto + + +def master_job(master, cluster_def): + """Returns the canonical job name to use to place TPU computations on. + + Args: + master: A `string` representing the TensorFlow master to use. + cluster_def: A ClusterDef object describing the TPU cluster. + + Returns: + A string containing the job name, or None if no job should be specified. + + Raises: + ValueError: If the user needs to specify a tpu_job_name, because we are + unable to infer the job name automatically, or if the user-specified job + names are inappropriate. + """ + # If the user specifies the tpu_job_name, use that. + + if master in _LOCAL_MASTERS: + return None + + if (not cluster_def or not cluster_def.job): + return _DEFAULT_JOB_NAME + job_names = set(job.name for job in cluster_def.job) + if _DEFAULT_JOB_NAME in job_names: + # b/37868888 tracks allowing ClusterSpec propagation to reuse job names. + raise ValueError('Currently, tpu_worker is not an allowed job name.') + if len(job_names) == 1: + return cluster_def.job[0].name + if len(job_names) == 2: + if _DEFAULT_COORDINATOR_JOB_NAME in job_names: + job_names.remove(_DEFAULT_COORDINATOR_JOB_NAME) + return job_names.pop() + # TODO(b/67716447): Include more sophisticated heuristics. + raise ValueError('Could not infer TPU job name.') diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/training_loop.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/training_loop.py new file mode 100644 index 0000000000000000000000000000000000000000..ea379e92b5740cdcd72f3d34a7f6f09d6e0c9c9a --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/training_loop.py @@ -0,0 +1,229 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= + +"""Library for constructing a training loop, suitable for TPUs.""" + +from typing import Any, Callable, Iterable, List, Optional, Union + +from tensorflow.python.compiler.xla import xla +from tensorflow.python.framework import ops +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import while_loop as while_loop_tf +from tensorflow.python.tpu import tensor_tracer +from tensorflow.python.tpu import tpu_feed +from tensorflow.python.tpu import tpu_function +from tensorflow.python.types import core as core_types + + +def while_loop(condition: Callable[..., Any], + body: Callable[..., Any], + inputs: Optional[List[Any]] = None, + infeed_queue: Optional[tpu_feed.InfeedQueue] = None, + name: Any = None) -> Any: + """Builds a training loop for TPUs. + + The set of loop-carried tensors corresponds to `inputs`. Both + `condition` and `body` take the current value of the loop-carried + tensors. `body` additionally takes a tuple of infeed from + infeed_queue if infeed_queue is not None. `condition` must return a + single boolean value that determines whether iteration + continues. `body` must return an updated list of values for the + loop-carried tensors. + + Args: + condition: a Python function that builds the loop condition. + body: a Python function that builds the loop body. + inputs: a list of initial values passed into the training loop, or None + (equivalent to an empty list). + infeed_queue: if not None, the infeed queue from which to append a tuple of + arguments as inputs to body. + name: (Deprecated) Does nothing. + + Returns: + The final values of the loop-carried tensors. + + Raises: + TypeError: if body or condition has the wrong signature. + """ + del name + # Converts inputs to Tensors. + inputs = [] if inputs is None else [ops.convert_to_tensor(x) for + x in inputs] + input_types = [x.dtype for x in inputs] + input_arity = len(inputs) + + body_arg_error = xla.check_function_argument_count( + body, input_arity, infeed_queue) + if body_arg_error is not None: + if infeed_queue is None: + raise TypeError( + f"Supplied loop body function cannot be called with the specified " + f"inputs. You specified {input_arity} inputs: {[i.name for i in inputs]}, but the loop body needs {body_arg_error}" + ) + else: + raise TypeError( + f"Supplied loop body function cannot be called with the specified " + f"inputs. You specified {input_arity} inputs: {[i.name for i in inputs]} and {infeed_queue.number_of_tuple_elements} additional inputs from " + f"infeed, but the computation needs {body_arg_error}") + condition_arg_error = xla.check_function_argument_count( + condition, input_arity, None) + if condition_arg_error is not None: + if infeed_queue is None: + raise TypeError( + f"Supplied loop condition function cannot be called with the " + f"specified inputs. 
You specified {input_arity} inputs: {[i.name for i in inputs]}, but the loop " + f"condition needs {condition_arg_error}") + else: + raise TypeError( + f"Supplied loop condition function cannot be called with the " + f"specified inputs. You specified {input_arity} inputs: {[i.name for i in inputs]}, but the loop " + f"condition needs {condition_arg_error}. Note that infeed is not passed to the loop condition." + ) + + def condition_wrapper(*inputs): + # Discards the dummy output added for arity-0 loops. + if input_arity == 0: + inputs = [] + return condition(*inputs) + + def body_wrapper(*inputs): + """Wrapper around `body` that handles infeed queues and control deps.""" + inputs = list(inputs) + + # Discards the dummy output added for arity-0 loops. + if input_arity == 0: + inputs = [] + + # Runs `body` with the dequeue_ops appended. + if infeed_queue: + number_of_shards = tpu_function.get_tpu_context().number_of_shards + if number_of_shards is None: + raise ValueError("Can't build training loop with infeed when there is " + "no tpu_shard_context. Are you building a loop or " + "graph directly rather than from inside tpu.rewrite, " + "tpu.batch_parallel, tpu.shard, or tpu.replicate?") + infeed_queue.set_number_of_shards(number_of_shards) + dequeue_ops = [d for d in infeed_queue.generate_dequeue_op()] + else: + dequeue_ops = [] + outputs = body(*(inputs + dequeue_ops)) + + # If the computation only returned one value, make it a tuple. + if not isinstance(outputs, (list, tuple)): + outputs = (outputs,) + + outputs = [ + o if isinstance(o, ops.Operation) else ops.convert_to_tensor(o) + for o in outputs + ] + + # Separates the returned Operations and Tensors. + output_operations = [o for o in outputs if isinstance(o, ops.Operation)] + output_tensors = [o for o in outputs + if not isinstance(o, ops.Operation)] + + if outputs != output_tensors + output_operations: + raise ValueError( + "TPU training loop body must return zero or more Tensor values " + "followed by zero or more Operations.") + + output_types = [op.dtype for op in output_tensors] + if input_types != output_types: + raise TypeError( + "Mismatch between input types and output types for training loop " + "body: {} vs {}".format(input_types, output_types)) + + # Add the dequeue operations to output_operations to ensure they are run + # by the loop, even if the programmer's loop body does not use them. + output_operations += dequeue_ops + + # Add a dummy output, if needed. + if not output_tensors: + output_tensors = array_ops.constant(0) + + if output_operations: + # TODO(phawkins): in principle this is too restrictive since it serializes + # the training loop steps. In practice it does not matter since this loop + # will be compiled by XLA. + output_tensors = control_flow_ops.tuple(output_tensors, + control_inputs=output_operations) + + if tensor_tracer.TensorTracer.is_enabled(): + num_replicas = tpu_function.get_tpu_context().number_of_shards + if num_replicas is None: + num_replicas = 1 + tt = tensor_tracer.TensorTracer() + output_tensors = tt.trace_tpu(ops.get_default_graph(), + output_tensors, None, + num_replicas) + return output_tensors + + # If the body has arity 0, add a dummy loop-carried value to which we can add + # control dependencies from any side-effecting operations. 
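+ # (while_loop_tf.while_loop also requires at least one loop variable, so + # the dummy constant keeps the arity-0 case well-formed.)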
+ if input_arity == 0: + inputs = [array_ops.constant(0)] + return while_loop_tf.while_loop( + condition_wrapper, body_wrapper, inputs, name="", parallel_iterations=1) + + +def repeat( + n: int, + body: Callable[..., Union[core_types.TensorLike, Iterable]], # pylint:disable=g-bare-generic + inputs: Optional[List[core_types.TensorLike]] = None, + infeed_queue: Optional[tpu_feed.InfeedQueue] = None, + name: Any = None) -> List[core_types.TensorLike]: + """Builds a training loop that executes a fixed number of iterations. + + The set of loop-carried tensors corresponds to `inputs`. + `body` must be a function that takes and returns the values of the + loop-carried tensors. + + Args: + n: the number of loop iterations. + body: a Python function that builds the loop body. + inputs: a list of initial values passed into the training loop or None + (equivalent to an empty list). + infeed_queue: if not None, the infeed queue from which to append a tuple of + arguments as inputs to body. + name: (Deprecated) Does nothing. + + Returns: + The final values of the loop-carried tensors. + Raises: + ValueError: if there is a type error. + """ + def _convert_to_list(xs): + if not isinstance(xs, (list, tuple)): + return [xs] + else: + return list(xs) + + def cond(i, *args): + del args + return i < n + + def body_wrapper(i, *args): + return [i + 1] + _convert_to_list(body(*args)) + + inputs = [0] if inputs is None else [0] + _convert_to_list(inputs) + outputs = while_loop( + cond, body_wrapper, inputs=inputs, infeed_queue=infeed_queue, name=name) + outputs = _convert_to_list(outputs) + if len(outputs) == 1: + # Returns the Op rather than an empty list. + return outputs[0].op + else: + return outputs[1:] diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/__init__.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e896cf8d772e5b0cd3950ffbf19490ba25d55440 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/__init__.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/all_util.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/all_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e203852abce018935f0490dabde775df4b67b9fc Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/all_util.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/compat.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/compat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..adab26ded509c63ad6d7df18274a32a783758bab Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/compat.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/custom_nest_protocol.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/custom_nest_protocol.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d2de4f4a1c9667e4f2bef4861e0027ed090ca0cf Binary files /dev/null and
b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/custom_nest_protocol.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/deprecation.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/deprecation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..95a84ca2d389afb359c72fc32df939068c991567 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/deprecation.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/example_parser_configuration.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/example_parser_configuration.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f398c93a2a61f6dc5d55f8cae9134b54bca963b5 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/example_parser_configuration.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/function_utils.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/function_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff71d56208aac39a80a604d4bcb965f8a6a668a2 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/function_utils.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/keras_deps.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/keras_deps.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..752c26e6ecb03c90135533a6295d5f27dffe3a6e Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/keras_deps.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/lazy_loader.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/lazy_loader.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4fdcd75a7d4f638eb1f8733f265f1de4fe10f3be Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/lazy_loader.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/serialization.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/serialization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1107a286818955d4ab72c7f37aeaa27714894889 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/serialization.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/tf_inspect.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/tf_inspect.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..067ed7a7429740dd752da6a45963ca0e7bef76a7 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/tf_inspect.cpython-310.pyc differ diff --git 
a/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/tf_should_use.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/tf_should_use.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..610c5663fc5119e825fd17a99b6c6d79cbb1e810 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/tf_should_use.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/type_annotations.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/type_annotations.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16a7ea80354c9c0400c656d907a3956227ed9492 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/type_annotations.cpython-310.pyc differ