Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- llava_next/share/terminfo/p/p7 +0 -0
- llava_next/share/terminfo/p/pc7300 +0 -0
- llava_next/share/terminfo/p/pccon0 +0 -0
- llava_next/share/terminfo/p/pcconsole +0 -0
- llava_next/share/terminfo/p/pcvt25 +0 -0
- llava_next/share/terminfo/p/pcvtXX +0 -0
- llava_next/share/terminfo/p/pe1251 +0 -0
- llava_next/share/terminfo/p/pe550 +0 -0
- llava_next/share/terminfo/p/prism14-w +0 -0
- parrot/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/partitioner_utils.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/rewriter.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/core.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/dispatch.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/unification_tools.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/utils.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/mtia/_utils.py +38 -0
- parrot/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/CMakeInitializeConfigs.cmake +40 -0
- parrot/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/parse_cubin.cmake +109 -0
- parrot/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/run_nvcc.cmake +303 -0
- parrot/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/select_compute_arch.cmake +280 -0
- parrot/lib/python3.10/site-packages/torch/share/cmake/Caffe2/public/xpu.cmake +30 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_toco_api.so +3 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__init__.py +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/__init__.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/base.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/convolutional.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/core.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/layers.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/normalization.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/pooling.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/utils.cpython-310.pyc +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/layers/base.py +22 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/layers/convolutional.py +48 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/layers/core.py +33 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/layers/layers.py +71 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/layers/normalization.py +34 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/layers/pooling.py +39 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/layers/utils.py +225 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/__init__.py +19 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/api.py +33 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/device_assignment.py +569 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/feature_column.py +690 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/feature_column_v2.py +1097 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/functional.py +19 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/preempted_hook.py +89 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tensor_tracer.py +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tensor_tracer_flags.py +504 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tensor_tracer_pb2.py +42 -0
- videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/topology.py +239 -0
.gitattributes
CHANGED
|
@@ -810,3 +810,4 @@ videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_py_exception_r
|
|
| 810 |
videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_tfcompile.so filter=lfs diff=lfs merge=lfs -text
|
| 811 |
videochat2/lib/python3.10/site-packages/tensorflow/python/flags_pybind.so filter=lfs diff=lfs merge=lfs -text
|
| 812 |
videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_quantize_training.so filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 810 |
videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_tfcompile.so filter=lfs diff=lfs merge=lfs -text
|
| 811 |
videochat2/lib/python3.10/site-packages/tensorflow/python/flags_pybind.so filter=lfs diff=lfs merge=lfs -text
|
| 812 |
videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_quantize_training.so filter=lfs diff=lfs merge=lfs -text
|
| 813 |
+
videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_toco_api.so filter=lfs diff=lfs merge=lfs -text
|
llava_next/share/terminfo/p/p7
ADDED
|
Binary file (553 Bytes). View file
|
|
|
llava_next/share/terminfo/p/pc7300
ADDED
|
Binary file (1.01 kB). View file
|
|
|
llava_next/share/terminfo/p/pccon0
ADDED
|
Binary file (1.44 kB). View file
|
|
|
llava_next/share/terminfo/p/pcconsole
ADDED
|
Binary file (486 Bytes). View file
|
|
|
llava_next/share/terminfo/p/pcvt25
ADDED
|
Binary file (1.52 kB). View file
|
|
|
llava_next/share/terminfo/p/pcvtXX
ADDED
|
Binary file (1.5 kB). View file
|
|
|
llava_next/share/terminfo/p/pe1251
ADDED
|
Binary file (459 Bytes). View file
|
|
|
llava_next/share/terminfo/p/pe550
ADDED
|
Binary file (421 Bytes). View file
|
|
|
llava_next/share/terminfo/p/prism14-w
ADDED
|
Binary file (1.14 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/partitioner_utils.cpython-310.pyc
ADDED
|
Binary file (8.32 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/rewriter.cpython-310.pyc
ADDED
|
Binary file (4.88 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/core.cpython-310.pyc
ADDED
|
Binary file (2.44 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/dispatch.cpython-310.pyc
ADDED
|
Binary file (318 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/unification_tools.cpython-310.pyc
ADDED
|
Binary file (11 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/fx/experimental/unification/__pycache__/utils.cpython-310.pyc
ADDED
|
Binary file (3.39 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/mtia/_utils.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
|
| 5 |
+
# The _get_device_index has been moved to torch.utils._get_device_index
|
| 6 |
+
from torch._utils import _get_device_index as _torch_get_device_index
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def _get_device_index(
|
| 10 |
+
device: Any, optional: bool = False, allow_cpu: bool = False
|
| 11 |
+
) -> int:
|
| 12 |
+
r"""Get the device index from :attr:`device`, which can be a torch.device object, a Python integer, or ``None``.
|
| 13 |
+
|
| 14 |
+
If :attr:`device` is a torch.device object, returns the device index if it
|
| 15 |
+
is a MTIA device. Note that for a MTIA device without a specified index,
|
| 16 |
+
i.e., ``torch.device('mtia')``, this will return the current default MTIA
|
| 17 |
+
device if :attr:`optional` is ``True``. If :attr:`allow_cpu` is ``True``,
|
| 18 |
+
CPU devices will be accepted and ``-1`` will be returned in this case.
|
| 19 |
+
|
| 20 |
+
If :attr:`device` is a Python integer, it is returned as is.
|
| 21 |
+
|
| 22 |
+
If :attr:`device` is ``None``, this will return the current default MTIA
|
| 23 |
+
device if :attr:`optional` is ``True``.
|
| 24 |
+
"""
|
| 25 |
+
if isinstance(device, int):
|
| 26 |
+
return device
|
| 27 |
+
if isinstance(device, str):
|
| 28 |
+
device = torch.device(device)
|
| 29 |
+
if isinstance(device, torch.device):
|
| 30 |
+
if allow_cpu:
|
| 31 |
+
if device.type not in ["mtia", "cpu"]:
|
| 32 |
+
raise ValueError(f"Expected a mtia or cpu device, but got: {device}")
|
| 33 |
+
elif device.type != "mtia":
|
| 34 |
+
raise ValueError(f"Expected a mtia device, but got: {device}")
|
| 35 |
+
if not torch.jit.is_scripting():
|
| 36 |
+
if isinstance(device, torch.mtia.device):
|
| 37 |
+
return device.idx
|
| 38 |
+
return _torch_get_device_index(device, optional, allow_cpu)
|
parrot/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/CMakeInitializeConfigs.cmake
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Distributed under the OSI-approved BSD 3-Clause License. See accompanying
|
| 2 |
+
# file Copyright.txt or https://cmake.org/licensing for details.
|
| 3 |
+
|
| 4 |
+
# Present in upstream, but not supported on versions of cmake we need to support
|
| 5 |
+
# include_guard(GLOBAL)
|
| 6 |
+
|
| 7 |
+
# Initializes `<_PREFIX>_<CONFIG>` variables from the corresponding
|
| 8 |
+
# `<_PREFIX>_<CONFIG>_INIT`, for the configurations currently used.
|
| 9 |
+
function(cmake_initialize_per_config_variable _PREFIX _DOCSTRING)
|
| 10 |
+
string(STRIP "${${_PREFIX}_INIT}" _INIT)
|
| 11 |
+
set("${_PREFIX}" "${_INIT}"
|
| 12 |
+
CACHE STRING "${_DOCSTRING} during all build types.")
|
| 13 |
+
mark_as_advanced("${_PREFIX}")
|
| 14 |
+
|
| 15 |
+
if (NOT CMAKE_NOT_USING_CONFIG_FLAGS)
|
| 16 |
+
set(_CONFIGS Debug Release MinSizeRel RelWithDebInfo)
|
| 17 |
+
|
| 18 |
+
get_property(_GENERATOR_IS_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
|
| 19 |
+
if (_GENERATOR_IS_MULTI_CONFIG)
|
| 20 |
+
list(APPEND _CONFIGS ${CMAKE_CONFIGURATION_TYPES})
|
| 21 |
+
else()
|
| 22 |
+
if (NOT CMAKE_NO_BUILD_TYPE)
|
| 23 |
+
set(CMAKE_BUILD_TYPE "${CMAKE_BUILD_TYPE_INIT}" CACHE STRING
|
| 24 |
+
"Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel ...")
|
| 25 |
+
endif()
|
| 26 |
+
list(APPEND _CONFIGS ${CMAKE_BUILD_TYPE})
|
| 27 |
+
endif()
|
| 28 |
+
|
| 29 |
+
list(REMOVE_DUPLICATES _CONFIGS)
|
| 30 |
+
foreach(_BUILD_TYPE IN LISTS _CONFIGS)
|
| 31 |
+
if (NOT "${_BUILD_TYPE}" STREQUAL "")
|
| 32 |
+
string(TOUPPER "${_BUILD_TYPE}" _BUILD_TYPE)
|
| 33 |
+
string(STRIP "${${_PREFIX}_${_BUILD_TYPE}_INIT}" _INIT)
|
| 34 |
+
set("${_PREFIX}_${_BUILD_TYPE}" "${_INIT}"
|
| 35 |
+
CACHE STRING "${_DOCSTRING} during ${_BUILD_TYPE} builds.")
|
| 36 |
+
mark_as_advanced("${_PREFIX}_${_BUILD_TYPE}")
|
| 37 |
+
endif()
|
| 38 |
+
endforeach()
|
| 39 |
+
endif()
|
| 40 |
+
endfunction()
|
parrot/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/parse_cubin.cmake
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# James Bigler, NVIDIA Corp (nvidia.com - jbigler)
|
| 2 |
+
# Abe Stephens, SCI Institute -- http://www.sci.utah.edu/~abe/FindCuda.html
|
| 3 |
+
#
|
| 4 |
+
# Copyright (c) 2008 - 2009 NVIDIA Corporation. All rights reserved.
|
| 5 |
+
#
|
| 6 |
+
# Copyright (c) 2007-2009
|
| 7 |
+
# Scientific Computing and Imaging Institute, University of Utah
|
| 8 |
+
#
|
| 9 |
+
# This code is licensed under the MIT License. See the FindCUDA.cmake script
|
| 10 |
+
# for the text of the license.
|
| 11 |
+
|
| 12 |
+
# The MIT License
|
| 13 |
+
#
|
| 14 |
+
# License for the specific language governing rights and limitations under
|
| 15 |
+
# Permission is hereby granted, free of charge, to any person obtaining a
|
| 16 |
+
# copy of this software and associated documentation files (the "Software"),
|
| 17 |
+
# to deal in the Software without restriction, including without limitation
|
| 18 |
+
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
| 19 |
+
# and/or sell copies of the Software, and to permit persons to whom the
|
| 20 |
+
# Software is furnished to do so, subject to the following conditions:
|
| 21 |
+
#
|
| 22 |
+
# The above copyright notice and this permission notice shall be included
|
| 23 |
+
# in all copies or substantial portions of the Software.
|
| 24 |
+
#
|
| 25 |
+
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
| 26 |
+
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 27 |
+
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
| 28 |
+
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 29 |
+
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
| 30 |
+
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
| 31 |
+
# DEALINGS IN THE SOFTWARE.
|
| 32 |
+
#
|
| 33 |
+
|
| 34 |
+
#######################################################################
|
| 35 |
+
# Parses a .cubin file produced by nvcc and reports statistics about the file.
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
file(READ ${input_file} file_text)
|
| 39 |
+
|
| 40 |
+
if (NOT "${file_text}" STREQUAL "")
|
| 41 |
+
|
| 42 |
+
string(REPLACE ";" "\\;" file_text ${file_text})
|
| 43 |
+
string(REPLACE "\ncode" ";code" file_text ${file_text})
|
| 44 |
+
|
| 45 |
+
list(LENGTH file_text len)
|
| 46 |
+
|
| 47 |
+
foreach(line ${file_text})
|
| 48 |
+
|
| 49 |
+
# Only look at "code { }" blocks.
|
| 50 |
+
if(line MATCHES "^code")
|
| 51 |
+
|
| 52 |
+
# Break into individual lines.
|
| 53 |
+
string(REGEX REPLACE "\n" ";" line ${line})
|
| 54 |
+
|
| 55 |
+
foreach(entry ${line})
|
| 56 |
+
|
| 57 |
+
# Extract kernel names.
|
| 58 |
+
if (${entry} MATCHES "[^g]name = ([^ ]+)")
|
| 59 |
+
set(entry "${CMAKE_MATCH_1}")
|
| 60 |
+
|
| 61 |
+
# Check to see if the kernel name starts with "_"
|
| 62 |
+
set(skip FALSE)
|
| 63 |
+
# if (${entry} MATCHES "^_")
|
| 64 |
+
# Skip the rest of this block.
|
| 65 |
+
# message("Skipping ${entry}")
|
| 66 |
+
# set(skip TRUE)
|
| 67 |
+
# else ()
|
| 68 |
+
message("Kernel: ${entry}")
|
| 69 |
+
# endif ()
|
| 70 |
+
|
| 71 |
+
endif()
|
| 72 |
+
|
| 73 |
+
# Skip the rest of the block if necessary
|
| 74 |
+
if(NOT skip)
|
| 75 |
+
|
| 76 |
+
# Registers
|
| 77 |
+
if (${entry} MATCHES "reg([ ]+)=([ ]+)([^ ]+)")
|
| 78 |
+
set(entry "${CMAKE_MATCH_3}")
|
| 79 |
+
message("Registers: ${entry}")
|
| 80 |
+
endif()
|
| 81 |
+
|
| 82 |
+
# Local memory
|
| 83 |
+
if (${entry} MATCHES "lmem([ ]+)=([ ]+)([^ ]+)")
|
| 84 |
+
set(entry "${CMAKE_MATCH_3}")
|
| 85 |
+
message("Local: ${entry}")
|
| 86 |
+
endif()
|
| 87 |
+
|
| 88 |
+
# Shared memory
|
| 89 |
+
if (${entry} MATCHES "smem([ ]+)=([ ]+)([^ ]+)")
|
| 90 |
+
set(entry "${CMAKE_MATCH_3}")
|
| 91 |
+
message("Shared: ${entry}")
|
| 92 |
+
endif()
|
| 93 |
+
|
| 94 |
+
if (${entry} MATCHES "^}")
|
| 95 |
+
message("")
|
| 96 |
+
endif()
|
| 97 |
+
|
| 98 |
+
endif()
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
endforeach()
|
| 102 |
+
|
| 103 |
+
endif()
|
| 104 |
+
|
| 105 |
+
endforeach()
|
| 106 |
+
|
| 107 |
+
else()
|
| 108 |
+
# message("FOUND NO DEPENDS")
|
| 109 |
+
endif()
|
parrot/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/run_nvcc.cmake
ADDED
|
@@ -0,0 +1,303 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# James Bigler, NVIDIA Corp (nvidia.com - jbigler)
|
| 2 |
+
#
|
| 3 |
+
# Copyright (c) 2008 - 2009 NVIDIA Corporation. All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# This code is licensed under the MIT License. See the FindCUDA.cmake script
|
| 6 |
+
# for the text of the license.
|
| 7 |
+
|
| 8 |
+
# The MIT License
|
| 9 |
+
#
|
| 10 |
+
# License for the specific language governing rights and limitations under
|
| 11 |
+
# Permission is hereby granted, free of charge, to any person obtaining a
|
| 12 |
+
# copy of this software and associated documentation files (the "Software"),
|
| 13 |
+
# to deal in the Software without restriction, including without limitation
|
| 14 |
+
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
| 15 |
+
# and/or sell copies of the Software, and to permit persons to whom the
|
| 16 |
+
# Software is furnished to do so, subject to the following conditions:
|
| 17 |
+
#
|
| 18 |
+
# The above copyright notice and this permission notice shall be included
|
| 19 |
+
# in all copies or substantial portions of the Software.
|
| 20 |
+
#
|
| 21 |
+
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
| 22 |
+
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 23 |
+
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
| 24 |
+
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 25 |
+
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
| 26 |
+
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
| 27 |
+
# DEALINGS IN THE SOFTWARE.
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
##########################################################################
|
| 31 |
+
# This file runs the nvcc commands to produce the desired output file along with
|
| 32 |
+
# the dependency file needed by CMake to compute dependencies. In addition the
|
| 33 |
+
# file checks the output of each command and if the command fails it deletes the
|
| 34 |
+
# output files.
|
| 35 |
+
|
| 36 |
+
# Input variables
|
| 37 |
+
#
|
| 38 |
+
# verbose:BOOL=<> OFF: Be as quiet as possible (default)
|
| 39 |
+
# ON : Describe each step
|
| 40 |
+
#
|
| 41 |
+
# build_configuration:STRING=<> Typically one of Debug, MinSizeRel, Release, or
|
| 42 |
+
# RelWithDebInfo, but it should match one of the
|
| 43 |
+
# entries in CUDA_HOST_FLAGS. This is the build
|
| 44 |
+
# configuration used when compiling the code. If
|
| 45 |
+
# blank or unspecified Debug is assumed as this is
|
| 46 |
+
# what CMake does.
|
| 47 |
+
#
|
| 48 |
+
# generated_file:STRING=<> File to generate. This argument must be passed in.
|
| 49 |
+
#
|
| 50 |
+
# generated_cubin_file:STRING=<> File to generate. This argument must be passed
|
| 51 |
+
# in if build_cubin is true.
|
| 52 |
+
|
| 53 |
+
cmake_policy(PUSH)
|
| 54 |
+
cmake_policy(SET CMP0007 NEW)
|
| 55 |
+
cmake_policy(SET CMP0010 NEW)
|
| 56 |
+
if(NOT generated_file)
|
| 57 |
+
message(FATAL_ERROR "You must specify generated_file on the command line")
|
| 58 |
+
endif()
|
| 59 |
+
|
| 60 |
+
# Set these up as variables to make reading the generated file easier
|
| 61 |
+
set(CMAKE_COMMAND "@CMAKE_COMMAND@") # path
|
| 62 |
+
set(source_file "@source_file@") # path
|
| 63 |
+
set(NVCC_generated_dependency_file "@NVCC_generated_dependency_file@") # path
|
| 64 |
+
set(cmake_dependency_file "@cmake_dependency_file@") # path
|
| 65 |
+
set(CUDA_make2cmake "@CUDA_make2cmake@") # path
|
| 66 |
+
set(CUDA_parse_cubin "@CUDA_parse_cubin@") # path
|
| 67 |
+
set(build_cubin @build_cubin@) # bool
|
| 68 |
+
set(CUDA_HOST_COMPILER "@CUDA_HOST_COMPILER@") # path
|
| 69 |
+
# We won't actually use these variables for now, but we need to set this, in
|
| 70 |
+
# order to force this file to be run again if it changes.
|
| 71 |
+
set(generated_file_path "@generated_file_path@") # path
|
| 72 |
+
set(generated_file_internal "@generated_file@") # path
|
| 73 |
+
set(generated_cubin_file_internal "@generated_cubin_file@") # path
|
| 74 |
+
|
| 75 |
+
set(CUDA_NVCC_EXECUTABLE "@CUDA_NVCC_EXECUTABLE@") # path
|
| 76 |
+
set(CUDA_NVCC_FLAGS @CUDA_NVCC_FLAGS@ ;; @CUDA_WRAP_OPTION_NVCC_FLAGS@) # list
|
| 77 |
+
@CUDA_NVCC_FLAGS_CONFIG@
|
| 78 |
+
set(nvcc_flags @nvcc_flags@) # list
|
| 79 |
+
set(CUDA_NVCC_INCLUDE_DIRS [==[@CUDA_NVCC_INCLUDE_DIRS@]==]) # list (needs to be in lua quotes to address backslashes)
|
| 80 |
+
string(REPLACE "\\" "/" CUDA_NVCC_INCLUDE_DIRS "${CUDA_NVCC_INCLUDE_DIRS}")
|
| 81 |
+
set(CUDA_NVCC_COMPILE_DEFINITIONS [==[@CUDA_NVCC_COMPILE_DEFINITIONS@]==]) # list (needs to be in lua quotes see #16510 ).
|
| 82 |
+
set(format_flag "@format_flag@") # string
|
| 83 |
+
set(cuda_language_flag @cuda_language_flag@) # list
|
| 84 |
+
|
| 85 |
+
# Clean up list of include directories and add -I flags
|
| 86 |
+
list(REMOVE_DUPLICATES CUDA_NVCC_INCLUDE_DIRS)
|
| 87 |
+
set(CUDA_NVCC_INCLUDE_ARGS)
|
| 88 |
+
foreach(dir ${CUDA_NVCC_INCLUDE_DIRS})
|
| 89 |
+
# Extra quotes are added around each flag to help nvcc parse out flags with spaces.
|
| 90 |
+
list(APPEND CUDA_NVCC_INCLUDE_ARGS "-I${dir}")
|
| 91 |
+
endforeach()
|
| 92 |
+
|
| 93 |
+
# Clean up list of compile definitions, add -D flags, and append to nvcc_flags
|
| 94 |
+
list(REMOVE_DUPLICATES CUDA_NVCC_COMPILE_DEFINITIONS)
|
| 95 |
+
foreach(def ${CUDA_NVCC_COMPILE_DEFINITIONS})
|
| 96 |
+
list(APPEND nvcc_flags "-D${def}")
|
| 97 |
+
endforeach()
|
| 98 |
+
|
| 99 |
+
if(build_cubin AND NOT generated_cubin_file)
|
| 100 |
+
message(FATAL_ERROR "You must specify generated_cubin_file on the command line")
|
| 101 |
+
endif()
|
| 102 |
+
|
| 103 |
+
# This is the list of host compilation flags. It C or CXX should already have
|
| 104 |
+
# been chosen by FindCUDA.cmake.
|
| 105 |
+
@CUDA_HOST_FLAGS@
|
| 106 |
+
|
| 107 |
+
# Take the compiler flags and package them up to be sent to the compiler via -Xcompiler
|
| 108 |
+
set(nvcc_host_compiler_flags "")
|
| 109 |
+
# If we weren't given a build_configuration, use Debug.
|
| 110 |
+
if(NOT build_configuration)
|
| 111 |
+
set(build_configuration Debug)
|
| 112 |
+
endif()
|
| 113 |
+
string(TOUPPER "${build_configuration}" build_configuration)
|
| 114 |
+
#message("CUDA_NVCC_HOST_COMPILER_FLAGS = ${CUDA_NVCC_HOST_COMPILER_FLAGS}")
|
| 115 |
+
foreach(flag ${CMAKE_HOST_FLAGS} ${CMAKE_HOST_FLAGS_${build_configuration}})
|
| 116 |
+
# Extra quotes are added around each flag to help nvcc parse out flags with spaces.
|
| 117 |
+
string(APPEND nvcc_host_compiler_flags ",\"${flag}\"")
|
| 118 |
+
endforeach()
|
| 119 |
+
if (nvcc_host_compiler_flags)
|
| 120 |
+
set(nvcc_host_compiler_flags "-Xcompiler" ${nvcc_host_compiler_flags})
|
| 121 |
+
endif()
|
| 122 |
+
#message("nvcc_host_compiler_flags = \"${nvcc_host_compiler_flags}\"")
|
| 123 |
+
# Add the build specific configuration flags
|
| 124 |
+
list(APPEND CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS_${build_configuration}})
|
| 125 |
+
|
| 126 |
+
# Any -ccbin existing in CUDA_NVCC_FLAGS gets highest priority
|
| 127 |
+
list( FIND CUDA_NVCC_FLAGS "-ccbin" ccbin_found0 )
|
| 128 |
+
list( FIND CUDA_NVCC_FLAGS "--compiler-bindir" ccbin_found1 )
|
| 129 |
+
if( ccbin_found0 LESS 0 AND ccbin_found1 LESS 0 AND CUDA_HOST_COMPILER )
|
| 130 |
+
if (CUDA_HOST_COMPILER STREQUAL "@_CUDA_MSVC_HOST_COMPILER@" AND DEFINED CCBIN)
|
| 131 |
+
set(CCBIN -ccbin "${CCBIN}")
|
| 132 |
+
else()
|
| 133 |
+
set(CCBIN -ccbin "${CUDA_HOST_COMPILER}")
|
| 134 |
+
endif()
|
| 135 |
+
endif()
|
| 136 |
+
|
| 137 |
+
# cuda_execute_process - Executes a command with optional command echo and status message.
|
| 138 |
+
#
|
| 139 |
+
# status - Status message to print if verbose is true
|
| 140 |
+
# command - COMMAND argument from the usual execute_process argument structure
|
| 141 |
+
# ARGN - Remaining arguments are the command with arguments
|
| 142 |
+
#
|
| 143 |
+
# CUDA_result - return value from running the command
|
| 144 |
+
#
|
| 145 |
+
# Make this a macro instead of a function, so that things like RESULT_VARIABLE
|
| 146 |
+
# and other return variables are present after executing the process.
|
| 147 |
+
macro(cuda_execute_process status command)
|
| 148 |
+
set(_command ${command})
|
| 149 |
+
if(NOT "x${_command}" STREQUAL "xCOMMAND")
|
| 150 |
+
message(FATAL_ERROR "Malformed call to cuda_execute_process. Missing COMMAND as second argument. (command = ${command})")
|
| 151 |
+
endif()
|
| 152 |
+
if(verbose)
|
| 153 |
+
execute_process(COMMAND "${CMAKE_COMMAND}" -E echo -- ${status})
|
| 154 |
+
# Now we need to build up our command string. We are accounting for quotes
|
| 155 |
+
# and spaces, anything else is left up to the user to fix if they want to
|
| 156 |
+
# copy and paste a runnable command line.
|
| 157 |
+
set(cuda_execute_process_string)
|
| 158 |
+
foreach(arg ${ARGN})
|
| 159 |
+
# If there are quotes, excape them, so they come through.
|
| 160 |
+
string(REPLACE "\"" "\\\"" arg ${arg})
|
| 161 |
+
# Args with spaces need quotes around them to get them to be parsed as a single argument.
|
| 162 |
+
if(arg MATCHES " ")
|
| 163 |
+
list(APPEND cuda_execute_process_string "\"${arg}\"")
|
| 164 |
+
else()
|
| 165 |
+
list(APPEND cuda_execute_process_string ${arg})
|
| 166 |
+
endif()
|
| 167 |
+
endforeach()
|
| 168 |
+
# Echo the command
|
| 169 |
+
execute_process(COMMAND ${CMAKE_COMMAND} -E echo ${cuda_execute_process_string})
|
| 170 |
+
endif()
|
| 171 |
+
# Run the command
|
| 172 |
+
execute_process(COMMAND ${ARGN} RESULT_VARIABLE CUDA_result )
|
| 173 |
+
endmacro()
|
| 174 |
+
|
| 175 |
+
# Delete the target file
|
| 176 |
+
cuda_execute_process(
|
| 177 |
+
"Removing ${generated_file}"
|
| 178 |
+
COMMAND "${CMAKE_COMMAND}" -E remove "${generated_file}"
|
| 179 |
+
)
|
| 180 |
+
|
| 181 |
+
# For CUDA 2.3 and below, -G -M doesn't work, so remove the -G flag
|
| 182 |
+
# for dependency generation and hope for the best.
|
| 183 |
+
set(depends_CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS}")
|
| 184 |
+
set(CUDA_VERSION @CUDA_VERSION@)
|
| 185 |
+
|
| 186 |
+
# nvcc doesn't define __CUDACC__ for some reason when generating dependency files. This
|
| 187 |
+
# can cause incorrect dependencies when #including files based on this macro which is
|
| 188 |
+
# defined in the generating passes of nvcc invocation. We will go ahead and manually
|
| 189 |
+
# define this for now until a future version fixes this bug.
|
| 190 |
+
set(CUDACC_DEFINE -D__CUDACC__)
|
| 191 |
+
|
| 192 |
+
# Generate the dependency file
|
| 193 |
+
cuda_execute_process(
|
| 194 |
+
"Generating dependency file: ${NVCC_generated_dependency_file}"
|
| 195 |
+
COMMAND "${CUDA_NVCC_EXECUTABLE}"
|
| 196 |
+
-M
|
| 197 |
+
${CUDACC_DEFINE}
|
| 198 |
+
"${source_file}"
|
| 199 |
+
-o "${NVCC_generated_dependency_file}"
|
| 200 |
+
${CCBIN}
|
| 201 |
+
${nvcc_flags}
|
| 202 |
+
${nvcc_host_compiler_flags}
|
| 203 |
+
${depends_CUDA_NVCC_FLAGS}
|
| 204 |
+
-DNVCC
|
| 205 |
+
${CUDA_NVCC_INCLUDE_ARGS}
|
| 206 |
+
)
|
| 207 |
+
|
| 208 |
+
if(CUDA_result)
|
| 209 |
+
message(FATAL_ERROR "Error generating ${generated_file}")
|
| 210 |
+
endif()
|
| 211 |
+
|
| 212 |
+
# Generate the cmake readable dependency file to a temp file. Don't put the
|
| 213 |
+
# quotes just around the filenames for the input_file and output_file variables.
|
| 214 |
+
# CMake will pass the quotes through and not be able to find the file.
|
| 215 |
+
cuda_execute_process(
|
| 216 |
+
"Generating temporary cmake readable file: ${cmake_dependency_file}.tmp"
|
| 217 |
+
COMMAND "${CMAKE_COMMAND}"
|
| 218 |
+
-D "input_file:FILEPATH=${NVCC_generated_dependency_file}"
|
| 219 |
+
-D "output_file:FILEPATH=${cmake_dependency_file}.tmp"
|
| 220 |
+
-D "verbose=${verbose}"
|
| 221 |
+
-P "${CUDA_make2cmake}"
|
| 222 |
+
)
|
| 223 |
+
|
| 224 |
+
if(CUDA_result)
|
| 225 |
+
message(FATAL_ERROR "Error generating ${generated_file}")
|
| 226 |
+
endif()
|
| 227 |
+
|
| 228 |
+
# Copy the file if it is different
|
| 229 |
+
cuda_execute_process(
|
| 230 |
+
"Copy if different ${cmake_dependency_file}.tmp to ${cmake_dependency_file}"
|
| 231 |
+
COMMAND "${CMAKE_COMMAND}" -E copy_if_different "${cmake_dependency_file}.tmp" "${cmake_dependency_file}"
|
| 232 |
+
)
|
| 233 |
+
|
| 234 |
+
if(CUDA_result)
|
| 235 |
+
message(FATAL_ERROR "Error generating ${generated_file}")
|
| 236 |
+
endif()
|
| 237 |
+
|
| 238 |
+
# Delete the temporary file
|
| 239 |
+
cuda_execute_process(
|
| 240 |
+
"Removing ${cmake_dependency_file}.tmp and ${NVCC_generated_dependency_file}"
|
| 241 |
+
COMMAND "${CMAKE_COMMAND}" -E remove "${cmake_dependency_file}.tmp" "${NVCC_generated_dependency_file}"
|
| 242 |
+
)
|
| 243 |
+
|
| 244 |
+
if(CUDA_result)
|
| 245 |
+
message(FATAL_ERROR "Error generating ${generated_file}")
|
| 246 |
+
endif()
|
| 247 |
+
|
| 248 |
+
# Generate the code
|
| 249 |
+
cuda_execute_process(
|
| 250 |
+
"Generating ${generated_file}"
|
| 251 |
+
COMMAND "${CUDA_NVCC_EXECUTABLE}"
|
| 252 |
+
"${source_file}"
|
| 253 |
+
${cuda_language_flag}
|
| 254 |
+
${format_flag} -o "${generated_file}"
|
| 255 |
+
${CCBIN}
|
| 256 |
+
${nvcc_flags}
|
| 257 |
+
${nvcc_host_compiler_flags}
|
| 258 |
+
${CUDA_NVCC_FLAGS}
|
| 259 |
+
-DNVCC
|
| 260 |
+
${CUDA_NVCC_INCLUDE_ARGS}
|
| 261 |
+
)
|
| 262 |
+
|
| 263 |
+
if(CUDA_result)
|
| 264 |
+
# Since nvcc can sometimes leave half done files make sure that we delete the output file.
|
| 265 |
+
cuda_execute_process(
|
| 266 |
+
"Removing ${generated_file}"
|
| 267 |
+
COMMAND "${CMAKE_COMMAND}" -E remove "${generated_file}"
|
| 268 |
+
)
|
| 269 |
+
message(FATAL_ERROR "Error generating file ${generated_file}")
|
| 270 |
+
else()
|
| 271 |
+
if(verbose)
|
| 272 |
+
message("Generated ${generated_file} successfully.")
|
| 273 |
+
endif()
|
| 274 |
+
endif()
|
| 275 |
+
|
| 276 |
+
# Cubin resource report commands.
|
| 277 |
+
if( build_cubin )
|
| 278 |
+
# Run with -cubin to produce resource usage report.
|
| 279 |
+
cuda_execute_process(
|
| 280 |
+
"Generating ${generated_cubin_file}"
|
| 281 |
+
COMMAND "${CUDA_NVCC_EXECUTABLE}"
|
| 282 |
+
"${source_file}"
|
| 283 |
+
${CUDA_NVCC_FLAGS}
|
| 284 |
+
${nvcc_flags}
|
| 285 |
+
${CCBIN}
|
| 286 |
+
${nvcc_host_compiler_flags}
|
| 287 |
+
-DNVCC
|
| 288 |
+
-cubin
|
| 289 |
+
-o "${generated_cubin_file}"
|
| 290 |
+
${CUDA_NVCC_INCLUDE_ARGS}
|
| 291 |
+
)
|
| 292 |
+
|
| 293 |
+
# Execute the parser script.
|
| 294 |
+
cuda_execute_process(
|
| 295 |
+
"Executing the parser script"
|
| 296 |
+
COMMAND "${CMAKE_COMMAND}"
|
| 297 |
+
-D "input_file:STRING=${generated_cubin_file}"
|
| 298 |
+
-P "${CUDA_parse_cubin}"
|
| 299 |
+
)
|
| 300 |
+
|
| 301 |
+
endif()
|
| 302 |
+
|
| 303 |
+
cmake_policy(POP)
|
parrot/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/select_compute_arch.cmake
ADDED
|
@@ -0,0 +1,280 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Synopsis:
#   CUDA_SELECT_NVCC_ARCH_FLAGS(out_variable [target_CUDA_architectures])
#   -- Selects GPU arch flags for nvcc based on target_CUDA_architectures
#      target_CUDA_architectures : Auto | Common | All | LIST(ARCH_AND_PTX ...)
#       - "Auto" detects local machine GPU compute arch at runtime.
#       - "Common" and "All" cover common and entire subsets of architectures
#      ARCH_AND_PTX : NAME | NUM.NUM | NUM.NUM(NUM.NUM) | NUM.NUM+PTX
#      NAME: Kepler Maxwell Kepler+Tegra Kepler+Tesla Maxwell+Tegra Pascal Volta Turing Ampere
#      NUM: Any number. Only those pairs are currently accepted by NVCC though:
#           3.5 3.7 5.0 5.2 5.3 6.0 6.2 7.0 7.2 7.5 8.0
#   Returns LIST of flags to be added to CUDA_NVCC_FLAGS in ${out_variable}
#   Additionally, sets ${out_variable}_readable to the resulting numeric list
#   Example:
#     CUDA_SELECT_NVCC_ARCH_FLAGS(ARCH_FLAGS 3.0 3.5+PTX 5.2(5.0) Maxwell)
#     LIST(APPEND CUDA_NVCC_FLAGS ${ARCH_FLAGS})
#
#   More info on CUDA architectures: https://en.wikipedia.org/wiki/CUDA
#

# When CUDA is enabled as a first-class CMake language, derive CUDA_VERSION
# from the compiler version string instead of relying on FindCUDA.
if(CMAKE_CUDA_COMPILER_LOADED) # CUDA as a language
  if(CMAKE_CUDA_COMPILER_ID STREQUAL "NVIDIA"
      AND CMAKE_CUDA_COMPILER_VERSION MATCHES "^([0-9]+\\.[0-9]+)")
    set(CUDA_VERSION "${CMAKE_MATCH_1}")
  endif()
endif()

# See: https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#gpu-feature-list

# This list will be used for CUDA_ARCH_NAME = All option
set(CUDA_KNOWN_GPU_ARCHITECTURES "Kepler" "Maxwell")

# This list will be used for CUDA_ARCH_NAME = Common option (enabled by default)
set(CUDA_COMMON_GPU_ARCHITECTURES "3.5" "5.0")

# This list is used to filter CUDA archs when autodetecting
set(CUDA_ALL_GPU_ARCHITECTURES "3.5" "5.0")

# Each block below appends the architectures introduced by a given toolkit
# version, and sets CUDA_LIMIT_GPU_ARCHITECTURE to the first arch the toolkit
# can NOT compile for (consumed by CUDA_DETECT_INSTALLED_GPUS below).
if(CUDA_VERSION VERSION_GREATER "10.5")
  list(APPEND CUDA_KNOWN_GPU_ARCHITECTURES "Ampere")
  list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "8.0")
  list(APPEND CUDA_ALL_GPU_ARCHITECTURES "8.0")

  if(CUDA_VERSION VERSION_LESS "11.1")
    set(CUDA_LIMIT_GPU_ARCHITECTURE "8.0")
    list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "8.0+PTX")
  endif()
endif()

if(NOT CUDA_VERSION VERSION_LESS "11.1")
  list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "8.6")
  list(APPEND CUDA_ALL_GPU_ARCHITECTURES "8.6")
  # NOTE(review): "ARCHITECUTRE" is misspelled, so this set() is a dead store —
  # the detection code reads CUDA_LIMIT_GPU_ARCHITECTURE (correct spelling).
  # Do NOT "fix" the spelling blindly: for CUDA >= 12.0 no later block resets
  # the limit, so correcting it would cap detected GPUs at 8.6. Verify intent
  # against upstream before changing.
  set(CUDA_LIMIT_GPU_ARCHITECUTRE "8.6")

  if(CUDA_VERSION VERSION_LESS "11.8")
    set(CUDA_LIMIT_GPU_ARCHITECTURE "8.9")
    list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "8.6+PTX")
  endif()
endif()

if(NOT CUDA_VERSION VERSION_LESS "11.8")
  list(APPEND CUDA_KNOWN_GPU_ARCHITECTURES "Ada")
  list(APPEND CUDA_KNOWN_GPU_ARCHITECTURES "Hopper")
  list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "8.9")
  list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "9.0")
  list(APPEND CUDA_ALL_GPU_ARCHITECTURES "8.9")
  list(APPEND CUDA_ALL_GPU_ARCHITECTURES "9.0")

  if(CUDA_VERSION VERSION_LESS "12.0")
    set(CUDA_LIMIT_GPU_ARCHITECTURE "9.0")
    list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "8.9+PTX")
    list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "9.0+PTX")
  endif()
endif()

if(NOT CUDA_VERSION VERSION_LESS "12.0")
  # 9.0a is the arch-specific Hopper variant; CUDA 12 drops sm_35 support.
  list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "9.0a")
  list(APPEND CUDA_ALL_GPU_ARCHITECTURES "9.0a")
  list(REMOVE_ITEM CUDA_COMMON_GPU_ARCHITECTURES "3.5")
  list(REMOVE_ITEM CUDA_ALL_GPU_ARCHITECTURES "3.5")
endif()
################################################################################################
# A function for automatic detection of GPUs installed (if autodetection is enabled)
# Usage:
#   CUDA_DETECT_INSTALLED_GPUS(OUT_VARIABLE)
#
# Compiles and runs a tiny CUDA-runtime program that prints "major.minor" for
# every visible device, caches the result in CUDA_GPU_DETECT_OUTPUT, and
# returns the (version-filtered) list in ${OUT_VARIABLE}. Falls back to
# CUDA_COMMON_GPU_ARCHITECTURES when detection fails (e.g. no GPU present).
function(CUDA_DETECT_INSTALLED_GPUS OUT_VARIABLE)
  # Detection runs at most once per build tree; the result is cached.
  if(NOT CUDA_GPU_DETECT_OUTPUT)
    if(CMAKE_CUDA_COMPILER_LOADED) # CUDA as a language
      set(file "${PROJECT_BINARY_DIR}/detect_cuda_compute_capabilities.cu")
    else()
      set(file "${PROJECT_BINARY_DIR}/detect_cuda_compute_capabilities.cpp")
    endif()

    # Probe program: prints "major.minor " per device, returns -1 when no
    # device is available so try_run reports failure.
    file(WRITE ${file} ""
      "#include <cuda_runtime.h>\n"
      "#include <cstdio>\n"
      "int main()\n"
      "{\n"
      " int count = 0;\n"
      " if (cudaSuccess != cudaGetDeviceCount(&count)) return -1;\n"
      " if (count == 0) return -1;\n"
      " for (int device = 0; device < count; ++device)\n"
      " {\n"
      " cudaDeviceProp prop;\n"
      " if (cudaSuccess == cudaGetDeviceProperties(&prop, device))\n"
      " std::printf(\"%d.%d \", prop.major, prop.minor);\n"
      " }\n"
      " return 0;\n"
      "}\n")

    if(CMAKE_CUDA_COMPILER_LOADED) # CUDA as a language
      # CUDA language mode: the .cu file compiles/links with no extra flags.
      try_run(run_result compile_result ${PROJECT_BINARY_DIR} ${file}
              RUN_OUTPUT_VARIABLE compute_capabilities)
    else()
      # FindCUDA mode: compile as C++ against the CUDA runtime headers/libs.
      try_run(run_result compile_result ${PROJECT_BINARY_DIR} ${file}
              CMAKE_FLAGS "-DINCLUDE_DIRECTORIES=${CUDA_INCLUDE_DIRS}"
              LINK_LIBRARIES ${CUDA_LIBRARIES}
              RUN_OUTPUT_VARIABLE compute_capabilities)
    endif()

    # Filter unrelated content out of the output.
    string(REGEX MATCHALL "[0-9]+\\.[0-9]+" compute_capabilities "${compute_capabilities}")

    if(run_result EQUAL 0)
      # sm_21 has no binary target; emit 2.1 PTX code for 2.0 hardware.
      string(REPLACE "2.1" "2.1(2.0)" compute_capabilities "${compute_capabilities}")
      set(CUDA_GPU_DETECT_OUTPUT ${compute_capabilities}
          CACHE INTERNAL "Returned GPU architectures from detect_gpus tool" FORCE)
    endif()
  endif()

  if(NOT CUDA_GPU_DETECT_OUTPUT)
    message(STATUS "Automatic GPU detection failed. Building for common architectures.")
    set(${OUT_VARIABLE} ${CUDA_COMMON_GPU_ARCHITECTURES} PARENT_SCOPE)
  else()
    # Filter based on CUDA version supported archs
    set(CUDA_GPU_DETECT_OUTPUT_FILTERED "")
    separate_arguments(CUDA_GPU_DETECT_OUTPUT)
    foreach(ITEM IN ITEMS ${CUDA_GPU_DETECT_OUTPUT})
      # Archs the toolkit cannot target (>= the limit) are replaced by the
      # last entry of the common list (the newest supported arch).
      if(CUDA_LIMIT_GPU_ARCHITECTURE AND (ITEM VERSION_GREATER CUDA_LIMIT_GPU_ARCHITECTURE OR
          ITEM VERSION_EQUAL CUDA_LIMIT_GPU_ARCHITECTURE))
        list(GET CUDA_COMMON_GPU_ARCHITECTURES -1 NEWITEM)
        string(APPEND CUDA_GPU_DETECT_OUTPUT_FILTERED " ${NEWITEM}")
      else()
        string(APPEND CUDA_GPU_DETECT_OUTPUT_FILTERED " ${ITEM}")
      endif()
    endforeach()

    set(${OUT_VARIABLE} ${CUDA_GPU_DETECT_OUTPUT_FILTERED} PARENT_SCOPE)
  endif()
endfunction()
################################################################################################
# Function for selecting GPU arch flags for nvcc based on CUDA architectures from parameter list
# Usage:
#   SELECT_NVCC_ARCH_FLAGS(out_variable [list of CUDA compute archs])
#
# Accepts arch names ("Ampere"), numeric specs ("8.6", "5.2(5.0)"), and a
# "+PTX" suffix; expands "All"/"Common"/"Auto" (the default) via the lists and
# detection function above. Sets ${out_variable} to a list of -gencode flags
# and ${out_variable}_readable to a human-readable space-separated summary.
function(CUDA_SELECT_NVCC_ARCH_FLAGS out_variable)
  set(CUDA_ARCH_LIST "${ARGN}")

  # No archs given -> autodetect the local machine's GPUs.
  if("X${CUDA_ARCH_LIST}" STREQUAL "X" )
    set(CUDA_ARCH_LIST "Auto")
  endif()

  set(cuda_arch_bin)
  set(cuda_arch_ptx)

  if("${CUDA_ARCH_LIST}" STREQUAL "All")
    set(CUDA_ARCH_LIST ${CUDA_KNOWN_GPU_ARCHITECTURES})
  elseif("${CUDA_ARCH_LIST}" STREQUAL "Common")
    set(CUDA_ARCH_LIST ${CUDA_COMMON_GPU_ARCHITECTURES})
  elseif("${CUDA_ARCH_LIST}" STREQUAL "Auto")
    CUDA_DETECT_INSTALLED_GPUS(CUDA_ARCH_LIST)
    message(STATUS "Autodetected CUDA architecture(s): ${CUDA_ARCH_LIST}")
  endif()

  # Now process the list and look for names
  string(REGEX REPLACE "[ \t]+" ";" CUDA_ARCH_LIST "${CUDA_ARCH_LIST}")
  list(REMOVE_DUPLICATES CUDA_ARCH_LIST)
  foreach(arch_name ${CUDA_ARCH_LIST})
    set(arch_bin)
    set(arch_ptx)
    set(add_ptx FALSE)
    # Check to see if we are compiling PTX ("+PTX" suffix).
    if(arch_name MATCHES "(.*)\\+PTX$")
      set(add_ptx TRUE)
      set(arch_name ${CMAKE_MATCH_1})
    endif()
    # Numeric spec: "N.N", "N.Na", or "N.N(N.N)" (code(arch) override).
    if(arch_name MATCHES "^([0-9]\\.[0-9]a?(\\([0-9]\\.[0-9]\\))?)$")
      set(arch_bin ${CMAKE_MATCH_1})
      set(arch_ptx ${arch_bin})
    else()
      # Look for it in our list of known architectures
      if(${arch_name} STREQUAL "Kepler+Tesla")
        set(arch_bin 3.7)
      elseif(${arch_name} STREQUAL "Kepler")
        set(arch_bin 3.5)
        set(arch_ptx 3.5)
      elseif(${arch_name} STREQUAL "Maxwell+Tegra")
        set(arch_bin 5.3)
      elseif(${arch_name} STREQUAL "Maxwell")
        set(arch_bin 5.0 5.2)
        set(arch_ptx 5.2)
      elseif(${arch_name} STREQUAL "Pascal")
        set(arch_bin 6.0 6.1)
        set(arch_ptx 6.1)
      elseif(${arch_name} STREQUAL "Volta+Tegra")
        set(arch_bin 7.2)
      elseif(${arch_name} STREQUAL "Volta")
        # Duplicate 7.0 is harmless: REMOVE_DUPLICATES below collapses it.
        set(arch_bin 7.0 7.0)
        set(arch_ptx 7.0)
      elseif(${arch_name} STREQUAL "Turing")
        set(arch_bin 7.5)
        set(arch_ptx 7.5)
      elseif(${arch_name} STREQUAL "Ampere+Tegra")
        set(arch_bin 8.7)
      elseif(${arch_name} STREQUAL "Ampere")
        set(arch_bin 8.0 8.6)
        set(arch_ptx 8.0 8.6)
      elseif(${arch_name} STREQUAL "Ada")
        set(arch_bin 8.9)
        set(arch_ptx 8.9)
      elseif(${arch_name} STREQUAL "Hopper")
        set(arch_bin 9.0)
        set(arch_ptx 9.0)
      else()
        message(SEND_ERROR "Unknown CUDA Architecture Name ${arch_name} in CUDA_SELECT_NVCC_ARCH_FLAGS")
      endif()
    endif()
    if(NOT arch_bin)
      message(SEND_ERROR "arch_bin wasn't set for some reason")
    endif()
    list(APPEND cuda_arch_bin ${arch_bin})
    if(add_ptx)
      if (NOT arch_ptx)
        set(arch_ptx ${arch_bin})
      endif()
      list(APPEND cuda_arch_ptx ${arch_ptx})
    endif()
  endforeach()

  # remove dots and convert to lists ("8.6" -> "86"; keeps "(..)" and "a")
  string(REGEX REPLACE "\\." "" cuda_arch_bin "${cuda_arch_bin}")
  string(REGEX REPLACE "\\." "" cuda_arch_ptx "${cuda_arch_ptx}")
  string(REGEX MATCHALL "[0-9()]+a?" cuda_arch_bin "${cuda_arch_bin}")
  string(REGEX MATCHALL "[0-9]+a?" cuda_arch_ptx "${cuda_arch_ptx}")

  if(cuda_arch_bin)
    list(REMOVE_DUPLICATES cuda_arch_bin)
  endif()
  if(cuda_arch_ptx)
    list(REMOVE_DUPLICATES cuda_arch_ptx)
  endif()

  set(nvcc_flags "")
  set(nvcc_archs_readable "")

  # Tell NVCC to add binaries for the specified GPUs
  foreach(arch ${cuda_arch_bin})
    if(arch MATCHES "([0-9]+)\\(([0-9]+)\\)")
      # User explicitly specified ARCH for the concrete CODE
      list(APPEND nvcc_flags -gencode arch=compute_${CMAKE_MATCH_2},code=sm_${CMAKE_MATCH_1})
      list(APPEND nvcc_archs_readable sm_${CMAKE_MATCH_1})
    else()
      # User didn't explicitly specify ARCH for the concrete CODE, we assume ARCH=CODE
      list(APPEND nvcc_flags -gencode arch=compute_${arch},code=sm_${arch})
      list(APPEND nvcc_archs_readable sm_${arch})
    endif()
  endforeach()

  # Tell NVCC to add PTX intermediate code for the specified architectures
  foreach(arch ${cuda_arch_ptx})
    list(APPEND nvcc_flags -gencode arch=compute_${arch},code=compute_${arch})
    list(APPEND nvcc_archs_readable compute_${arch})
  endforeach()

  string(REPLACE ";" " " nvcc_archs_readable "${nvcc_archs_readable}")
  set(${out_variable} ${nvcc_flags} PARENT_SCOPE)
  set(${out_variable}_readable ${nvcc_archs_readable} PARENT_SCOPE)
endfunction()
|
parrot/lib/python3.10/site-packages/torch/share/cmake/Caffe2/public/xpu.cmake
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---[ xpu
#
# Sets up the torch::sycl and torch::xpurt imported interface targets for
# Intel XPU builds, and sets PYTORCH_FOUND_XPU accordingly.

# Poor man's include guard
if(TARGET torch::xpurt)
  return()
endif()

# Find SYCL library.
# NOTE(review): REQUIRED makes find_package fatal on failure, which appears to
# make the SYCL_FOUND fallback below unreachable — confirm whether REQUIRED
# is intended here.
find_package(SYCLToolkit REQUIRED)
if(NOT SYCL_FOUND)
  set(PYTORCH_FOUND_XPU FALSE)
  return()
endif()
set(PYTORCH_FOUND_XPU TRUE)

# SYCL library interface
add_library(torch::sycl INTERFACE IMPORTED)

set_property(
    TARGET torch::sycl PROPERTY INTERFACE_INCLUDE_DIRECTORIES
    ${SYCL_INCLUDE_DIR})
set_property(
    TARGET torch::sycl PROPERTY INTERFACE_LINK_LIBRARIES
    ${SYCL_LIBRARY})

# xpurt: the XPU runtime target consumers link against; forwards to torch::sycl.
add_library(torch::xpurt INTERFACE IMPORTED)
set_property(
    TARGET torch::xpurt PROPERTY INTERFACE_LINK_LIBRARIES
    torch::sycl)
|
videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_toco_api.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e9afb8654cb0473a45215f50e88d2deb315a9502f97ffbe579e944f69d21af3f
|
| 3 |
+
size 207656
|
videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__init__.py
ADDED
|
File without changes
|
videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (180 Bytes). View file
|
|
|
videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/base.cpython-310.pyc
ADDED
|
Binary file (409 Bytes). View file
|
|
|
videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/convolutional.cpython-310.pyc
ADDED
|
Binary file (1.1 kB). View file
|
|
|
videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/core.cpython-310.pyc
ADDED
|
Binary file (488 Bytes). View file
|
|
|
videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/layers.cpython-310.pyc
ADDED
|
Binary file (1.78 kB). View file
|
|
|
videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/normalization.cpython-310.pyc
ADDED
|
Binary file (739 Bytes). View file
|
|
|
videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/pooling.cpython-310.pyc
ADDED
|
Binary file (697 Bytes). View file
|
|
|
videochat2/lib/python3.10/site-packages/tensorflow/python/layers/__pycache__/utils.cpython-310.pyc
ADDED
|
Binary file (5.84 kB). View file
|
|
|
videochat2/lib/python3.10/site-packages/tensorflow/python/layers/base.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Contains the base Layer class, from which all layers inherit."""
from tensorflow.python.keras.legacy_tf_layers import base

# Compatibility shim: re-export the legacy tf.layers base symbols from their
# Keras implementation so this module keeps its historical public API.
InputSpec = base.InputSpec

keras_style_scope = base.keras_style_scope
set_keras_style = base.set_keras_style
Layer = base.Layer
|
videochat2/lib/python3.10/site-packages/tensorflow/python/layers/convolutional.py
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================

"""Contains the convolutional layer classes and their functional aliases.
"""
from tensorflow.python.keras.legacy_tf_layers import convolutional

# Compatibility shim: re-export the legacy tf.layers convolutional classes and
# their lowercase functional wrappers from the Keras implementation.
Conv1D = convolutional.Conv1D
conv1d = convolutional.conv1d
Conv2D = convolutional.Conv2D
conv2d = convolutional.conv2d
Conv3D = convolutional.Conv3D
conv3d = convolutional.conv3d
SeparableConv1D = convolutional.SeparableConv1D
SeparableConv2D = convolutional.SeparableConv2D
separable_conv1d = convolutional.separable_conv1d
separable_conv2d = convolutional.separable_conv2d
Conv2DTranspose = convolutional.Conv2DTranspose
conv2d_transpose = convolutional.conv2d_transpose
Conv3DTranspose = convolutional.Conv3DTranspose
conv3d_transpose = convolutional.conv3d_transpose

# Aliases
# Long-form ("Convolution*") and deconvolution spellings kept for backward
# compatibility with older user code.
Convolution1D = Conv1D
Convolution2D = Conv2D
Convolution3D = Conv3D
SeparableConvolution2D = SeparableConv2D
Convolution2DTranspose = Deconvolution2D = Deconv2D = Conv2DTranspose
Convolution3DTranspose = Deconvolution3D = Deconv3D = Conv3DTranspose
convolution1d = conv1d
convolution2d = conv2d
convolution3d = conv3d
separable_convolution2d = separable_conv2d
convolution2d_transpose = deconvolution2d = deconv2d = conv2d_transpose
convolution3d_transpose = deconvolution3d = deconv3d = conv3d_transpose
|
videochat2/lib/python3.10/site-packages/tensorflow/python/layers/core.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================

"""Contains the core layers: Dense, Dropout.

Also contains their functional aliases.
"""
from tensorflow.python.keras.legacy_tf_layers import core


# Compatibility shim: re-export the legacy tf.layers core symbols from the
# Keras implementation.
Dense = core.Dense
dense = core.dense
Dropout = core.Dropout
dropout = core.dropout
Flatten = core.Flatten
flatten = core.flatten

# Aliases

FullyConnected = Dense
fully_connected = dense
|
videochat2/lib/python3.10/site-packages/tensorflow/python/layers/layers.py
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
|
| 16 |
+
# pylint: disable=line-too-long
|
| 17 |
+
"""This library provides a set of high-level neural networks layers."""
|
| 18 |
+
|
| 19 |
+
# pylint: disable=g-bad-import-order,unused-import
|
| 20 |
+
|
| 21 |
+
# Base objects.
|
| 22 |
+
from tensorflow.python.layers.base import Layer
|
| 23 |
+
|
| 24 |
+
# Core layers.
|
| 25 |
+
from tensorflow.python.layers.core import Dense
|
| 26 |
+
from tensorflow.python.layers.core import Dropout
|
| 27 |
+
from tensorflow.python.layers.core import Flatten
|
| 28 |
+
|
| 29 |
+
from tensorflow.python.layers.core import dense
|
| 30 |
+
from tensorflow.python.layers.core import dropout
|
| 31 |
+
from tensorflow.python.layers.core import flatten
|
| 32 |
+
|
| 33 |
+
# Convolutional layers.
|
| 34 |
+
from tensorflow.python.layers.convolutional import SeparableConv1D
|
| 35 |
+
from tensorflow.python.layers.convolutional import SeparableConv2D
|
| 36 |
+
from tensorflow.python.layers.convolutional import SeparableConvolution2D
|
| 37 |
+
from tensorflow.python.layers.convolutional import Conv2DTranspose
|
| 38 |
+
from tensorflow.python.layers.convolutional import Convolution2DTranspose
|
| 39 |
+
from tensorflow.python.layers.convolutional import Conv3DTranspose
|
| 40 |
+
from tensorflow.python.layers.convolutional import Convolution3DTranspose
|
| 41 |
+
from tensorflow.python.layers.convolutional import Conv1D
|
| 42 |
+
from tensorflow.python.layers.convolutional import Convolution1D
|
| 43 |
+
from tensorflow.python.layers.convolutional import Conv2D
|
| 44 |
+
from tensorflow.python.layers.convolutional import Convolution2D
|
| 45 |
+
from tensorflow.python.layers.convolutional import Conv3D
|
| 46 |
+
from tensorflow.python.layers.convolutional import Convolution3D
|
| 47 |
+
|
| 48 |
+
from tensorflow.python.layers.convolutional import separable_conv1d
|
| 49 |
+
from tensorflow.python.layers.convolutional import separable_conv2d
|
| 50 |
+
from tensorflow.python.layers.convolutional import conv2d_transpose
|
| 51 |
+
from tensorflow.python.layers.convolutional import conv3d_transpose
|
| 52 |
+
from tensorflow.python.layers.convolutional import conv1d
|
| 53 |
+
from tensorflow.python.layers.convolutional import conv2d
|
| 54 |
+
from tensorflow.python.layers.convolutional import conv3d
|
| 55 |
+
|
| 56 |
+
# Pooling layers.
|
| 57 |
+
from tensorflow.python.layers.pooling import AveragePooling1D
|
| 58 |
+
from tensorflow.python.layers.pooling import MaxPooling1D
|
| 59 |
+
from tensorflow.python.layers.pooling import AveragePooling2D
|
| 60 |
+
from tensorflow.python.layers.pooling import MaxPooling2D
|
| 61 |
+
from tensorflow.python.layers.pooling import AveragePooling3D
|
| 62 |
+
from tensorflow.python.layers.pooling import MaxPooling3D
|
| 63 |
+
|
| 64 |
+
from tensorflow.python.layers.pooling import average_pooling1d
|
| 65 |
+
from tensorflow.python.layers.pooling import max_pooling1d
|
| 66 |
+
from tensorflow.python.layers.pooling import average_pooling2d
|
| 67 |
+
from tensorflow.python.layers.pooling import max_pooling2d
|
| 68 |
+
from tensorflow.python.layers.pooling import average_pooling3d
|
| 69 |
+
from tensorflow.python.layers.pooling import max_pooling3d
|
| 70 |
+
|
| 71 |
+
# pylint: enable=g-bad-import-order,unused-import
|
videochat2/lib/python3.10/site-packages/tensorflow/python/layers/normalization.py
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================

"""Contains the normalization layer classes and their functional aliases.
"""

from tensorflow.python.util import lazy_loader

normalization = lazy_loader.LazyLoader(
    'normalization', globals(),
    'tf_keras.legacy_tf_layers.normalization')


# Maps each public alias to the attribute resolved on the lazily loaded
# normalization module.
_ALIAS_TO_ATTR = {
    'BatchNormalization': 'BatchNormalization',
    'BatchNorm': 'BatchNormalization',
    'batch_normalization': 'batch_normalization',
    'batch_norm': 'batch_normalization',
}


# pylint: disable=invalid-name
# lazy load all the attributes until they are accessed for the first time
def __getattr__(name):
  attr = _ALIAS_TO_ATTR.get(name)
  if attr is None:
    raise AttributeError(f'module {__name__} doesn\'t have attribute {name}')
  return getattr(normalization, attr)
|
videochat2/lib/python3.10/site-packages/tensorflow/python/layers/pooling.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================

"""Contains the pooling layer classes and their functional aliases.
"""
from tensorflow.python.keras.legacy_tf_layers import pooling


# Compatibility shim: re-export the legacy tf.layers pooling classes and
# their lowercase functional wrappers from the Keras implementation.
AveragePooling1D = pooling.AveragePooling1D
average_pooling1d = pooling.average_pooling1d
MaxPooling1D = pooling.MaxPooling1D
max_pooling1d = pooling.max_pooling1d
AveragePooling2D = pooling.AveragePooling2D
average_pooling2d = pooling.average_pooling2d
MaxPooling2D = pooling.MaxPooling2D
max_pooling2d = pooling.max_pooling2d
AveragePooling3D = pooling.AveragePooling3D
average_pooling3d = pooling.average_pooling3d
MaxPooling3D = pooling.MaxPooling3D
max_pooling3d = pooling.max_pooling3d

# Aliases
# Short-form 2-D names kept for backward compatibility.
AvgPool2D = AveragePooling2D
MaxPool2D = MaxPooling2D
max_pool2d = max_pooling2d
avg_pool2d = average_pooling2d
|
videochat2/lib/python3.10/site-packages/tensorflow/python/layers/utils.py
ADDED
|
@@ -0,0 +1,225 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# =============================================================================
|
| 15 |
+
|
| 16 |
+
"""Contains layer utilities for input validation and format conversion."""
|
| 17 |
+
from tensorflow.python.framework import smart_cond as smart_module
|
| 18 |
+
from tensorflow.python.ops import cond
|
| 19 |
+
from tensorflow.python.ops import variables
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def convert_data_format(data_format, ndim):
  """Maps a Keras-style `data_format` string to a TF layout string.

  Args:
    data_format: Either 'channels_last' or 'channels_first'.
    ndim: Rank of the input tensor; must be 3, 4 or 5.

  Returns:
    The corresponding layout string, e.g. 'NHWC' for ('channels_last', 4).

  Raises:
    ValueError: If `data_format` or `ndim` is not supported.
  """
  # Table-driven mapping: data_format -> rank -> layout string.
  layouts = {
      'channels_last': {3: 'NWC', 4: 'NHWC', 5: 'NDHWC'},
      'channels_first': {3: 'NCW', 4: 'NCHW', 5: 'NCDHW'},
  }
  if data_format not in layouts:
    raise ValueError(f'Invalid data_format: {data_format}. We only support '
                     '"channels_first" or "channels_last"')
  by_rank = layouts[data_format]
  if ndim not in by_rank:
    raise ValueError(f'Input rank: {ndim} not supported. We only support '
                     'input rank 3, 4 or 5.')
  return by_rank[ndim]
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def normalize_tuple(value, n, name):
  """Transforms a single integer or iterable of integers into an integer tuple.

  Args:
    value: The value to validate and convert. Could an int, or any iterable
      of ints.
    n: The size of the tuple to be returned.
    name: The name of the argument being validated, e.g. "strides" or
      "kernel_size". This is only used to format error messages.

  Returns:
    A tuple of n integers.

  Raises:
    ValueError: If something else than an int/long or iterable thereof was
      passed.
  """
  # A bare int is broadcast to all n positions.
  if isinstance(value, int):
    return (value,) * n
  try:
    result = tuple(value)
  except TypeError:
    raise ValueError(f'Argument `{name}` must be a tuple of {str(n)} '
                     f'integers. Received: {str(value)}')
  if len(result) != n:
    raise ValueError(f'Argument `{name}` must be a tuple of {str(n)} '
                     f'integers. Received: {str(value)}')
  # Each element must be convertible to an int.
  for element in result:
    try:
      int(element)
    except (ValueError, TypeError):
      raise ValueError(f'Argument `{name}` must be a tuple of {str(n)} '
                       f'integers. Received: {str(value)} including element '
                       f'{str(element)} of type '
                       f'{str(type(element))}')
  return result
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def normalize_data_format(value):
  """Lowercases and validates a `data_format` string.

  Args:
    value: A string; expected to be 'channels_first' or 'channels_last' in
      any casing.

  Returns:
    The lowercased data format.

  Raises:
    ValueError: If the value is not a recognized data format.
  """
  normalized = value.lower()
  if normalized in ('channels_first', 'channels_last'):
    return normalized
  raise ValueError('The `data_format` argument must be one of '
                   '"channels_first", "channels_last". Received: '
                   f'{str(value)}.')
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
def normalize_padding(value):
  """Lowercases and validates a `padding` string.

  Args:
    value: A string; expected to be 'valid' or 'same' in any casing.

  Returns:
    The lowercased padding mode.

  Raises:
    ValueError: If the value is not a recognized padding mode.
  """
  normalized = value.lower()
  if normalized in ('valid', 'same'):
    return normalized
  raise ValueError('The `padding` argument must be one of "valid", "same". '
                   f'Received: {str(normalized)}.')
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def conv_output_length(input_length, filter_size, padding, stride, dilation=1):
  """Determines output length of a convolution given input length.

  Args:
    input_length: integer.
    filter_size: integer.
    padding: one of "same", "valid", "full".
    stride: integer.
    dilation: dilation rate, integer.

  Returns:
    The output length (integer).
  """
  if input_length is None:
    return None
  assert padding in {'same', 'valid', 'full'}
  # Receptive field of the dilated filter: (filter_size - 1) * dilation + 1.
  effective_size = (filter_size - 1) * dilation + 1
  if padding == 'same':
    length = input_length
  elif padding == 'valid':
    length = input_length - effective_size + 1
  else:  # 'full'
    length = input_length + effective_size - 1
  # Ceiling division by the stride.
  return (length + stride - 1) // stride
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
def conv_input_length(output_length, filter_size, padding, stride):
  """Determines input length of a convolution given output length.

  Args:
    output_length: integer.
    filter_size: integer.
    padding: one of "same", "valid", "full".
    stride: integer.

  Returns:
    The input length (integer).
  """
  if output_length is None:
    return None
  assert padding in {'same', 'valid', 'full'}
  # Per-side implicit zero padding for each mode.
  pad = {'same': filter_size // 2, 'valid': 0, 'full': filter_size - 1}[padding]
  return (output_length - 1) * stride - 2 * pad + filter_size
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
def deconv_output_length(input_length, filter_size, padding, stride):
  """Determines output length of a transposed convolution given input length.

  Args:
    input_length: integer.
    filter_size: integer.
    padding: one of "same", "valid", "full".
    stride: integer.

  Returns:
    The output length (integer).
  """
  if input_length is None:
    return None
  length = input_length * stride
  if padding == 'valid':
    length += max(filter_size - stride, 0)
  elif padding == 'full':
    length -= stride + filter_size - 2
  return length
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
def smart_cond(pred, true_fn=None, false_fn=None, name=None):
  """Return either `true_fn()` if predicate `pred` is true else `false_fn()`.

  If `pred` is a bool or has a constant value, we return either `true_fn()`
  or `false_fn()`, otherwise we use `tf.cond` to dynamically route to both.

  Args:
    pred: A scalar determining whether to return the result of `true_fn` or
      `false_fn`.
    true_fn: The callable to be performed if pred is true.
    false_fn: The callable to be performed if pred is false.
    name: Optional name prefix when using `tf.cond`.

  Returns:
    Tensors returned by the call to either `true_fn` or `false_fn`.

  Raises:
    TypeError: If `true_fn` or `false_fn` is not callable.
  """
  # A `tf.Variable` predicate never has a static constant value, so route it
  # straight to the graph-level conditional; anything else goes through the
  # smart (possibly constant-folding) conditional.
  if isinstance(pred, variables.Variable):
    branch = cond.cond
  else:
    branch = smart_module.smart_cond
  return branch(pred, true_fn=true_fn, false_fn=false_fn, name=name)
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
def constant_value(pred):
  """Return the bool value for `pred`, or None if `pred` had a dynamic value.

  Args:
    pred: A scalar, either a Python bool or a TensorFlow boolean variable
      or tensor, or the Python integer 1 or 0.

  Returns:
    True or False if `pred` has a constant boolean value, None otherwise.

  Raises:
    TypeError: If `pred` is not a Variable, Tensor or bool, or Python
      integer 1 or 0.
  """
  # Allow integer booleans: map 1/0 to True/False, leave other ints alone.
  if isinstance(pred, int):
    pred = {0: False, 1: True}.get(pred, pred)

  # A `tf.Variable` is always dynamic.
  if isinstance(pred, variables.Variable):
    return None
  return smart_module.smart_constant_value(pred)
|
videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/__init__.py
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# =============================================================================
|
| 15 |
+
|
| 16 |
+
"""Ops related to Tensor Processing Units."""
|
| 17 |
+
|
| 18 |
+
import os

# Advertise the ML platform to the TPU runtime before any TPU op executes.
os.environ["TPU_ML_PLATFORM"] = "Tensorflow"
|
videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/api.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# =============================================================================
|
| 15 |
+
"""Modules that need to be exported to the API.
|
| 16 |
+
|
| 17 |
+
List TPU modules that aren't included elsewhere here so that they can be scanned
|
| 18 |
+
for tf_export decorations.
|
| 19 |
+
"""
|
| 20 |
+
|
| 21 |
+
# pylint: disable=unused-import
|
| 22 |
+
from tensorflow.python.tpu import bfloat16
|
| 23 |
+
from tensorflow.python.tpu import feature_column_v2
|
| 24 |
+
from tensorflow.python.tpu import tpu
|
| 25 |
+
|
| 26 |
+
from tensorflow.python.tpu import tpu_embedding_for_serving
|
| 27 |
+
from tensorflow.python.tpu import tpu_embedding_v1
|
| 28 |
+
from tensorflow.python.tpu import tpu_embedding_v2
|
| 29 |
+
from tensorflow.python.tpu import tpu_embedding_v2_utils
|
| 30 |
+
from tensorflow.python.tpu import tpu_embedding_v3
|
| 31 |
+
from tensorflow.python.tpu import tpu_hardware_feature
|
| 32 |
+
from tensorflow.python.tpu import tpu_optimizer
|
| 33 |
+
# pylint: enable=unused-import
|
videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/device_assignment.py
ADDED
|
@@ -0,0 +1,569 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ======================================
|
| 15 |
+
"""Library of TPU helper functions."""
|
| 16 |
+
|
| 17 |
+
import enum
|
| 18 |
+
import math
|
| 19 |
+
from typing import List, Optional, Tuple
|
| 20 |
+
|
| 21 |
+
import numpy as np
|
| 22 |
+
|
| 23 |
+
from tensorflow.python.platform import tf_logging as logging
|
| 24 |
+
from tensorflow.python.tpu.topology import Topology
|
| 25 |
+
from tensorflow.python.util.tf_export import tf_export
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
SINGLE_CORE_ASSIGNMENT = [[[0, 0, 0, 0]]]
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def _compute_task_and_cores_to_replicas(core_assignment, topology):
|
| 32 |
+
"""Computes a nested dict which maps task and logical core to replicas."""
|
| 33 |
+
task_and_cores_to_replicas = {}
|
| 34 |
+
for replica in range(core_assignment.shape[0]):
|
| 35 |
+
for logical_core in range(core_assignment.shape[1]):
|
| 36 |
+
coordinates = core_assignment[replica, logical_core, :]
|
| 37 |
+
task_id = topology.task_ordinal_at_coordinates(coordinates)
|
| 38 |
+
if task_id not in task_and_cores_to_replicas:
|
| 39 |
+
task_and_cores_to_replicas[task_id] = {}
|
| 40 |
+
if logical_core not in task_and_cores_to_replicas[task_id]:
|
| 41 |
+
task_and_cores_to_replicas[task_id][logical_core] = set()
|
| 42 |
+
|
| 43 |
+
task_and_cores_to_replicas[task_id][logical_core].add(replica)
|
| 44 |
+
|
| 45 |
+
task_to_sorted_replica_id = {}
|
| 46 |
+
|
| 47 |
+
for task, core_to_replicas in task_and_cores_to_replicas.items():
|
| 48 |
+
core_to_sorted_replicas = {}
|
| 49 |
+
for core, replicas in core_to_replicas.items():
|
| 50 |
+
core_to_sorted_replicas[core] = sorted(replicas)
|
| 51 |
+
|
| 52 |
+
task_to_sorted_replica_id[task] = core_to_sorted_replicas
|
| 53 |
+
return task_to_sorted_replica_id
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
@tf_export("tpu.experimental.DeviceOrderMode")
class DeviceOrderMode(enum.IntEnum):
  """The way of determining device orders when computing device assignment."""
  # By default the mode is set to AUTO, the library will choose to form rings
  # when that is possible (i.e. when the topology dimensions permit a ring).
  AUTO = 0
  # Form rings for replicas and model-parallel cores.
  RING = 1
  # Form meshes for replicas and/or model-parallel cores.
  MESH = 2
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
@tf_export("tpu.experimental.DeviceAssignment")
class DeviceAssignment(object):
  """Mapping from logical cores in a computation to the physical TPU topology.

  Prefer to use the `DeviceAssignment.build()` helper to construct a
  `DeviceAssignment`; it is easier if less flexible than constructing a
  `DeviceAssignment` directly.
  """

  def __init__(self, topology: Topology, core_assignment: np.ndarray):
    """Constructs a `DeviceAssignment` object.

    Args:
      topology: A `Topology` object that describes the physical TPU topology.
      core_assignment: A logical to physical core mapping, represented as a
        rank 3 numpy array. See the description of the `core_assignment`
        property for more details.

    Raises:
      ValueError: If `topology` is not `Topology` object.
      ValueError: If `core_assignment` is not a rank 3 numpy array.
    """
    if not isinstance(topology, Topology):
      raise ValueError("topology must be a Topology object, got {}".format(
          type(topology)))
    assignment = np.asarray(core_assignment, dtype=np.int32)

    self._topology = topology

    if assignment.ndim != 3:
      raise ValueError("core_assignment must be a rank 3 numpy array, "
                       f"got shape {assignment.shape}")

    # Shape is [num_replicas, num_cores_per_replica, topology_rank].
    self._num_replicas = assignment.shape[0]
    self._num_cores_per_replica = assignment.shape[1]

    if assignment.shape[-1] != topology.mesh_rank:
      raise ValueError(
          "core_assignment.shape[-1] must have size equal to topology "
          f"rank ({topology.mesh_rank}), got "
          f"core_assignment.shape={assignment.shape}")

    self._core_assignment = assignment
    self._task_and_cores_to_replicas = _compute_task_and_cores_to_replicas(
        assignment, topology)

  @property
  def topology(self) -> Topology:
    """A `Topology` that describes the TPU topology."""
    return self._topology

  @property
  def num_cores_per_replica(self) -> int:
    """The number of cores per replica."""
    return self._num_cores_per_replica

  @property
  def num_replicas(self) -> int:
    """The number of replicas of the computation."""
    return self._num_replicas

  @property
  def core_assignment(self) -> np.ndarray:
    """The logical to physical core mapping.

    Returns:
      An integer numpy array of rank 3, with shape
      `[num_replicas, num_cores_per_replica, topology_rank]`. Maps
      (replica, logical core) pairs to physical topology coordinates.
    """
    return self._core_assignment

  def coordinates(self, replica: int, logical_core: int) -> Tuple:  # pylint:disable=g-bare-generic
    """Returns the physical topology coordinates of a logical core."""
    return tuple(self.core_assignment[replica, logical_core, :])

  def lookup_replicas(self, task_id: int, logical_core: int) -> List[int]:
    """Lookup replica ids by task number and logical core.

    Args:
      task_id: TensorFlow task number.
      logical_core: An integer, identifying a logical core.
    Returns:
      A sorted list of the replicas that are attached to that task and
      logical_core.
    Raises:
      ValueError: If no replica exists in the task which contains the logical
        core.
    """
    try:
      return self._task_and_cores_to_replicas[task_id][logical_core]
    except KeyError:
      raise ValueError(
          "Can not find any replica in task: {} contains logical_core: {} ".
          format(task_id, logical_core))

  def tpu_ordinal(self, replica: int = 0, logical_core: int = 0) -> int:
    """Returns the ordinal of the TPU device assigned to a logical core."""
    return self._topology.tpu_device_ordinal_at_coordinates(
        self.coordinates(replica, logical_core))

  def host_device(self,
                  replica: int = 0,
                  logical_core: int = 0,
                  job: Optional[str] = None) -> str:
    """Returns the CPU device attached to a logical core."""
    return self._topology.cpu_device_name_at_coordinates(
        self.coordinates(replica, logical_core), job=job)

  def tpu_device(self,
                 replica: int = 0,
                 logical_core: int = 0,
                 job: Optional[str] = None) -> str:
    """Returns the name of the TPU device assigned to a logical core."""
    return self._topology.tpu_device_name_at_coordinates(
        self.coordinates(replica, logical_core), job=job)

  @classmethod
  def build(
      cls,
      topology: Topology,
      computation_shape: Optional[np.ndarray] = None,
      computation_stride: Optional[np.ndarray] = None,
      num_replicas: int = 1,
      device_order_mode: DeviceOrderMode = DeviceOrderMode.AUTO,
  ) -> "DeviceAssignment":
    """Builds a `DeviceAssignment`; see `device_assignment` for details."""
    return device_assignment(
        topology=topology,
        computation_shape=computation_shape,
        computation_stride=computation_stride,
        num_replicas=num_replicas,
        device_order_mode=device_order_mode,
    )
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
def _open_ring_2d(x_size: int, y_size: int,
|
| 204 |
+
z_coord: int) -> List[Tuple[int, int, int]]:
|
| 205 |
+
"""Ring-order of a X by Y mesh, with a fixed Z coordinate.
|
| 206 |
+
|
| 207 |
+
For example, in a 4x4 mesh, this returns the following order.
|
| 208 |
+
0 -- 1 -- 2 -- 3
|
| 209 |
+
| | | |
|
| 210 |
+
15-- 6 -- 5 -- 4
|
| 211 |
+
| | | |
|
| 212 |
+
14-- 7 -- 8 -- 9
|
| 213 |
+
| | | |
|
| 214 |
+
13-- 12-- 11-- 10
|
| 215 |
+
|
| 216 |
+
Note that chip 0 is not included in the output.
|
| 217 |
+
|
| 218 |
+
Args:
|
| 219 |
+
x_size: An integer represents the mesh size in the x-dimension. Must be
|
| 220 |
+
larger than 1.
|
| 221 |
+
y_size: An integer represents the mesh size in the y-dimension. Must be
|
| 222 |
+
larger than 1.
|
| 223 |
+
z_coord: An integer represents the z-coordinate to use for the chips in the
|
| 224 |
+
ring.
|
| 225 |
+
|
| 226 |
+
Returns:
|
| 227 |
+
A list of (x,y,z) triples in ring order.
|
| 228 |
+
"""
|
| 229 |
+
ret = []
|
| 230 |
+
for i in range(y_size // 2):
|
| 231 |
+
for j in range(1, x_size):
|
| 232 |
+
ret.append((j, 2 * i, z_coord))
|
| 233 |
+
for j in range(x_size - 1, 0, -1):
|
| 234 |
+
ret.append((j, 2 * i + 1, z_coord))
|
| 235 |
+
for i in range(y_size - 1, 0, -1):
|
| 236 |
+
ret.append((0, i, z_coord))
|
| 237 |
+
return ret
|
| 238 |
+
|
| 239 |
+
|
| 240 |
+
def _ring_3d(x_size: int, y_size: int,
             z_size: int) -> List[Tuple[int, int, int]]:
  """Ring-order of a X by Y by Z mesh.

  Constructs the 3d ring from 2d rings that are stacked in the Z dimension and
  joined in one corner.

  z == 0:
    0 -- 1 -- 2 -- 3
    |    |    |    |
    15 - 6 -- 5 -- 4
    |    |    |    |
    14 - 7 -- 8 -- 9
    |    |    |    |
    13 - 12 - 11 - 10
  z == 1:
    63 - 30 - 29 - 28
    |    |    |    |
    16 - 25 - 26 - 27
    |    |    |    |
    17 - 24 - 23 - 22
    |    |    |    |
    18 - 19 - 20 - 21
  z == 2:
    62 - 31 - 32 - 33
    |    |    |    |
    45 - 36 - 35 - 34
    |    |    |    |
    44 - 37 - 38 - 39
    |    |    |    |
    43 - 42 - 41 - 40
  z == 3:
    61 - 60 - 59 - 58
    |    |    |    |
    46 - 55 - 56 - 57
    |    |    |    |
    47 - 54 - 53 - 52
    |    |    |    |
    48 - 49 - 50 - 51

  Args:
    x_size: An integer represents the mesh size in the x-dimension. Must be
      larger than 1.
    y_size: An integer represents the mesh size in the y-dimension. Must be
      larger than 1.
    z_size: An integer represents the mesh size in the z-dimension. Must be
      larger than 1. For example, in a 4x4x4 mesh, this returns the following
      order.

  Returns:
    A list of (x,y,z) triples in ring order.
  """
  # Handle the case where 2 dimensions are size 1: the ring degenerates to a
  # line along the remaining dimension.
  if x_size == 1 and y_size == 1:
    return [(0, 0, i) for i in range(z_size)]
  if x_size == 1 and z_size == 1:
    return [(0, i, 0) for i in range(y_size)]
  if y_size == 1 and z_size == 1:
    return [(i, 0, 0) for i in range(x_size)]

  # Handle odd mesh dimensions. This never happens in practice, so we don't
  # bother to try building something optimal: just emit raster order.
  if (x_size > 1 and x_size % 2 != 0) or (y_size > 1 and
                                          y_size % 2 != 0) or (z_size > 1 and
                                                               z_size % 2 != 0):
    logging.warning("Odd dimension")
    return [(x, y, z)
            for z in range(z_size)
            for y in range(y_size)
            for x in range(x_size)]

  # Always start with chip 0. (Fixed: the original re-assigned this same
  # initial value in several branches below — the dead stores are removed.)
  ret = [(0, 0, 0)]

  # Handle the case where one dimension is size 1. We just build a flat, 2d
  # ring, permuting coordinates so the ring lies in the two non-trivial dims.
  if z_size == 1:
    ret.extend(_open_ring_2d(x_size, y_size, 0))
    return ret
  if y_size == 1:
    ret.extend((x, y, z) for (x, z, y) in _open_ring_2d(x_size, z_size, 0))
    return ret
  if x_size == 1:
    ret.extend((x, y, z) for (y, z, x) in _open_ring_2d(y_size, z_size, 0))
    return ret

  # Handle the case where all dimensions have size > 1 and even: stack 2d
  # rings, alternating direction per layer, then walk back down the z column
  # to close the loop.
  for i in range(z_size):
    layer = _open_ring_2d(x_size, y_size, i)
    if i % 2 == 0:
      ret.extend(layer)
    else:
      ret.extend(reversed(layer))
  for i in range(z_size - 1, 0, -1):
    ret.append((0, 0, i))
  return ret
|
| 340 |
+
|
| 341 |
+
|
| 342 |
+
def device_assignment(
    topology: Topology,
    computation_shape: Optional[np.ndarray] = None,
    computation_stride: Optional[np.ndarray] = None,
    num_replicas: int = 1,
    device_order_mode: DeviceOrderMode = DeviceOrderMode.AUTO,
) -> DeviceAssignment:
  """Computes a device_assignment of a computation across a TPU topology.

  Attempts to choose a compact grid of cores for locality.

  Returns a `DeviceAssignment` that describes the cores in the topology assigned
  to each core of each replica.

  `computation_shape` and `computation_stride` values should be powers of 2 for
  optimal packing.

  Args:
    topology: A `Topology` object that describes the TPU cluster topology. To
      obtain a TPU topology, evaluate the `Tensor` returned by
      `initialize_system` using `Session.run`. Either a serialized
      `TopologyProto` or a `Topology` object may be passed. Note: you must
      evaluate the `Tensor` first; you cannot pass an unevaluated `Tensor`
      here.
    computation_shape: A rank 1 int32 numpy array with size equal to the
      topology rank, describing the shape of the computation's block of cores.
      If None, the `computation_shape` is `[1] * topology_rank`.
    computation_stride: A rank 1 int32 numpy array of size `topology_rank`,
      describing the inter-core spacing of the `computation_shape` cores in the
      TPU topology. If None, the `computation_stride` is `[1] * topology_rank`.
    num_replicas: The number of computation replicas to run. The replicas will
      be packed into the free spaces of the topology.
    device_order_mode: An enum of `DeviceOrderMode` class which indicates
      whether to assign devices to form rings or meshes, or let the library to
      choose.

  Returns:
    A DeviceAssignment object, which describes the mapping between the logical
    cores in each computation replica and the physical cores in the TPU
    topology.

  Raises:
    ValueError: If `topology` is not a valid `Topology` object.
    ValueError: If `computation_shape` or `computation_stride` are not 1D int32
      numpy arrays with shape [3] where all values are positive.
    ValueError: If computation's replicas cannot fit into the TPU topology.
  """
  # Deserialize the Topology proto, if it is a string.
  if isinstance(topology, bytes):
    topology = Topology(serialized=topology)

  if not isinstance(topology, Topology):
    raise ValueError(
        f"`topology` is not a Topology object; got {type(topology)}")

  topology_rank = len(topology.mesh_shape)
  mesh_shape = topology.mesh_shape
  # Default to a single core per replica, with no striding.
  if computation_shape is None:
    computation_shape = np.array([1] * topology_rank, dtype=np.int32)
  else:
    computation_shape = np.asarray(computation_shape, dtype=np.int32)

  if computation_stride is None:
    computation_stride = np.array([1] * topology_rank, dtype=np.int32)
  else:
    computation_stride = np.asarray(computation_stride, dtype=np.int32)

  if computation_shape.shape != (topology_rank,):
    raise ValueError(
        f"computation_shape must have shape [{topology_rank}]; "
        f"got {computation_shape.shape}"
    )
  if computation_stride.shape != (topology_rank,):
    raise ValueError(
        f"computation_stride must have shape [{topology_rank}]; "
        f"got {computation_stride.shape}"
    )

  if any(computation_shape < 1):
    raise ValueError(
        "computation_shape must be positive; got computation_shape={}".format(
            computation_shape))
  if any(computation_stride < 1):
    raise ValueError(
        "computation_stride must be positive; got computation_stride={}".format(
            computation_stride))

  # Computes the physical size of one computation instance.
  computation_footprint = computation_shape * computation_stride
  if any(computation_footprint > mesh_shape):
    raise ValueError(
        "computation footprint {} does not fit in TPU topology shape {}".format(
            computation_footprint, mesh_shape))

  # Computes how many copies of the computation footprint fit in the mesh.
  block_counts = mesh_shape // computation_footprint

  # With striding, each block of the footprint can host `computation_stride`
  # interleaved replicas along each axis.
  replica_counts = block_counts * computation_stride
  max_replicas = np.prod(replica_counts)
  if num_replicas > max_replicas:
    raise ValueError(
        "requested {} replicas but only {} replicas with shape {} and "
        "computation_stride {} fit in a TPU mesh of shape {}".format(
            num_replicas, max_replicas, computation_shape, computation_stride,
            mesh_shape))

  def ceil_of_ratio(n, m):
    # Integer ceiling of n / m.
    return (n + m - 1) // m

  if topology.missing_devices.size == 0:
    # Fully healthy slice: pack replicas into a compact grid.
    replica_shape = [0] * topology_rank
    if num_replicas > 0:
      remaining_replicas = num_replicas
      remaining_dims = topology_rank

      # Choose dimensions as close to an equal cube as possible,
      # in order of increasing dimension size. By visiting dimensions
      # in increasing size, we assign the most constrained dimension
      # first, so we won't make infeasible choices.
      #
      # As a secondary sort order, visit the last dimension (core index) first,
      # then the other dimensions in increasing order. This means we try to use
      # both cores on the same chip in preference to two cores on different
      # chips. We visit the x dimension first, and the z dimension last, so
      # that we prefer to arrange adjacent replicas on the same machine when
      # possible.
      #
      # For example, if num_replicas == 4, we prefer to use a replica_shape of
      # (2,1,1,2) over (1,1,2,2).

      for x, ni in sorted(((x, ((i + 1) % topology_rank))
                           for (i, x) in enumerate(replica_counts))):
        # Recover the original dimension index from the rotated sort key.
        i = (ni + topology_rank - 1) % topology_rank
        target_size = int(math.ceil(remaining_replicas**(1.0 / remaining_dims)))
        replica_shape[i] = min(target_size, x)
        remaining_replicas = ceil_of_ratio(remaining_replicas, replica_shape[i])
        remaining_dims -= 1

      assert remaining_replicas == 1 and remaining_dims == 0

    # Assigns an offset to each replica such that no two replicas overlap.
    replica_offsets = np.full([num_replicas, topology_rank], -1, dtype=np.int32)

    enable_3d_tiling = (
        topology_rank == 4 and
        computation_shape[-1] == mesh_shape[-1]  # Only handle 3D case.
        and np.prod(computation_stride) == 1  # Ensure no stride.
        and num_replicas == max_replicas)  # Full replication.

    if device_order_mode != DeviceOrderMode.AUTO:
      if device_order_mode == DeviceOrderMode.RING and not enable_3d_tiling:
        raise ValueError(
            "device_order_mode=DeviceOrderMode.RING is not compatible with the "
            "3D tiling current topology. Try setting "
            "device_order_mode=DeviceOrderMode.AUTO"
        )
      enable_3d_tiling = device_order_mode == DeviceOrderMode.RING

    if enable_3d_tiling:
      # Ring order: nest a per-replica 3D ring inside a ring over replicas.
      assignment = []
      inner_ring = _ring_3d(computation_shape[0], computation_shape[1],
                            computation_shape[2])
      outer_ring = _ring_3d(replica_shape[0], replica_shape[1],
                            replica_shape[2])

      for replica in range(num_replicas):
        outer_x, outer_y, outer_z = outer_ring[replica]
        per_replica_assignment = []
        for index in range(np.prod(computation_shape)):
          # mesh_shape[-1] is the cores-per-chip dimension; consecutive
          # logical cores map to cores on the same chip first.
          inner_x, inner_y, inner_z = inner_ring[index // mesh_shape[-1]]
          px = outer_x * computation_shape[0] + inner_x
          py = outer_y * computation_shape[1] + inner_y
          pz = outer_z * computation_shape[2] + inner_z
          pi = index % mesh_shape[-1]
          per_replica_assignment.append([px, py, pz, pi])
        assignment.append(per_replica_assignment)
    else:
      # Mesh order: decode each replica id into a position in replica_shape.
      for replica in range(num_replicas):
        # Chooses a replica number in each axis.
        t = replica
        pos = []
        # Visit the core number first.
        for dim in np.concatenate([[replica_shape[-1]], replica_shape[:-1]]):
          pos.append(t % dim)
          t //= dim
        # Rotate back so axes are in (x, y, z, core) order.
        replica_pos = np.concatenate([pos[1:], [pos[0]]])

        # Determines where that replica starts in each axis.
        outer = replica_pos // computation_stride
        inner = replica_pos % computation_stride
        replica_offsets[replica, :] = outer * computation_footprint + inner

      # Computes a logical core -> physical core mapping for each replica.
      indices = [
          np.arange(0, computation_shape[i] * computation_stride[i],
                    computation_stride[i]) for i in range(topology_rank)
      ]
      indices = np.concatenate(
          [i[..., np.newaxis] for i in np.meshgrid(*indices, indexing="ij")],
          axis=-1)
      indices = indices.reshape((-1, topology_rank))
      # Broadcast: (num_cores, rank) + (num_replicas, 1, rank).
      assignment = indices + replica_offsets[:, np.newaxis, :]
  else:
    # We have a slice with missing chips. We define a simple assignment by
    # ignoring computation stride. This assignment should enable a consistent
    # and correct device assignment on degraded slices. It is optimal when
    # weights are not sharded. But this device assignment may be sub-optimal for
    # other model parallelism scenarios.
    assert np.prod(computation_stride) == 1
    # Next, we check if we have sufficient devices.
    assert num_replicas * np.prod(
        computation_shape) <= topology.num_tasks * topology.num_tpus_per_task
    # Map replicas to physical devices in task order.
    device_coordinates = topology.device_coordinates
    assignment = []
    devices_per_replica = np.prod(computation_shape)
    for rindex in range(num_replicas):
      replica_assignment = []
      for index in range(devices_per_replica):
        logical_id = rindex * devices_per_replica + index
        # Pick logical cores in task order
        task = logical_id // topology.num_tpus_per_task
        device = logical_id % topology.num_tpus_per_task
        # Append physical cores to the replica assignment
        replica_assignment.append(device_coordinates[task, device, :])
      assignment.append(replica_assignment)

  return DeviceAssignment(topology, core_assignment=assignment)
|
videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/feature_column.py
ADDED
|
@@ -0,0 +1,690 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ===================================================================
|
| 15 |
+
"""TPU Feature Column Library."""
|
| 16 |
+
import math
|
| 17 |
+
|
| 18 |
+
from tensorflow.python.feature_column import feature_column as fc
|
| 19 |
+
from tensorflow.python.feature_column import feature_column_lib as fc_lib
|
| 20 |
+
from tensorflow.python.framework import ops
|
| 21 |
+
from tensorflow.python.ops import array_ops
|
| 22 |
+
from tensorflow.python.ops import init_ops
|
| 23 |
+
from tensorflow.python.ops import variable_scope
|
| 24 |
+
from tensorflow.python.tpu import tpu
|
| 25 |
+
from tensorflow.python.tpu import tpu_function
|
| 26 |
+
from tensorflow.python.tpu import tpu_replication
|
| 27 |
+
# pylint: disable=protected-access
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
# Variable-scope name under which TPU feature column state is created.
_TPU_FC_TO_SCOPE = '_tpu_feature_column_scope'
# Sequence categorical columns (V1 and V2) that support max_sequence_length.
_SUPPORTED_SEQUENCE_COLUMNS = (fc._SequenceCategoricalColumn,
                               fc_lib.SequenceCategoricalColumn)


# For V2 columns, we support anything that inherits from CategoricalColumn
# other than those in the denylist. User-provided columns that inherit from
# CategoricalColumn may or may not be compatible; it is up to the user to
# manage TPU compatibility for custom columns.
_SUPPORTED_CATEGORICAL_COLUMNS_V2 = (fc_lib.CategoricalColumn,)
_DENYLISTED_CATEGORICAL_COLUMNS_V2 = (fc_lib.HashedCategoricalColumn,
                                      fc_lib.BucketizedColumn,
                                      fc_lib.CrossedColumn)
# V1 categorical columns accepted by embedding_column /
# shared_embedding_columns, plus the permissive V2 set above.
_SUPPORTED_CATEGORICAL_COLUMNS = (fc._IdentityCategoricalColumn,
                                  fc._VocabularyFileCategoricalColumn,
                                  fc._VocabularyListCategoricalColumn,
                                  fc._WeightedCategoricalColumn,
                                  fc._SequenceCategoricalColumn
                                 ) + _SUPPORTED_CATEGORICAL_COLUMNS_V2
# Suffix appended to a feature key to name its sequence-length tensor.
_SEQUENCE_FEATURE_LENGTH_POSTFIX = '_seq_length_'
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def embedding_column(categorical_column,
                     dimension,
                     combiner='mean',
                     initializer=None,
                     max_sequence_length=0,
                     learning_rate_fn=None,
                     use_safe_embedding_lookup=True):
  """TPU embedding_column for `tf.feature_column.embedding_column`.

  Note that the interface for TPU embedding_column is different from the
  non-TPU version. The following args available for the non-TPU version are
  NOT supported: ckpt_to_load_from, tensor_name_in_ckpt, max_norm and
  trainable.

  Args:
    categorical_column: A categorical_column returned from
      categorical_column_with_identity, weighted_categorical_column,
      categorical_column_with_vocabulary_file,
      categorical_column_with_vocabulary_list,
      sequence_categorical_column_with_identity,
      sequence_categorical_column_with_vocabulary_file,
      sequence_categorical_column_with_vocabulary_list
    dimension: An integer specifying dimension of the embedding, must be > 0.
    combiner: A string specifying how to reduce if there are multiple entries
      in a single row for a non-sequence column. For more information, see
      `tf.feature_column.embedding_column`.
    initializer: A variable initializer function to be used in embedding
      variable initialization. If not specified, defaults to
      `tf.compat.v1.truncated_normal_initializer` with mean `0.0` and
      standard deviation `1/sqrt(dimension)`.
    max_sequence_length: A non-negative integer specifying the max sequence
      length. Any sequence shorter than this will be padded with 0 embeddings
      and any sequence longer will be truncated. This must be positive for
      sequence features and 0 for non-sequence features.
    learning_rate_fn: A function that takes global step and returns learning
      rate for the embedding table. If you intend to use the same learning
      rate for multiple embedding tables, please ensure that you pass the
      exact same python function to all calls of embedding_column, otherwise
      performance may suffer.
    use_safe_embedding_lookup: If true, uses safe_embedding_lookup_sparse
      instead of embedding_lookup_sparse. safe_embedding_lookup_sparse
      ensures there are no empty rows and all weights and ids are positive at
      the expense of extra compute cost. This only applies to rank 2 (NxM)
      shaped input tensors. Defaults to true, consider turning off if the
      above checks are not needed. Note that having empty rows will not
      trigger any error though the output result might be 0 or omitted.

  Returns:
    A _TPUEmbeddingColumn.

  Raises:
    ValueError: if `dimension` not > 0.
    ValueError: if `initializer` is specified but not callable.
    TypeError: if categorical_column is not a supported type.
  """
  # Reject explicitly denylisted V2 column types before the general check.
  if isinstance(categorical_column, _DENYLISTED_CATEGORICAL_COLUMNS_V2):
    raise TypeError('categorical_column for tpu '
                    ' embedding_column was '
                    f'denylisted type {type(categorical_column)}')
  if not isinstance(categorical_column, _SUPPORTED_CATEGORICAL_COLUMNS):
    supported_names = ' or '.join(
        cc.__name__ for cc in _SUPPORTED_CATEGORICAL_COLUMNS)
    raise TypeError(
        'categorical_column for tpu '
        ' embedding_column must be type {}, got {}.'.format(
            supported_names, type(categorical_column)))
  if dimension is None or dimension < 1:
    raise ValueError('Invalid dimension {}.'.format(dimension))

  if initializer is None:
    # Default: truncated normal scaled by 1/sqrt(dimension).
    initializer = init_ops.truncated_normal_initializer(
        mean=0.0, stddev=1 / math.sqrt(dimension))
  elif not callable(initializer):
    raise ValueError('initializer must be callable if specified. '
                     'Embedding of column_name: {}'.format(
                         categorical_column.name))

  # (vocab_size, embedding_dimension) for the embedding table.
  table_shape = categorical_column._num_buckets, dimension  # pylint: disable=protected-access

  def _layer_creator(weight_collections, scope):
    # Builds the embedding variable via the feature-column layer machinery.
    layer = fc._EmbeddingColumnLayer(
        embedding_shape=table_shape,
        initializer=initializer,
        weight_collections=weight_collections,
        trainable=True,
        name='embedding_column_layer')
    return layer(None, scope=scope)  # pylint: disable=not-callable

  tpu_column = _TPUEmbeddingColumn(
      categorical_column=categorical_column,
      dimension=dimension,
      combiner=combiner,
      layer_creator=_layer_creator,
      ckpt_to_load_from=None,
      tensor_name_in_ckpt=None,
      max_norm=None,
      trainable=True,
      max_sequence_length=max_sequence_length,
      learning_rate_fn=learning_rate_fn,
      use_safe_embedding_lookup=use_safe_embedding_lookup)
  # For Embedding column, the initializer is hidden inside the creator Fn,
  # which is not accessible later. So, we attach it to a special field. Also
  # note that non-TPU Embedding column and non-TPU shared Embedding column
  # handle the initializer differently. See shared_embedding_columns for
  # details.
  tpu_column._tpu_initializer = initializer
  return tpu_column
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
def shared_embedding_columns(categorical_columns,
                             dimension,
                             combiner='mean',
                             initializer=None,
                             shared_embedding_collection_name=None,
                             max_sequence_lengths=None,
                             learning_rate_fn=None,
                             use_safe_embedding_lookup=True):
  """List of dense columns that convert from sparse, categorical input.

  Note that the interface for TPU embedding_column is different from the
  non-TPU version. The following args available for the non-TPU version are
  NOT supported: ckpt_to_load_from, tensor_name_in_ckpt, max_norm and
  trainable.

  Args:
    categorical_columns: A list of categorical_columns returned from
      categorical_column_with_identity, weighted_categorical_column,
      categorical_column_with_vocabulary_file,
      categorical_column_with_vocabulary_list,
      sequence_categorical_column_with_identity,
      sequence_categorical_column_with_vocabulary_file,
      sequence_categorical_column_with_vocabulary_list
    dimension: An integer specifying dimension of the embedding, must be > 0.
    combiner: A string specifying how to reduce if there are multiple entries
      in a single row for a non-sequence column. For more information, see
      `tf.feature_column.embedding_column`.
    initializer: A variable initializer function to be used in embedding
      variable initialization. If not specified, defaults to
      `tf.truncated_normal_initializer` with mean `0.0` and standard deviation
      `1/sqrt(dimension)`.
    shared_embedding_collection_name: Optional name of the collection where
      shared embedding weights are added. If not given, a reasonable name will
      be chosen based on the names of `categorical_columns`. This is also used
      in `variable_scope` when creating shared embedding weights.
    max_sequence_lengths: A list of non-negative integers, either None or
      empty or the same length as the argument categorical_columns. Entries
      corresponding to non-sequence columns must be 0 and entries corresponding
      to sequence columns specify the max sequence length for the column. Any
      sequence shorter than this will be padded with 0 embeddings and any
      sequence longer will be truncated.
    learning_rate_fn: A function that takes global step and returns learning
      rate for the embedding table. If you intend to use the same learning rate
      for multiple embedding tables, please ensure that you pass the exact same
      python function to all calls of shared_embedding_columns, otherwise
      performance may suffer.
    use_safe_embedding_lookup: If true, uses safe_embedding_lookup_sparse
      instead of embedding_lookup_sparse. safe_embedding_lookup_sparse ensures
      there are no empty rows and all weights and ids are positive at the
      expense of extra compute cost. This only applies to rank 2 (NxM) shaped
      input tensors. Defaults to true, consider turning off if the above checks
      are not needed. Note that having empty rows will not trigger any error
      though the output result might be 0 or omitted.

  Returns:
    A list of _TPUSharedEmbeddingColumn objects, one per input categorical
    column, all sharing a single embedding table.

  Raises:
    ValueError: if `dimension` not > 0.
    ValueError: if `initializer` is specified but not callable.
    ValueError: if `max_sequence_lengths` is specified and not the same length
      as `categorical_columns`.
    ValueError: if `max_sequence_lengths` is positive for a non sequence column
      or 0 for a sequence column.
  """
  # Validate every column's type up front so no table state is created on error.
  for categorical_column in categorical_columns:
    if isinstance(categorical_column, _DENYLISTED_CATEGORICAL_COLUMNS_V2):
      raise TypeError('categorical_column for tpu '
                      ' embedding_column was denylisted type '
                      f'{type(categorical_column)}')
    if not isinstance(categorical_column, _SUPPORTED_CATEGORICAL_COLUMNS):
      raise TypeError(
          'categorical_column for tpu '
          ' shared_embedding_columns must be type {}, got {}.'.format(
              ' or '.join(
                  [cc.__name__ for cc in _SUPPORTED_CATEGORICAL_COLUMNS]),
              type(categorical_column)))

  # None or empty means every column is treated as non-sequence (length 0).
  if not max_sequence_lengths:
    max_sequence_lengths = [0] * len(categorical_columns)
  if len(max_sequence_lengths) != len(categorical_columns):
    raise ValueError('max_sequence_lengths and categorical_columns must be of '
                     'the same length. len(max_sequence_lengths)={} '
                     'len(categorical_columns)={}.'.format(
                         len(max_sequence_lengths), len(categorical_columns)))

  if (dimension is None) or (dimension < 1):
    raise ValueError('Invalid dimension {}.'.format(dimension))

  if (initializer is not None) and (not callable(initializer)):
    raise ValueError('initializer must be callable if specified. ')
  if initializer is None:
    # Default: truncated normal scaled by 1/sqrt(dimension).
    initializer = init_ops.truncated_normal_initializer(
        mean=0.0, stddev=1 / math.sqrt(dimension))

  # Sort the columns so the default collection name is deterministic even if the
  # user passes columns from an unsorted collection, such as dict.values().
  sorted_columns = sorted(categorical_columns, key=lambda x: x.name)
  num_buckets = sorted_columns[0]._num_buckets  # pylint: disable=protected-access

  # All columns must agree on vocabulary size to share one table.
  for c in sorted_columns[1:]:
    if num_buckets != c._num_buckets:  # pylint: disable=protected-access
      raise ValueError(
          'To use shared_embedding_column, all categorical_columns must have '
          'the same number of buckets. Given column: {} with buckets: {} does '
          'not match column: {} with buckets: {}'.format(
              sorted_columns[0], num_buckets, c, c._num_buckets))  # pylint: disable=protected-access

  if not shared_embedding_collection_name:
    shared_embedding_collection_name = '_'.join(c.name for c in sorted_columns)
    shared_embedding_collection_name += '_shared_embedding'

  tpu_columns = []

  # Create the state (_SharedEmbeddingColumnLayer) here.
  for categorical_column, max_sequence_length in zip(
      categorical_columns, max_sequence_lengths):
    column = _TPUSharedEmbeddingColumn(
        categorical_column=categorical_column,
        dimension=dimension,
        combiner=combiner,
        initializer=initializer,
        shared_embedding_collection_name=shared_embedding_collection_name,
        ckpt_to_load_from=None,
        tensor_name_in_ckpt=None,
        max_norm=None,
        trainable=True,
        max_sequence_length=max_sequence_length,
        learning_rate_fn=learning_rate_fn,
        use_safe_embedding_lookup=use_safe_embedding_lookup)
    tpu_columns.append(column)

  return tpu_columns
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
class _TPUBaseEmbeddingColumn(object):
  """Base class for TPU Embedding Column.

  Stores the wrapped categorical column together with the sequence-length
  and learning-rate settings shared by every TPU embedding column variant,
  and declares the accessors that concrete subclasses must implement.
  """

  def __init__(self,
               categorical_column,
               max_sequence_length=0,
               learning_rate_fn=None):
    self._tpu_categorical_column = categorical_column
    self._max_sequence_length = max_sequence_length
    self._learning_rate_fn = learning_rate_fn
    # A sequence column requires a positive pad/truncate length, while a
    # non-sequence column must leave max_sequence_length at its default of 0.
    sequence = self.is_sequence_column()
    if sequence and max_sequence_length < 1:
      raise ValueError('max_sequence_length must be greater than 0 for '
                       'sequence columns. Got max_sequence_length={} for '
                       'sequence column {}.'.format(max_sequence_length,
                                                    categorical_column.name))
    if not sequence and max_sequence_length != 0:
      raise ValueError('Non zero max_seq_length={} specified for non '
                       'sequence column {}.'.format(max_sequence_length,
                                                    categorical_column.name))

  def get_combiner(self):
    """Returns the embedding combiner."""
    raise NotImplementedError('not implemented')

  def get_embedding_table_size(self):
    """Returns the embedding table size, tuple of vocab size and dimension."""
    raise NotImplementedError('not implemented')

  def get_feature_key_name(self):
    """Returns the feature key name in the features dict."""
    raise NotImplementedError('not impl')

  def get_weight_key_name(self):
    """Return the key name for weights."""
    raise NotImplementedError('not impl')

  def get_embedding_var_name(self):
    """Returns the embedding variable name.

    Feature key name and embedding variable name are usually one-to-one mapping.
    But for shared embedding columns, it is many-to-one mapping.
    """
    raise NotImplementedError('not impl')

  def get_initializer(self):
    """Returns the initializer."""
    raise NotImplementedError('not impl')

  def is_categorical_column_weighted(self):
    """Check if the categorical column of the embedding column is weighted."""
    raise NotImplementedError('not impl')

  def is_sequence_column(self):
    """Whether the wrapped categorical column is a supported sequence column."""
    return isinstance(self._tpu_categorical_column, _SUPPORTED_SEQUENCE_COLUMNS)

  def get_max_sequence_length(self):
    """Returns the configured maximum sequence length (0 for non-sequence)."""
    return self._max_sequence_length

  def get_learning_rate_fn(self):
    """Returns the per-table learning rate function, or None if unset."""
    return self._learning_rate_fn

  def get_sequence_length_feature_key_name(self):
    """Get the key for the associated sequence length feature."""
    return get_sequence_length_feature_key_name_from_feature_key_name(
        self.get_feature_key_name())
|
| 357 |
+
|
| 358 |
+
|
| 359 |
+
class _TPUEmbeddingColumn(_TPUBaseEmbeddingColumn, fc._EmbeddingColumn):
  """Core Embedding Column.

  TPU-aware variant of `fc._EmbeddingColumn`: on CPU (and via host-side
  outside compilation during TPU inference) it defers to the parent
  implementation, while on TPU it reads the activations that the TPU
  embedding engine has already produced out of the input builder.
  """

  def __new__(cls,
              categorical_column,
              dimension,
              combiner='mean',
              layer_creator=None,
              ckpt_to_load_from=None,
              tensor_name_in_ckpt=None,
              max_norm=None,
              trainable=True,
              max_sequence_length=0,
              learning_rate_fn=None,
              use_safe_embedding_lookup=True,
              bypass_scope_validation=False):
    # Note, args ckpt_to_load_from, tensor_name_in_ckpt, max_norm and trainable
    # are not supported on TPU. They are solely for matching the signature of
    # __new__ of parent class fc._EmbeddingColumn.
    # max_sequence_length and learning_rate_fn are consumed by __init__ below,
    # not by the parent __new__ (the parent is presumably namedtuple-based,
    # hence the separate __new__/__init__ — TODO confirm).
    del bypass_scope_validation
    # pylint: disable=redundant-keyword-arg
    return fc._EmbeddingColumn.__new__(
        cls,
        categorical_column,
        dimension,
        combiner=combiner,
        layer_creator=layer_creator,
        ckpt_to_load_from=ckpt_to_load_from,
        tensor_name_in_ckpt=tensor_name_in_ckpt,
        max_norm=max_norm,
        trainable=trainable,
        use_safe_embedding_lookup=use_safe_embedding_lookup)

  def __init__(self,
               categorical_column,
               dimension,
               combiner='mean',
               layer_creator=None,
               ckpt_to_load_from=None,
               tensor_name_in_ckpt=None,
               max_norm=None,
               trainable=True,
               max_sequence_length=0,
               learning_rate_fn=None,
               use_safe_embedding_lookup=True,
               bypass_scope_validation=False):
    # Initialize only the TPU-specific state; the embedding configuration
    # itself was captured by __new__ above.
    _TPUBaseEmbeddingColumn.__init__(
        self,
        categorical_column,
        max_sequence_length=max_sequence_length,
        learning_rate_fn=learning_rate_fn)
    self._key = None
    # If true, scope validation is skipped to allow the same column to be used
    # in multiple variable scopes. By default, this is False, and we expect a
    # 1:1 mapping between feature columns and scopes.
    self._bypass_scope_validation = bypass_scope_validation

  def get_combiner(self):
    """Returns the embedding combiner (e.g. 'mean')."""
    return self.combiner

  def get_embedding_table_size(self):
    """Returns num_ids and width."""
    return (self.categorical_column._num_buckets, self.dimension)

  def get_feature_key_name(self):
    """Returns the features-dict key, unwrapping a weighted column if needed."""
    if self.is_categorical_column_weighted():
      return self.categorical_column.categorical_column.name
    return self.categorical_column.name

  def get_weight_key_name(self):
    """Returns the weight feature key, or None for unweighted columns."""
    if self.is_categorical_column_weighted():
      return self.categorical_column.weight_feature_key
    return None

  def get_embedding_var_name(self):
    """Returns the embedding variable name (same as the column name here)."""
    return self.categorical_column.name

  def get_initializer(self):
    # NOTE(review): _tpu_initializer is not assigned anywhere in this class;
    # it is presumably set by the embedding_column factory — confirm.
    return self._tpu_initializer

  def is_categorical_column_weighted(self):
    """Check if the categorical column of the embedding column is weighted."""
    if isinstance(
        self.categorical_column,
        (
            fc._WeightedCategoricalColumn,  # pylint: disable=protected-access
            fc_lib.WeightedCategoricalColumn)):
      return True
    return False

  def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
    """Returns embedding activations.

    During TPU inference the parent lookup runs host-side via
    outside_compilation; on CPU the parent implementation is used directly;
    on TPU the activation is fetched from `inputs` and the variable scope is
    recorded for later variable creation.
    """
    if tpu.under_tpu_inference_context():
      def host_computation():
        return fc._EmbeddingColumn._get_dense_tensor(
            self, inputs, weight_collections, trainable)

      return tpu_replication.outside_compilation(host_computation)

    if _is_running_on_cpu():
      return fc._EmbeddingColumn._get_dense_tensor(
          self, inputs, weight_collections, trainable)

    # TPU mode
    # Get the embeddings from the LazyBuilder.
    tensor = inputs.get(self.get_feature_key_name())

    # Add to collection for _create_tpu_embedding_variables_and_ops
    _record_variable_scope_and_name(
        self.get_embedding_var_name(),
        'embedding_weights',
        bypass_scope_validation=self._bypass_scope_validation)

    return tensor

  def _get_sequence_dense_tensor(
      self, inputs, weight_collections=None, trainable=None):
    """Sequence variant of `_get_dense_tensor`.

    Returns a (dense_tensor, sequence_length) pair; the same three-way
    inference/CPU/TPU dispatch as `_get_dense_tensor` applies.
    """
    if tpu.under_tpu_inference_context():
      def host_computation():
        return fc._EmbeddingColumn._get_sequence_dense_tensor(
            self, inputs, weight_collections, trainable)

      return tpu_replication.outside_compilation(host_computation)

    if _is_running_on_cpu():
      return fc._EmbeddingColumn._get_sequence_dense_tensor(
          self, inputs, weight_collections, trainable)

    tensor = inputs.get(self.get_feature_key_name())
    tensor_lengths = inputs.get(self.get_sequence_length_feature_key_name())

    # inputs is a _LazyBuilder and for rank 1 tensors, it calls expand_dims(-1).
    # We need to undo this to match the standard CPU sequence embedding.
    tensor_lengths = array_ops.squeeze(tensor_lengths, -1)

    # Add to collection for _create_tpu_embedding_variables_and_ops
    _record_variable_scope_and_name(
        self.get_embedding_var_name(),
        'embedding_weights',
        bypass_scope_validation=self._bypass_scope_validation)

    return fc._SequenceDenseColumn.TensorSequenceLengthPair(
        dense_tensor=tensor, sequence_length=tensor_lengths)
|
| 504 |
+
|
| 505 |
+
|
| 506 |
+
class _TPUSharedEmbeddingColumn(_TPUBaseEmbeddingColumn,
                                fc._SharedEmbeddingColumn):
  """Core Shared Embedding Column.

  Like `_TPUEmbeddingColumn`, but several categorical columns share a single
  embedding table identified by `shared_embedding_collection_name`, so the
  feature key to embedding variable mapping is many-to-one.
  """

  def __new__(cls,
              categorical_column,
              dimension,
              combiner='mean',
              initializer=None,
              shared_embedding_collection_name=None,
              ckpt_to_load_from=None,
              tensor_name_in_ckpt=None,
              max_norm=None,
              trainable=True,
              max_sequence_length=0,
              learning_rate_fn=None,
              use_safe_embedding_lookup=True):
    # ckpt_to_load_from, tensor_name_in_ckpt, max_norm and trainable are only
    # accepted to match the parent __new__ signature; max_sequence_length and
    # learning_rate_fn are consumed by __init__ below.
    return fc._SharedEmbeddingColumn.__new__(
        cls,
        categorical_column,
        dimension,
        combiner=combiner,
        initializer=initializer,
        shared_embedding_collection_name=shared_embedding_collection_name,
        ckpt_to_load_from=ckpt_to_load_from,
        tensor_name_in_ckpt=tensor_name_in_ckpt,
        max_norm=max_norm,
        trainable=trainable,
        use_safe_embedding_lookup=use_safe_embedding_lookup)

  def __init__(self,
               categorical_column,
               dimension,
               combiner='mean',
               initializer=None,
               shared_embedding_collection_name=None,
               ckpt_to_load_from=None,
               tensor_name_in_ckpt=None,
               max_norm=None,
               trainable=True,
               max_sequence_length=0,
               learning_rate_fn=None,
               use_safe_embedding_lookup=True):

    # Only the TPU-specific state lives here; everything else was captured by
    # __new__ above.
    _TPUBaseEmbeddingColumn.__init__(
        self,
        categorical_column,
        max_sequence_length=max_sequence_length,
        learning_rate_fn=learning_rate_fn)
    self._key = None

  def get_combiner(self):
    """Returns the embedding combiner (e.g. 'mean')."""
    return self.combiner

  def get_embedding_table_size(self):
    """Returns num_ids and width."""
    return (self.categorical_column._num_buckets, self.dimension)

  def get_feature_key_name(self):
    """Returns the features-dict key, unwrapping a weighted column if needed."""
    if self.is_categorical_column_weighted():
      return self.categorical_column.categorical_column.name
    return self.categorical_column.name

  def get_weight_key_name(self):
    """Returns the weight feature key, or None for unweighted columns."""
    if self.is_categorical_column_weighted():
      return self.categorical_column.weight_feature_key
    return None

  def get_embedding_var_name(self):
    """Returns the shared collection name; many columns map to this one var."""
    return self.shared_embedding_collection_name

  def get_initializer(self):
    """Returns the initializer captured by __new__."""
    return self.initializer

  def is_categorical_column_weighted(self):
    """Check if the categorical column of the embedding column is weighted."""
    if isinstance(
        self.categorical_column,
        (
            fc._WeightedCategoricalColumn,  # pylint: disable=protected-access
            fc_lib.WeightedCategoricalColumn)):
      return True
    return False

  def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
    """Returns embedding activations.

    Same three-way dispatch as `_TPUEmbeddingColumn._get_dense_tensor`:
    host-side outside_compilation under TPU inference, the parent
    implementation on CPU, and a read from `inputs` on TPU.
    """
    if tpu.under_tpu_inference_context():
      def host_computation():
        return fc._SharedEmbeddingColumn._get_dense_tensor(
            self, inputs, weight_collections, trainable)

      return tpu_replication.outside_compilation(host_computation)

    if _is_running_on_cpu():
      return fc._SharedEmbeddingColumn._get_dense_tensor(
          self, inputs, weight_collections, trainable)

    # TPU mode
    # Get the embeddings from the LazyBuilder.
    tensor = inputs.get(self.get_feature_key_name())

    # Add to collection for _create_tpu_embedding_variables_and_ops
    _record_variable_scope_and_name(
        self.get_embedding_var_name(),
        'embedding_weights',
        is_shared_embedding=True)
    return tensor

  def _get_sequence_dense_tensor(
      self, inputs, weight_collections=None, trainable=None):
    """Sequence variant of `_get_dense_tensor`, returning lengths as well."""
    if tpu.under_tpu_inference_context():
      def host_computation():
        return fc._SharedEmbeddingColumn._get_sequence_dense_tensor(
            self, inputs, weight_collections, trainable)

      return tpu_replication.outside_compilation(host_computation)

    if _is_running_on_cpu():
      return fc._SharedEmbeddingColumn._get_sequence_dense_tensor(
          self, inputs, weight_collections, trainable)

    tensor = inputs.get(self.get_feature_key_name())
    # NOTE(review): unlike _TPUEmbeddingColumn._get_sequence_dense_tensor,
    # the lengths tensor is not squeezed here — confirm this asymmetry is
    # intended.
    tensor_lengths = inputs.get(self.get_sequence_length_feature_key_name())

    # Add to collection for _create_tpu_embedding_variables_and_ops
    _record_variable_scope_and_name(
        self.get_embedding_var_name(),
        'embedding_weights',
        is_shared_embedding=True)

    return fc._SequenceDenseColumn.TensorSequenceLengthPair(
        dense_tensor=tensor, sequence_length=tensor_lengths)
|
| 640 |
+
|
| 641 |
+
|
| 642 |
+
def _record_variable_scope_and_name(embedding_var_name,
                                    embedding_var_name_in_fc,
                                    is_shared_embedding=False,
                                    bypass_scope_validation=False):
  """Add embedding variable name and scope to collection.

  Records, in a single-element graph collection holding one dict, the
  variable scope under which each embedding variable is used, validating
  that repeated recordings agree unless validation is explicitly relaxed.
  """
  graph = ops.get_default_graph()
  collection = graph.get_collection_ref(_TPU_FC_TO_SCOPE)
  if not collection:
    # First recording on this graph: seed the collection with the dict
    # mapping embedding var name -> (scope name, name used inside fc).
    collection.append({})
  name_to_scope = collection[0]

  scope_name = variable_scope.get_variable_scope().name

  if embedding_var_name not in name_to_scope:
    name_to_scope[embedding_var_name] = (scope_name,
                                         embedding_var_name_in_fc)
    return

  # Already recorded: check consistency with the earlier recording.
  recorded_scope_name, recorded_fc_name = name_to_scope[embedding_var_name]
  scope_mismatch_allowed = is_shared_embedding or bypass_scope_validation
  if recorded_scope_name != scope_name and not scope_mismatch_allowed:
    raise ValueError(
        'For embedding var name {}, the variable scope name is different, '
        'got {}; expected {}'.format(embedding_var_name,
                                     scope_name,
                                     recorded_scope_name))
  if recorded_fc_name != embedding_var_name_in_fc:
    raise ValueError(
        'For embedding var name {}, the embedding name is different, '
        'got {}; expected {}'.format(embedding_var_name,
                                     embedding_var_name_in_fc,
                                     recorded_fc_name))
|
| 674 |
+
|
| 675 |
+
|
| 676 |
+
def _is_running_on_cpu():
  """Returns True when not inside a TPU sharded context (i.e. CPU mode)."""
  num_shards = tpu_function.get_tpu_context().number_of_shards
  return num_shards is None
|
| 679 |
+
|
| 680 |
+
|
| 681 |
+
def get_sequence_length_feature_key_name_from_feature_key_name(feature_name):
  """Derives the sequence-length feature key from a base feature key.

  Args:
    feature_name: The feature key of a sequence column.

  Returns:
    A string which is the feature key for the associated feature length column.
  """
  return '{}{}'.format(feature_name, _SEQUENCE_FEATURE_LENGTH_POSTFIX)
|
videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/feature_column_v2.py
ADDED
|
@@ -0,0 +1,1097 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ===================================================================
|
| 15 |
+
"""TPU Feature Column Library."""
|
| 16 |
+
import copy
|
| 17 |
+
import enum
|
| 18 |
+
import math
|
| 19 |
+
from tensorflow.python.feature_column import feature_column as fc
|
| 20 |
+
from tensorflow.python.feature_column import feature_column_lib as fc_lib
|
| 21 |
+
from tensorflow.python.framework import dtypes
|
| 22 |
+
from tensorflow.python.framework import ops
|
| 23 |
+
from tensorflow.python.ops import array_ops
|
| 24 |
+
from tensorflow.python.ops import embedding_ops
|
| 25 |
+
from tensorflow.python.ops import init_ops
|
| 26 |
+
from tensorflow.python.ops import math_ops
|
| 27 |
+
from tensorflow.python.ops import sparse_ops
|
| 28 |
+
from tensorflow.python.ops import variable_scope
|
| 29 |
+
from tensorflow.python.tpu import tpu
|
| 30 |
+
from tensorflow.python.tpu import tpu_replication
|
| 31 |
+
from tensorflow.python.tpu.feature_column import _is_running_on_cpu
|
| 32 |
+
from tensorflow.python.tpu.feature_column import _record_variable_scope_and_name
|
| 33 |
+
from tensorflow.python.tpu.feature_column import _SUPPORTED_CATEGORICAL_COLUMNS_V2
|
| 34 |
+
from tensorflow.python.tpu.feature_column import _SUPPORTED_SEQUENCE_COLUMNS
|
| 35 |
+
from tensorflow.python.tpu.feature_column import _TPUBaseEmbeddingColumn
|
| 36 |
+
from tensorflow.python.util.tf_export import tf_export
|
| 37 |
+
# pylint: disable=protected-access
|
| 38 |
+
|
| 39 |
+
_ALLOWED_DEVICES = ['cpu', 'tpu_tensor_core', 'tpu_embedding_core']
|
| 40 |
+
_TENSOR_CORE_MASK_KEY_SUFFIX = '__TENSOR_CORE_MASK'
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
class EmbeddingDevice(enum.Enum):
  """Where an embedding lookup runs: host CPU, TensorCore, or embedding core."""
  CPU = enum.auto()
  TPU_TENSOR_CORE = enum.auto()
  TPU_EMBEDDING_CORE = enum.auto()
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
@tf_export(v1=['tpu.experimental.embedding_column'])
def embedding_column_v2(categorical_column,
                        dimension,
                        combiner='mean',
                        initializer=None,
                        max_sequence_length=0,
                        learning_rate_fn=None,
                        embedding_lookup_device=None,
                        tensor_core_shape=None,
                        use_safe_embedding_lookup=True):
  """TPU version of `tf.compat.v1.feature_column.embedding_column`.

  Note that the interface for `tf.tpu.experimental.embedding_column` is
  different from that of `tf.compat.v1.feature_column.embedding_column`: The
  following arguments are NOT supported: `ckpt_to_load_from`,
  `tensor_name_in_ckpt`, `max_norm` and `trainable`.

  Use this function in place of `tf.compat.v1.feature_column.embedding_column`
  when you want to use the TPU to accelerate your embedding lookups via TPU
  embeddings.

  ```
  column = tf.feature_column.categorical_column_with_identity(...)
  tpu_column = tf.tpu.experimental.embedding_column(column, 10)
  ...
  def model_fn(features):
    dense_feature = tf.keras.layers.DenseFeature(tpu_column)
    embedded_feature = dense_feature(features)
    ...

  estimator = tf.estimator.tpu.TPUEstimator(
      model_fn=model_fn,
      ...
      embedding_config_spec=tf.estimator.tpu.experimental.EmbeddingConfigSpec(
          column=[tpu_column],
          ...))
  ```

  Args:
    categorical_column: A categorical column returned from
      `categorical_column_with_identity`, `weighted_categorical_column`,
      `categorical_column_with_vocabulary_file`,
      `categorical_column_with_vocabulary_list`,
      `sequence_categorical_column_with_identity`,
      `sequence_categorical_column_with_vocabulary_file`,
      `sequence_categorical_column_with_vocabulary_list`
    dimension: An integer specifying dimension of the embedding, must be > 0.
    combiner: A string specifying how to reduce if there are multiple entries
      in a single row for a non-sequence column. For more information, see
      `tf.feature_column.embedding_column`.
    initializer: A variable initializer function to be used in embedding
      variable initialization. If not specified, defaults to
      `tf.compat.v1.truncated_normal_initializer` with mean `0.0` and
      standard deviation `1/sqrt(dimension)`.
    max_sequence_length: A non-negative integer specifying the max sequence
      length. Any sequence shorter than this will be padded with 0 embeddings
      and any sequence longer will be truncated. This must be positive for
      sequence features and 0 for non-sequence features.
    learning_rate_fn: A function that takes global step and returns learning
      rate for the embedding table. If you intend to use the same learning rate
      for multiple embedding tables, please ensure that you pass the exact same
      python function to all calls of embedding_column, otherwise performance
      may suffer.
    embedding_lookup_device: The device on which to run the embedding lookup.
      Valid options are "cpu", "tpu_tensor_core", and "tpu_embedding_core".
      If specifying "tpu_tensor_core", a tensor_core_shape must be supplied.
      If not specified, the default behavior is embedding lookup on
      "tpu_embedding_core" for training and "cpu" for inference.
      Valid options for training : ["tpu_embedding_core", "tpu_tensor_core"]
      Valid options for serving : ["cpu", "tpu_tensor_core"]
      For training, tpu_embedding_core is good for large embedding vocab (>1M),
      otherwise, tpu_tensor_core is often sufficient.
      For serving, doing embedding lookup on tpu_tensor_core during serving is
      a way to reduce host cpu usage in cases where that is a bottleneck.
    tensor_core_shape: If supplied, a list of integers which specifies
      the intended dense shape to run embedding lookup for this feature on
      TensorCore. The batch dimension can be left None or -1 to indicate
      a dynamic shape. Only rank 2 shapes currently supported.
    use_safe_embedding_lookup: If true, uses safe_embedding_lookup_sparse
      instead of embedding_lookup_sparse. safe_embedding_lookup_sparse ensures
      there are no empty rows and all weights and ids are positive at the
      expense of extra compute cost. This only applies to rank 2 (NxM) shaped
      input tensors. Defaults to true, consider turning off if the above checks
      are not needed. Note that having empty rows will not trigger any error
      though the output result might be 0 or omitted.

  Returns:
    A `_TPUEmbeddingColumnV2`.

  Raises:
    ValueError: if `dimension` not > 0.
    ValueError: if `initializer` is specified but not callable.
  """

  if not isinstance(categorical_column, _SUPPORTED_CATEGORICAL_COLUMNS_V2):
    raise TypeError(
        'categorical_column for tpu '
        'embedding_column must be type {}, got {}.'.format(' or '.join([
            cc.__name__ for cc in _SUPPORTED_CATEGORICAL_COLUMNS_V2
        ]), type(categorical_column)))
  if (dimension is None) or (dimension < 1):
    raise ValueError('Invalid dimension {}.'.format(dimension))
  if tensor_core_shape and len(tensor_core_shape) != 2:
    raise ValueError(
        'tensor_core_shape must be size 2. Got {}.'.format(tensor_core_shape))

  if (initializer is not None) and (not callable(initializer)):
    raise ValueError('initializer must be callable if specified. '
                     'Embedding of column_name: {}'.format(
                         categorical_column.name))
  if initializer is None:
    # Default documented above: truncated normal scaled by the embedding
    # dimension.
    initializer = init_ops.truncated_normal_initializer(
        mean=0.0, stddev=1 / math.sqrt(dimension))

  if (embedding_lookup_device and
      embedding_lookup_device not in _ALLOWED_DEVICES):
    raise ValueError(
        f'If set, embedding_lookup_device must be in {_ALLOWED_DEVICES}')

  # Normalize the user-supplied device string to the EmbeddingDevice enum.
  if embedding_lookup_device == 'cpu':
    embedding_lookup_device = EmbeddingDevice.CPU
  elif embedding_lookup_device == 'tpu_tensor_core':
    embedding_lookup_device = EmbeddingDevice.TPU_TENSOR_CORE
  elif embedding_lookup_device == 'tpu_embedding_core':
    embedding_lookup_device = EmbeddingDevice.TPU_EMBEDDING_CORE

  if embedding_lookup_device == EmbeddingDevice.TPU_TENSOR_CORE:
    # TensorCore lookup needs a static dense shape and has no sequence
    # column support.
    if not tensor_core_shape:
      raise ValueError('Using embedding_lookup_device=tpu_tensor_core requires '
                       'tensor_core_shape to be set.')
    if isinstance(categorical_column, _SUPPORTED_SEQUENCE_COLUMNS):
      raise ValueError('embedding_lookup_device=tpu_tensor_core currently does '
                       'not support sequence columns.')

  if not embedding_lookup_device:
    # No explicit device requested: use the default column type, whose lookup
    # device follows the documented training/inference defaults.
    return _TPUEmbeddingColumnV2(
        categorical_column=categorical_column,
        dimension=dimension,
        combiner=combiner,
        initializer=initializer,
        max_sequence_length=max_sequence_length,
        learning_rate_fn=learning_rate_fn,
        use_safe_embedding_lookup=use_safe_embedding_lookup)
  else:
    return _TPUDeviceSpecificEmbeddingColumnV2(
        categorical_column=categorical_column,
        dimension=dimension,
        combiner=combiner,
        initializer=initializer,
        max_sequence_length=max_sequence_length,
        learning_rate_fn=learning_rate_fn,
        embedding_lookup_device=embedding_lookup_device,
        tensor_core_shape=tensor_core_shape,
        use_safe_embedding_lookup=use_safe_embedding_lookup)
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
@tf_export(v1=['tpu.experimental.shared_embedding_columns'])
def shared_embedding_columns_v2(categorical_columns,
                                dimension,
                                combiner='mean',
                                initializer=None,
                                shared_embedding_collection_name=None,
                                max_sequence_lengths=None,
                                learning_rate_fn=None,
                                embedding_lookup_device=None,
                                tensor_core_shape=None,
                                use_safe_embedding_lookup=True):
  """TPU version of `tf.compat.v1.feature_column.shared_embedding_columns`.

  Note that the interface for `tf.tpu.experimental.shared_embedding_columns` is
  different from that of `tf.compat.v1.feature_column.shared_embedding_columns`:
  The following arguments are NOT supported: `ckpt_to_load_from`,
  `tensor_name_in_ckpt`, `max_norm` and `trainable`.

  Use this function in place of
  `tf.compat.v1.feature_column.shared_embedding_columns` when you want to use
  the TPU to accelerate your embedding lookups via TPU embeddings.

  ```
  column_a = tf.feature_column.categorical_column_with_identity(...)
  column_b = tf.feature_column.categorical_column_with_identity(...)
  tpu_columns = tf.tpu.experimental.shared_embedding_columns(
      [column_a, column_b], 10)
  ...
  def model_fn(features):
    dense_feature = tf.keras.layers.DenseFeature(tpu_columns)
    embedded_feature = dense_feature(features)
    ...

  estimator = tf.estimator.tpu.TPUEstimator(
      model_fn=model_fn,
      ...
      embedding_config_spec=tf.estimator.tpu.experimental.EmbeddingConfigSpec(
          column=tpu_columns,
          ...))
  ```

  Args:
    categorical_columns: A list of categorical columns returned from
      `categorical_column_with_identity`, `weighted_categorical_column`,
      `categorical_column_with_vocabulary_file`,
      `categorical_column_with_vocabulary_list`,
      `sequence_categorical_column_with_identity`,
      `sequence_categorical_column_with_vocabulary_file`,
      `sequence_categorical_column_with_vocabulary_list`
    dimension: An integer specifying dimension of the embedding, must be > 0.
    combiner: A string specifying how to reduce if there are multiple entries in
      a single row for a non-sequence column. For more information, see
      `tf.feature_column.embedding_column`.
    initializer: A variable initializer function to be used in embedding
      variable initialization. If not specified, defaults to
      `tf.truncated_normal_initializer` with mean `0.0` and standard deviation
      `1/sqrt(dimension)`.
    shared_embedding_collection_name: Optional name of the collection where
      shared embedding weights are added. If not given, a reasonable name will
      be chosen based on the names of `categorical_columns`. This is also used
      in `variable_scope` when creating shared embedding weights.
    max_sequence_lengths: A list of non-negative integers, either None or empty
      or the same length as the argument categorical_columns. Entries
      corresponding to non-sequence columns must be 0 and entries corresponding
      to sequence columns specify the max sequence length for the column. Any
      sequence shorter than this will be padded with 0 embeddings and any
      sequence longer will be truncated.
    learning_rate_fn: A function that takes global step and returns learning
      rate for the embedding table. If you intend to use the same learning rate
      for multiple embedding tables, please ensure that you pass the exact same
      python function to all calls of shared_embedding_columns, otherwise
      performance may suffer.
    embedding_lookup_device: The device on which to run the embedding lookup.
      Valid options are "cpu", "tpu_tensor_core", and "tpu_embedding_core". If
      specifying "tpu_tensor_core", a tensor_core_shape must be supplied.
      Defaults to "cpu". If not specified, the default behavior is embedding
      lookup on "tpu_embedding_core" for training and "cpu" for inference.
      Valid options for training : ["tpu_embedding_core", "tpu_tensor_core"]
      Valid options for serving : ["cpu", "tpu_tensor_core"]
      For training, tpu_embedding_core is good for large embedding vocab (>1M),
      otherwise, tpu_tensor_core is often sufficient.
      For serving, doing embedding lookup on tpu_tensor_core during serving is
      a way to reduce host cpu usage in cases where that is a bottleneck.
    tensor_core_shape: If supplied, a list of integers which specifies the
      intended dense shape to run embedding lookup for this feature on
      TensorCore. The batch dimension can be left None or -1 to indicate a
      dynamic shape. Only rank 2 shapes currently supported.
    use_safe_embedding_lookup: If true, uses safe_embedding_lookup_sparse
      instead of embedding_lookup_sparse. safe_embedding_lookup_sparse ensures
      there are no empty rows and all weights and ids are positive at the
      expense of extra compute cost. This only applies to rank 2 (NxM) shaped
      input tensors. Defaults to true, consider turning off if the above checks
      are not needed. Note that having empty rows will not trigger any error
      though the output result might be 0 or omitted.

  Returns:
    A list of `_TPUSharedEmbeddingColumnV2`.

  Raises:
    ValueError: if `dimension` not > 0.
    ValueError: if `initializer` is specified but not callable.
    ValueError: if `max_sequence_lengths` is specified and not the same length
      as `categorical_columns`.
    ValueError: if `max_sequence_lengths` is positive for a non sequence column
      or 0 for a sequence column.
  """

  # Reject any column type this TPU path cannot handle up front, before any
  # state is created.
  for categorical_column in categorical_columns:
    if not isinstance(categorical_column, _SUPPORTED_CATEGORICAL_COLUMNS_V2):
      raise TypeError(
          'categorical_column for tpu '
          ' shared_embedding_columns must be type {}, got {}.'.format(
              ' or '.join(
                  [cc.__name__ for cc in _SUPPORTED_CATEGORICAL_COLUMNS_V2]),
              type(categorical_column)))

  # None or [] both mean "all columns are non-sequence" (max length 0 each).
  if not max_sequence_lengths:
    max_sequence_lengths = [0] * len(categorical_columns)
  if len(max_sequence_lengths) != len(categorical_columns):
    raise ValueError('max_sequence_lengths and categorical_columns must be of '
                     'the same length. len(max_sequence_lengths)={} '
                     'len(categorical_columns)={}.'.format(
                         len(max_sequence_lengths), len(categorical_columns)))

  if (dimension is None) or (dimension < 1):
    raise ValueError('Invalid dimension {}.'.format(dimension))
  if tensor_core_shape and len(tensor_core_shape) != 2:
    raise ValueError(
        'tensor_core_shape must be size 2. Got {}.'.format(tensor_core_shape))

  if (initializer is not None) and (not callable(initializer)):
    raise ValueError('initializer must be callable if specified. ')
  if initializer is None:
    initializer = init_ops.truncated_normal_initializer(
        mean=0.0, stddev=1 / math.sqrt(dimension))

  # Sort the columns so the default collection name is deterministic even if the
  # user passes columns from an unsorted collection, such as dict.values().
  sorted_columns = sorted(categorical_columns, key=lambda x: x.name)
  num_buckets = sorted_columns[0]._num_buckets  # pylint: disable=protected-access

  # A shared table requires every column to index into the same vocabulary.
  for c in sorted_columns[1:]:
    if num_buckets != c._num_buckets:  # pylint: disable=protected-access
      raise ValueError(
          'To use shared_embedding_column, all categorical_columns must have '
          'the same number of buckets. Given column: {} with buckets: {} does '
          'not match column: {} with buckets: {}'.format(
              sorted_columns[0], num_buckets, c, c._num_buckets))  # pylint: disable=protected-access

  if not shared_embedding_collection_name:
    shared_embedding_collection_name = '_'.join(c.name for c in sorted_columns)
    shared_embedding_collection_name += '_shared_embedding'

  tpu_columns = []

  # One creator is shared by all returned columns so they all reference the
  # same underlying embedding weights.
  column_creator = fc_lib.SharedEmbeddingColumnCreator(
      dimension=dimension, initializer=initializer, ckpt_to_load_from=None,
      tensor_name_in_ckpt=None, num_buckets=num_buckets, trainable=None,
      name=shared_embedding_collection_name)

  if (embedding_lookup_device and
      embedding_lookup_device not in _ALLOWED_DEVICES):
    raise ValueError(
        f'If set, embedding_lookup_device must be in {_ALLOWED_DEVICES}')

  # Map the user-facing string to the internal EmbeddingDevice enum.
  if embedding_lookup_device == 'cpu':
    embedding_lookup_device = EmbeddingDevice.CPU
  elif embedding_lookup_device == 'tpu_tensor_core':
    embedding_lookup_device = EmbeddingDevice.TPU_TENSOR_CORE
  elif embedding_lookup_device == 'tpu_embedding_core':
    embedding_lookup_device = EmbeddingDevice.TPU_EMBEDDING_CORE

  if embedding_lookup_device == EmbeddingDevice.TPU_TENSOR_CORE:
    if not tensor_core_shape:
      raise ValueError('Using embedding_lookup_device=tpu_tensor_core requires '
                       'tensor_core_shape to be set.')
    for c in sorted_columns:
      if isinstance(c, _SUPPORTED_SEQUENCE_COLUMNS):
        raise ValueError('embedding_lookup_device=tpu_tensor_core currently '
                         'does not support sequence columns.')

  # Create the state (_SharedEmbeddingColumnLayer) here.
  # Note: columns are built in the caller's original order (not sorted order),
  # zipped with the matching max_sequence_lengths entry.
  for categorical_column, max_sequence_length in zip(
      categorical_columns, max_sequence_lengths):
    if not embedding_lookup_device:
      column = _TPUSharedEmbeddingColumnV2(
          categorical_column=categorical_column,
          shared_embedding_column_creator=column_creator,
          combiner=combiner,
          initializer=initializer,
          shared_embedding_collection_name=shared_embedding_collection_name,
          max_sequence_length=max_sequence_length,
          learning_rate_fn=learning_rate_fn,
          use_safe_embedding_lookup=use_safe_embedding_lookup)
    else:
      # An explicit lookup device was requested; use the device-specific
      # variant which carries the device and TensorCore dense shape.
      column = _TPUSharedDeviceSpecificEmbeddingColumnV2(
          categorical_column=categorical_column,
          shared_embedding_column_creator=column_creator,
          combiner=combiner,
          initializer=initializer,
          shared_embedding_collection_name=shared_embedding_collection_name,
          max_sequence_length=max_sequence_length,
          learning_rate_fn=learning_rate_fn,
          embedding_lookup_device=embedding_lookup_device,
          tensor_core_shape=tensor_core_shape,
          use_safe_embedding_lookup=use_safe_embedding_lookup)
    tpu_columns.append(column)

  return tpu_columns
|
| 414 |
+
|
| 415 |
+
|
| 416 |
+
class _TPUEmbeddingColumnV2(_TPUBaseEmbeddingColumn, fc_lib.EmbeddingColumn):
  """Core Embedding Column.

  Feature-column-V2 embedding column that routes lookups to the TPU embedding
  core when running on TPU, and falls back to the stock
  `fc_lib.EmbeddingColumn` behavior on CPU or under TPU inference (via
  outside compilation).
  """

  def __new__(cls,
              categorical_column,
              dimension,
              combiner='mean',
              initializer=None,
              max_sequence_length=0,
              learning_rate_fn=None,
              use_safe_embedding_lookup=True,
              bypass_scope_validation=False):
    # bypass_scope_validation is consumed in __init__ only; the namedtuple
    # base does not have a field for it.
    del bypass_scope_validation
    # pylint: disable=redundant-keyword-arg
    return fc_lib.EmbeddingColumn.__new__(
        cls,
        categorical_column,
        dimension,
        combiner=combiner,
        initializer=initializer,
        ckpt_to_load_from=None,
        tensor_name_in_ckpt=None,
        max_norm=None,
        trainable=True,
        use_safe_embedding_lookup=use_safe_embedding_lookup)

  def __getnewargs__(self):
    # Mirrors the __new__/__init__ parameter order so the column can be
    # pickled/deep-copied.
    return (self._tpu_categorical_column, self.dimension, self.combiner,
            self.initializer, self._max_sequence_length, self._learning_rate_fn,
            self.use_safe_embedding_lookup, self._bypass_scope_validation)

  def __deepcopy__(self, memo):
    # Rebuild through the constructor (rather than copying fields) so both
    # __new__ and __init__ run on the copy.
    return _TPUEmbeddingColumnV2(
        *(copy.deepcopy(a, memo) for a in self.__getnewargs__()))

  def __init__(self,
               categorical_column,
               dimension,
               combiner='mean',
               initializer=None,
               max_sequence_length=0,
               learning_rate_fn=None,
               use_safe_embedding_lookup=True,
               bypass_scope_validation=False):
    _TPUBaseEmbeddingColumn.__init__(
        self,
        categorical_column,
        max_sequence_length=max_sequence_length,
        learning_rate_fn=learning_rate_fn)
    self._key = None
    # If true, scope validation is skipped to allow the same column to be used
    # in multiple variable scopes. By default, this is False, and we expect a
    # 1:1 mapping between feature columns and scopes.
    self._bypass_scope_validation = bypass_scope_validation

  def get_combiner(self):
    """Returns the combiner used to reduce multi-valued rows."""
    return self.combiner

  def get_embedding_table_size(self):
    """Returns num_ids and width."""
    return (self.categorical_column._num_buckets, self.dimension)

  def get_feature_key_name(self):
    """Returns the feature key of the underlying (unweighted) column."""
    if self.is_categorical_column_weighted():
      # Weighted columns wrap the real categorical column; unwrap one level.
      return self.categorical_column.categorical_column.name
    return self.categorical_column.name

  def get_weight_key_name(self):
    """Returns the weight feature key, or None for unweighted columns."""
    if self.is_categorical_column_weighted():
      return self.categorical_column.weight_feature_key
    return None

  def get_embedding_var_name(self):
    """Returns the name used for this column's embedding variable scope."""
    return self.categorical_column.name

  def get_initializer(self):
    """Returns the embedding variable initializer."""
    return self.initializer

  def is_categorical_column_weighted(self):
    """Check if the categorical column of the embedding column is weighted."""
    if isinstance(
        self.categorical_column,
        (
            fc._WeightedCategoricalColumn,  # pylint: disable=protected-access
            fc_lib.WeightedCategoricalColumn)):
      return True
    return False

  def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
    """Feature-column-V1 dense tensor; TPU path reads precomputed embeddings."""
    if tpu.under_tpu_inference_context():
      # During TPU inference the lookup runs on the host via outside
      # compilation.
      def host_computation():
        return fc_lib.EmbeddingColumn._get_dense_tensor(
            self, inputs, weight_collections, trainable)

      return tpu_replication.outside_compilation(host_computation)

    if _is_running_on_cpu():
      return fc_lib.EmbeddingColumn._get_dense_tensor(
          self, inputs, weight_collections, trainable)

    # TPU mode
    # Get the embeddings from the LazyBuilder.
    tensor = inputs.get(self.get_feature_key_name())

    # Add to collection for _create_tpu_embedding_variables_and_ops
    _record_variable_scope_and_name(
        self.get_embedding_var_name(),
        'embedding_weights',
        bypass_scope_validation=self._bypass_scope_validation)

    return tensor

  def create_state(self, state_manager):
    """Creates embedding state on CPU; on TPU only records the scope."""
    if _is_running_on_cpu():
      return fc_lib.EmbeddingColumn.create_state(
          self, state_manager)

    # Create state is called for the EmbeddingColumn to create its embedding
    # variables under feature column V2, if we are on TPU so record the scope
    # here.
    _record_variable_scope_and_name(
        self.get_embedding_var_name(),
        'embedding_weights',
        bypass_scope_validation=self._bypass_scope_validation)

  def get_dense_tensor(self, transformation_cache, state_manager):
    """Feature-column-V2 dense tensor; TPU path reads precomputed embeddings."""
    if tpu.under_tpu_inference_context():
      def host_computation():
        return fc_lib.EmbeddingColumn.get_dense_tensor(
            self, transformation_cache, state_manager)

      return tpu_replication.outside_compilation(host_computation)

    if _is_running_on_cpu():
      return fc_lib.EmbeddingColumn.get_dense_tensor(
          self, transformation_cache, state_manager)

    # TPU mode
    # Get the embeddings from the FeatureTransformationCache.
    tensor = transformation_cache.get(self.get_feature_key_name(),
                                      state_manager)

    return tensor

  def _get_sequence_dense_tensor(
      self, inputs, weight_collections=None, trainable=None):
    """Feature-column-V1 sequence variant of `_get_dense_tensor`."""
    if tpu.under_tpu_inference_context():
      def host_computation():
        return fc_lib.EmbeddingColumn._get_sequence_dense_tensor(
            self, inputs, weight_collections, trainable)

      return tpu_replication.outside_compilation(host_computation)

    if _is_running_on_cpu():
      return fc_lib.EmbeddingColumn._get_sequence_dense_tensor(
          self, inputs, weight_collections, trainable)

    tensor = inputs.get(self.get_feature_key_name())
    tensor_lengths = inputs.get(self.get_sequence_length_feature_key_name())

    # inputs is a _LazyBuilder and for rank 1 tensors, it calls expand_dims(-1).
    # We need to undo this to match the standard CPU sequence embedding.
    tensor_lengths = array_ops.squeeze(tensor_lengths, -1)

    # Add to collection for _create_tpu_embedding_variables_and_ops
    _record_variable_scope_and_name(
        self.get_embedding_var_name(),
        'embedding_weights',
        bypass_scope_validation=self._bypass_scope_validation)

    return fc_lib.SequenceDenseColumn.TensorSequenceLengthPair(
        dense_tensor=tensor, sequence_length=tensor_lengths)

  def get_sequence_dense_tensor(self, transformation_cache, state_manager):
    """Feature-column-V2 sequence variant of `get_dense_tensor`."""
    if tpu.under_tpu_inference_context():
      def host_computation():
        return fc_lib.EmbeddingColumn.get_sequence_dense_tensor(
            self, transformation_cache, state_manager)

      return tpu_replication.outside_compilation(host_computation)

    if _is_running_on_cpu():
      return fc_lib.EmbeddingColumn.get_sequence_dense_tensor(
          self, transformation_cache, state_manager)

    tensor = transformation_cache.get(self.get_feature_key_name(),
                                      state_manager)
    tensor_lengths = transformation_cache.get(
        self.get_sequence_length_feature_key_name(),
        state_manager)

    # FeatureTransformationCache expands rank 1 tensors (like sequence length)
    # to rank 2. We need to undo this to match the standard CPU sequence
    # embedding.
    tensor_lengths = array_ops.squeeze(tensor_lengths, -1)

    return fc_lib.SequenceDenseColumn.TensorSequenceLengthPair(
        dense_tensor=tensor, sequence_length=tensor_lengths)
|
| 617 |
+
|
| 618 |
+
|
| 619 |
+
class _TPUSharedEmbeddingColumnV2(_TPUBaseEmbeddingColumn,
                                  fc_lib.SharedEmbeddingColumn):
  """Core Shared Embedding Column.

  Feature-column-V2 shared embedding column: several categorical columns
  share one embedding table (via `shared_embedding_column_creator`). Routes
  lookups to the TPU embedding core when running on TPU, and falls back to
  `fc_lib.SharedEmbeddingColumn` on CPU or under TPU inference.
  """

  def __new__(cls,
              categorical_column,
              shared_embedding_column_creator,
              combiner='mean',
              initializer=None,
              shared_embedding_collection_name=None,
              max_sequence_length=0,
              learning_rate_fn=None,
              use_safe_embedding_lookup=True):
    # pylint: disable=redundant-keyword-arg
    return fc_lib.SharedEmbeddingColumn.__new__(
        cls,
        categorical_column,
        combiner=combiner,
        shared_embedding_column_creator=shared_embedding_column_creator,
        max_norm=None,
        use_safe_embedding_lookup=use_safe_embedding_lookup)

  def __getnewargs__(self):
    # NOTE(review): use_safe_embedding_lookup is not included here, unlike
    # _TPUEmbeddingColumnV2.__getnewargs__, so __deepcopy__/pickle rebuild
    # copies with the default (True) even if the original was constructed
    # with False — confirm whether this omission is intentional.
    return (self._tpu_categorical_column, self.shared_embedding_column_creator,
            self.combiner, self._initializer,
            self._shared_embedding_collection_name, self._max_sequence_length,
            self._learning_rate_fn)

  def __deepcopy__(self, memo):
    # Rebuild through the constructor so both __new__ and __init__ run on
    # the copy.
    return _TPUSharedEmbeddingColumnV2(
        *(copy.deepcopy(a, memo) for a in self.__getnewargs__()))

  def __init__(self,
               categorical_column,
               shared_embedding_column_creator,
               combiner='mean',
               initializer=None,
               shared_embedding_collection_name=None,
               max_sequence_length=0,
               learning_rate_fn=None,
               use_safe_embedding_lookup=True):

    _TPUBaseEmbeddingColumn.__init__(
        self,
        categorical_column,
        max_sequence_length=max_sequence_length,
        learning_rate_fn=learning_rate_fn)
    self._initializer = initializer
    self._shared_embedding_collection_name = shared_embedding_collection_name

  def get_combiner(self):
    """Returns the combiner used to reduce multi-valued rows."""
    return self.combiner

  def get_embedding_table_size(self):
    """Returns num_ids and width."""
    return (self.categorical_column._num_buckets,
            self.shared_embedding_column_creator.dimension)

  def get_feature_key_name(self):
    """Returns the feature key of the underlying (unweighted) column."""
    if self.is_categorical_column_weighted():
      # Weighted columns wrap the real categorical column; unwrap one level.
      return self.categorical_column.categorical_column.name
    return self.categorical_column.name

  def get_weight_key_name(self):
    """Returns the weight feature key, or None for unweighted columns."""
    if self.is_categorical_column_weighted():
      return self.categorical_column.weight_feature_key
    return None

  def get_embedding_var_name(self):
    """Returns the shared collection name used as the variable scope key."""
    return self._shared_embedding_collection_name

  def get_initializer(self):
    """Returns the embedding variable initializer."""
    return self._initializer

  def is_categorical_column_weighted(self):
    """Check if the categorical column of the embedding column is weighted."""
    if isinstance(
        self.categorical_column,
        (
            fc._WeightedCategoricalColumn,  # pylint: disable=protected-access
            fc_lib.WeightedCategoricalColumn)):
      return True
    return False

  def _get_dense_tensor_internal(
      self, transformation_cache, state_manager):
    """Dense tensor lookup; TPU path reads precomputed embeddings."""
    if tpu.under_tpu_inference_context():
      # During TPU inference the lookup runs on the host via outside
      # compilation.
      def host_computation():
        return fc_lib.SharedEmbeddingColumn._get_dense_tensor_internal(
            self, transformation_cache, state_manager)

      return tpu_replication.outside_compilation(host_computation)

    if _is_running_on_cpu():
      return fc_lib.SharedEmbeddingColumn._get_dense_tensor_internal(
          self, transformation_cache, state_manager)

    # TPU mode
    # Get the embeddings from the FeatureTransformationCache.
    tensor = transformation_cache.get(self.get_feature_key_name(),
                                      state_manager)

    # Add to collection for _create_tpu_embedding_variables_and_ops
    # Note that in Feature Column V2, shared embeddings have no scope.
    _record_variable_scope_and_name(
        self.get_embedding_var_name(),
        self.shared_embedding_column_creator._name,
        is_shared_embedding=True)
    return tensor

  def get_sequence_dense_tensor(
      self, transformation_cache, state_manager):
    """Sequence variant of the dense tensor lookup."""
    if tpu.under_tpu_inference_context():
      def host_computation():
        return fc_lib.SharedEmbeddingColumn.get_sequence_dense_tensor(
            self, transformation_cache, state_manager)

      return tpu_replication.outside_compilation(host_computation)

    if _is_running_on_cpu():
      return fc_lib.SharedEmbeddingColumn.get_sequence_dense_tensor(
          self, transformation_cache, state_manager)

    tensor = self._get_dense_tensor_internal(
        transformation_cache, state_manager)
    tensor_lengths = transformation_cache.get(
        self.get_sequence_length_feature_key_name(),
        state_manager)

    # FeatureTransformationCache expands rank 1 tensors (like sequence length)
    # to rank 2. We need to undo this to match the standard CPU sequence
    # embedding.
    tensor_lengths = array_ops.squeeze(tensor_lengths, -1)

    return fc_lib.SequenceDenseColumn.TensorSequenceLengthPair(
        dense_tensor=tensor, sequence_length=tensor_lengths)
|
| 758 |
+
|
| 759 |
+
|
| 760 |
+
def split_sequence_columns_v2(feature_columns):
  """Split a list of _TPUEmbeddingColumn into sequence and non-sequence columns.

  For use in a TPUEstimator model_fn function. E.g.

  def model_fn(features):
    sequence_columns, feature_columns = (
        tf.tpu.feature_column.split_sequence_columns(feature_columns))
    input = tf.feature_column.input_layer(
        features=features, feature_columns=feature_columns)
    sequence_features, sequence_lengths = (
        tf.contrib.feature_column.sequence_input_layer(
            features=features, feature_columns=sequence_columns))

  Args:
    feature_columns: A list of _TPUEmbeddingColumns to split.

  Returns:
    Two lists of _TPUEmbeddingColumns, the first is the sequence columns and the
    second is the non-sequence columns.
  """
  seq_cols = []
  non_seq_cols = []
  for col in feature_columns:
    # Only the TPU V2 embedding column types are accepted.
    if not isinstance(col, (_TPUEmbeddingColumnV2,
                            _TPUSharedEmbeddingColumnV2)):
      raise TypeError(
          'column must be a _TPUEmbeddingColumnV2 or '
          f'_TPUSharedEmbeddingColumnV2 but got {type(col)} instead.')
    bucket = seq_cols if col.is_sequence_column() else non_seq_cols
    bucket.append(col)
  return seq_cols, non_seq_cols
|
| 794 |
+
|
| 795 |
+
|
| 796 |
+
def sparse_embedding_aggregate_slice(params,
                                     values_and_values_mask,
                                     combiner='mean',
                                     name='sparse_embedding_aggregate_slice'):
  """Uses XLA's dynamic slice operations to perform embedding lookups.

  From third_party/cloud_tpu/models/movielens/tpu_embedding.py

  Args:
    params: Tensor of embedding table. Rank 2 (table_size x embedding dim)
    values_and_values_mask: is a two-tuple that contains: values - Tensor of
      embedding indices. Rank 2 (batch x n_indices) values_mask - Tensor of mask
      / weights. Rank 2 (batch x n_indices)
    combiner: The combiner to use for the embedding lookup. Currently supports
      'sum' and 'mean'.
    name: Optional name scope for created ops

  Returns:
    Rank 2 tensor of aggregated (per batch element) embedding vectors.

  Raises:
    ValueError: Combiner is not supported.
  """
  ids, mask = values_and_values_mask  # unpack the two-tuple
  with ops.name_scope(name):
    embedding_dim = params.get_shape().as_list()[1]
    batch, padded = ids.get_shape().as_list()
    # A statically-unknown batch dimension becomes -1 for the reshapes below.
    batch = batch or -1

    # Look up every (possibly padded) index and restore the
    # (batch, padded, dim) layout.
    looked_up = array_ops.reshape(
        embedding_ops.embedding_lookup(
            params, array_ops.reshape(ids, [batch, padded])),
        [batch, padded, embedding_dim])

    # Broadcast the mask over the embedding dimension and zero out padding
    # before aggregating over the indices axis.
    mask3d = array_ops.reshape(mask, [batch, padded, 1])
    summed = math_ops.reduce_sum(looked_up * mask3d, axis=1)
    if combiner == 'sum':
      return summed
    elif combiner == 'mean':
      # In the case we have an empty row, both the numerator and the mask sum
      # are 0, so clamp the denominator at 1.0 to avoid NaNs; the mask sum is
      # integer-valued, making 1.0 the smallest non-zero value.
      denom = math_ops.maximum(math_ops.reduce_sum(mask3d, axis=1), 1.0)
      return summed / denom
    else:
      raise ValueError('Dense TPU Embedding does not support combiner '
                       'other than sum and mean.')
|
| 848 |
+
|
| 849 |
+
|
| 850 |
+
def pad_sparse_embedding_lookup_indices(sparse_indices, padded_size):
  """Creates statically-sized Tensors containing indices and weights.

  From third_party/cloud_tpu/models/movielens/tpu_embedding.py

  Also computes sparse_indices.values % embedding_table_size, for equivalent
  functionality to sparse_column_with_integerized_feature. The returned
  padded weight Tensor also doubles as a mask indicating which values in
  the returned padded indices Tensor are indices versus padded zeros.

  Args:
    sparse_indices: SparseTensor of embedding lookup indices.
    padded_size: Number of columns of the returned Tensors. Indices which fall
      out of bounds will be truncated to the padded size.

  Returns:
    (sparse_indices.values padded to the specified size,
     a mask the same size as the returned padded values in which 0s
     indicate padded locations and 1s (or values from sparse_weights)
     indicate actual values)
  """
  n_rows = sparse_indices.dense_shape[0]
  # Drop any entries beyond the padded width up front.
  clipped = sparse_ops.sparse_slice(sparse_indices, [0, 0],
                                    [n_rows, padded_size])
  coords = clipped.indices
  ids = clipped.values

  # Scatter the surviving ids into a dense (n_rows, padded_size) tensor;
  # untouched slots stay 0.
  dense_ids = array_ops.scatter_nd(
      coords,
      math_ops.cast(ids, dtypes.int32),
      shape=(n_rows, padded_size))

  # A 1.0 at every real entry doubles as the validity mask.
  ones = array_ops.ones_like(ids, dtype=dtypes.float32)
  mask = array_ops.scatter_nd(coords, ones, shape=(n_rows, padded_size))

  return dense_ids, mask
|
| 886 |
+
|
| 887 |
+
|
| 888 |
+
def _check_invalid_cases(embedding_lookup_device):
|
| 889 |
+
"""Checks for invalid embedding_lookup_device configurations."""
|
| 890 |
+
if (tpu.under_tpu_inference_context() and
|
| 891 |
+
embedding_lookup_device == EmbeddingDevice.TPU_EMBEDDING_CORE):
|
| 892 |
+
raise ValueError(
|
| 893 |
+
'Using embedding_lookup_device=tpu_embedding_core during inference '
|
| 894 |
+
'is not supported.')
|
| 895 |
+
if embedding_lookup_device == EmbeddingDevice.CPU:
|
| 896 |
+
if not tpu.under_tpu_inference_context():
|
| 897 |
+
raise ValueError(
|
| 898 |
+
'Using TPUEmbeddingColumn with embedding_lookup_device="cpu" '
|
| 899 |
+
'during training is not supported.')
|
| 900 |
+
|
| 901 |
+
|
| 902 |
+
class _TPUDeviceSpecificEmbeddingColumnV2(_TPUEmbeddingColumnV2):
|
| 903 |
+
"""TPUEmbeddingColumn which allows serving on TensorCore."""
|
| 904 |
+
|
| 905 |
+
def __new__(cls, *args, **kwargs):
|
| 906 |
+
# For __new__, just capture the inference dense shape and call parent.
|
| 907 |
+
if 'tensor_core_shape' in kwargs:
|
| 908 |
+
cls._tensor_core_shape = kwargs['tensor_core_shape']
|
| 909 |
+
del kwargs['tensor_core_shape']
|
| 910 |
+
if 'embedding_lookup_device' in kwargs:
|
| 911 |
+
cls._embedding_lookup_device = kwargs['embedding_lookup_device']
|
| 912 |
+
del kwargs['embedding_lookup_device']
|
| 913 |
+
return _TPUEmbeddingColumnV2.__new__(cls, *args, **kwargs) # pytype: disable=wrong-keyword-args # always-use-return-annotations
|
| 914 |
+
|
| 915 |
+
def __init__(self, *args, **kwargs):
|
| 916 |
+
# For __init__, just capture the inference dense shape and call parent.
|
| 917 |
+
if 'tensor_core_shape' in kwargs:
|
| 918 |
+
self._tensor_core_shape = kwargs['tensor_core_shape']
|
| 919 |
+
del kwargs['tensor_core_shape']
|
| 920 |
+
if 'embedding_lookup_device' in kwargs:
|
| 921 |
+
self._embedding_lookup_device = kwargs['embedding_lookup_device']
|
| 922 |
+
del kwargs['embedding_lookup_device']
|
| 923 |
+
_TPUEmbeddingColumnV2.__init__(self, *args, **kwargs)
|
| 924 |
+
|
| 925 |
+
def __deepcopy__(self, memo):
|
| 926 |
+
return _TPUDeviceSpecificEmbeddingColumnV2(
|
| 927 |
+
*(copy.deepcopy(a, memo) for a in self.__getnewargs__()),
|
| 928 |
+
tensor_core_shape=self._tensor_core_shape,
|
| 929 |
+
embedding_lookup_device=self._embedding_lookup_device)
|
| 930 |
+
|
| 931 |
+
def create_state(self, state_manager):
|
| 932 |
+
_check_invalid_cases(self._embedding_lookup_device)
|
| 933 |
+
# CPU case.
|
| 934 |
+
is_cpu = self._embedding_lookup_device == EmbeddingDevice.CPU
|
| 935 |
+
is_cpu = is_cpu or _is_running_on_cpu()
|
| 936 |
+
if is_cpu:
|
| 937 |
+
return fc_lib.EmbeddingColumn.create_state(self, state_manager)
|
| 938 |
+
# TPU_EMBEDDING_CORE case.
|
| 939 |
+
elif self._embedding_lookup_device == EmbeddingDevice.TPU_EMBEDDING_CORE:
|
| 940 |
+
return super(_TPUDeviceSpecificEmbeddingColumnV2,
|
| 941 |
+
self).create_state(state_manager)
|
| 942 |
+
|
| 943 |
+
# TPU_EMBEDDING_CORE case.
|
| 944 |
+
return fc_lib.EmbeddingColumn.create_state(self, state_manager)
|
| 945 |
+
|
| 946 |
+
def get_dense_tensor(self, transformation_cache, state_manager):
|
| 947 |
+
"""Private method that follows get_dense_tensor."""
|
| 948 |
+
_check_invalid_cases(self._embedding_lookup_device)
|
| 949 |
+
# CPU Case.
|
| 950 |
+
is_cpu = self._embedding_lookup_device == EmbeddingDevice.CPU
|
| 951 |
+
is_cpu = is_cpu or _is_running_on_cpu()
|
| 952 |
+
if is_cpu:
|
| 953 |
+
return super(_TPUDeviceSpecificEmbeddingColumnV2,
|
| 954 |
+
self).get_dense_tensor(transformation_cache, state_manager)
|
| 955 |
+
# TPU_EMBEDDING_CORE case.
|
| 956 |
+
elif self._embedding_lookup_device == EmbeddingDevice.TPU_EMBEDDING_CORE:
|
| 957 |
+
return super(_TPUDeviceSpecificEmbeddingColumnV2,
|
| 958 |
+
self).get_dense_tensor(transformation_cache, state_manager)
|
| 959 |
+
|
| 960 |
+
# TPU_EMBEDDING_CORE cases.
|
| 961 |
+
if tpu.under_tpu_inference_context():
|
| 962 |
+
# For inference, use outside compile to densify and pad the input tensors.
|
| 963 |
+
sparse_tensor = transformation_cache.get(self.categorical_column.name,
|
| 964 |
+
state_manager)
|
| 965 |
+
|
| 966 |
+
def host_computation():
|
| 967 |
+
return pad_sparse_embedding_lookup_indices(sparse_tensor,
|
| 968 |
+
self._tensor_core_shape[1])
|
| 969 |
+
|
| 970 |
+
values, mask = tpu_replication.outside_compilation(host_computation)
|
| 971 |
+
else:
|
| 972 |
+
# For training, the inputs should already have been densified and padded.
|
| 973 |
+
values = transformation_cache.get(self.categorical_column.name,
|
| 974 |
+
state_manager)
|
| 975 |
+
mask = transformation_cache.get(
|
| 976 |
+
self.categorical_column.name + _TENSOR_CORE_MASK_KEY_SUFFIX,
|
| 977 |
+
state_manager)
|
| 978 |
+
embedding_weights = state_manager.get_variable(
|
| 979 |
+
self, name='embedding_weights')
|
| 980 |
+
return sparse_embedding_aggregate_slice(embedding_weights, (values, mask),
|
| 981 |
+
self.get_combiner())
|
| 982 |
+
|
| 983 |
+
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
|
| 984 |
+
_check_invalid_cases(self._embedding_lookup_device)
|
| 985 |
+
# CPU Case.
|
| 986 |
+
is_cpu = self._embedding_lookup_device == EmbeddingDevice.CPU
|
| 987 |
+
is_cpu = is_cpu or _is_running_on_cpu()
|
| 988 |
+
if is_cpu:
|
| 989 |
+
return super(_TPUDeviceSpecificEmbeddingColumnV2,
|
| 990 |
+
self)._get_dense_tensor(inputs, weight_collections,
|
| 991 |
+
trainable)
|
| 992 |
+
# TPU_EMBEDDING_CORE case.
|
| 993 |
+
elif self._embedding_lookup_device == EmbeddingDevice.TPU_EMBEDDING_CORE:
|
| 994 |
+
return super(_TPUDeviceSpecificEmbeddingColumnV2,
|
| 995 |
+
self)._get_dense_tensor(inputs, weight_collections,
|
| 996 |
+
trainable)
|
| 997 |
+
|
| 998 |
+
# TPU_EMBEDDING_CORE cases.
|
| 999 |
+
if tpu.under_tpu_inference_context():
|
| 1000 |
+
# For inference, use outside compile to densify and pad the input tensors.
|
| 1001 |
+
sparse_tensor = inputs.get(self.get_feature_key_name())
|
| 1002 |
+
|
| 1003 |
+
def host_computation():
|
| 1004 |
+
return pad_sparse_embedding_lookup_indices(sparse_tensor,
|
| 1005 |
+
self._tensor_core_shape[1])
|
| 1006 |
+
|
| 1007 |
+
values, mask = tpu_replication.outside_compilation(host_computation)
|
| 1008 |
+
else:
|
| 1009 |
+
# For training, the inputs should already have been densified and padded.
|
| 1010 |
+
values = inputs.get(self.get_feature_key_name())
|
| 1011 |
+
mask = inputs.get(self.get_feature_key_name() +
|
| 1012 |
+
_TENSOR_CORE_MASK_KEY_SUFFIX)
|
| 1013 |
+
|
| 1014 |
+
embedding_shape = (self.categorical_column._num_buckets, self.dimension) # pylint: disable=protected-access
|
| 1015 |
+
if (weight_collections and
|
| 1016 |
+
ops.GraphKeys.GLOBAL_VARIABLES not in weight_collections):
|
| 1017 |
+
weight_collections.append(ops.GraphKeys.GLOBAL_VARIABLES)
|
| 1018 |
+
embedding_weights = variable_scope.get_variable(
|
| 1019 |
+
name='embedding_weights',
|
| 1020 |
+
shape=embedding_shape,
|
| 1021 |
+
dtype=dtypes.float32,
|
| 1022 |
+
initializer=self.initializer,
|
| 1023 |
+
trainable=self.trainable and trainable,
|
| 1024 |
+
collections=weight_collections)
|
| 1025 |
+
return sparse_embedding_aggregate_slice(embedding_weights, (values, mask),
|
| 1026 |
+
self.get_combiner())
|
| 1027 |
+
|
| 1028 |
+
|
| 1029 |
+
class _TPUSharedDeviceSpecificEmbeddingColumnV2(_TPUSharedEmbeddingColumnV2):
|
| 1030 |
+
"""TPUSharedEmbeddingColumnV2 which allows serving on TensorCore."""
|
| 1031 |
+
|
| 1032 |
+
def __new__(cls, *args, **kwargs):
|
| 1033 |
+
# For __new__, just capture the inference dense shape and call parent.
|
| 1034 |
+
if 'tensor_core_shape' in kwargs:
|
| 1035 |
+
cls._tensor_core_shape = kwargs['tensor_core_shape']
|
| 1036 |
+
del kwargs['tensor_core_shape']
|
| 1037 |
+
if 'embedding_lookup_device' in kwargs:
|
| 1038 |
+
cls._embedding_lookup_device = kwargs['embedding_lookup_device']
|
| 1039 |
+
del kwargs['embedding_lookup_device']
|
| 1040 |
+
|
| 1041 |
+
return _TPUSharedEmbeddingColumnV2.__new__(cls, *args, **kwargs) # pytype: disable=wrong-keyword-args # always-use-return-annotations
|
| 1042 |
+
|
| 1043 |
+
def __init__(self, *args, **kwargs):
|
| 1044 |
+
# For __init__, just capture the inference dense shape and call parent.
|
| 1045 |
+
if 'tensor_core_shape' in kwargs:
|
| 1046 |
+
self._tensor_core_shape = kwargs['tensor_core_shape']
|
| 1047 |
+
del kwargs['tensor_core_shape']
|
| 1048 |
+
if 'embedding_lookup_device' in kwargs:
|
| 1049 |
+
self._embedding_lookup_device = kwargs['embedding_lookup_device']
|
| 1050 |
+
del kwargs['embedding_lookup_device']
|
| 1051 |
+
_TPUSharedEmbeddingColumnV2.__init__(self, *args, **kwargs)
|
| 1052 |
+
|
| 1053 |
+
def __deepcopy__(self, memo):
|
| 1054 |
+
return _TPUSharedDeviceSpecificEmbeddingColumnV2(
|
| 1055 |
+
*(copy.deepcopy(a, memo) for a in self.__getnewargs__()),
|
| 1056 |
+
tensor_core_shape=self._tensor_core_shape,
|
| 1057 |
+
embedding_lookup_device=self._embedding_lookup_device)
|
| 1058 |
+
|
| 1059 |
+
def _get_dense_tensor_internal(self, transformation_cache, state_manager):
|
| 1060 |
+
"""Private method that follows _get_dense_tensor_internal."""
|
| 1061 |
+
_check_invalid_cases(self._embedding_lookup_device)
|
| 1062 |
+
# CPU Case.
|
| 1063 |
+
is_cpu = self._embedding_lookup_device == EmbeddingDevice.CPU
|
| 1064 |
+
is_cpu = is_cpu or _is_running_on_cpu()
|
| 1065 |
+
if is_cpu:
|
| 1066 |
+
return super(_TPUSharedDeviceSpecificEmbeddingColumnV2,
|
| 1067 |
+
self)._get_dense_tensor_internal(transformation_cache,
|
| 1068 |
+
state_manager)
|
| 1069 |
+
# TPU_EMBEDDING_CORE case.
|
| 1070 |
+
if self._embedding_lookup_device == EmbeddingDevice.TPU_EMBEDDING_CORE:
|
| 1071 |
+
return super(_TPUSharedDeviceSpecificEmbeddingColumnV2,
|
| 1072 |
+
self)._get_dense_tensor_internal(transformation_cache,
|
| 1073 |
+
state_manager)
|
| 1074 |
+
|
| 1075 |
+
# TPU_EMBEDDING_CORE cases.
|
| 1076 |
+
if tpu.under_tpu_inference_context():
|
| 1077 |
+
# For inference, use outside compile to densify and pad the input tensors.
|
| 1078 |
+
sparse_tensor = transformation_cache.get(self.categorical_column.name,
|
| 1079 |
+
state_manager)
|
| 1080 |
+
|
| 1081 |
+
def host_computation():
|
| 1082 |
+
return pad_sparse_embedding_lookup_indices(sparse_tensor,
|
| 1083 |
+
self._tensor_core_shape[1])
|
| 1084 |
+
|
| 1085 |
+
values, mask = tpu_replication.outside_compilation(host_computation)
|
| 1086 |
+
else:
|
| 1087 |
+
# For training, the inputs should already have been densified and padded.
|
| 1088 |
+
values = transformation_cache.get(self.categorical_column.name,
|
| 1089 |
+
state_manager)
|
| 1090 |
+
mask = transformation_cache.get(
|
| 1091 |
+
self.categorical_column.name + _TENSOR_CORE_MASK_KEY_SUFFIX,
|
| 1092 |
+
state_manager)
|
| 1093 |
+
|
| 1094 |
+
# Do a dense embedding lookup on TensorCore.
|
| 1095 |
+
embedding_weights = self.shared_embedding_column_creator.embedding_weights
|
| 1096 |
+
return sparse_embedding_aggregate_slice(embedding_weights, (values, mask),
|
| 1097 |
+
self.get_combiner())
|
videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/functional.py
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# =============================================================================
|
| 15 |
+
"""Functional operations."""
|
| 16 |
+
|
| 17 |
+
from tensorflow.python.tpu.ops import tpu_ops
|
| 18 |
+
|
| 19 |
+
TPUPartitionedCall = tpu_ops.tpu_partitioned_call # pylint: disable=invalid-name
|
videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/preempted_hook.py
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Implementation of the SessionRunHook for preemptible Cloud TPUs."""
|
| 16 |
+
|
| 17 |
+
import logging as _logging
|
| 18 |
+
import os
|
| 19 |
+
import threading
|
| 20 |
+
import time
|
| 21 |
+
|
| 22 |
+
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
|
| 23 |
+
from tensorflow.python.platform import tf_logging as logging
|
| 24 |
+
from tensorflow.python.training import session_run_hook
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class CloudTPUPreemptedHook(session_run_hook.SessionRunHook):
|
| 28 |
+
"""The SessionRunHook for preemptible Cloud TPUs.
|
| 29 |
+
|
| 30 |
+
This is an implementation of SessionRunHook for the pre-emptible Google Cloud
|
| 31 |
+
TPU service. It attempts to close the session if the TPU is preempted, and
|
| 32 |
+
exits the coordinator process if the session cannot be closed.
|
| 33 |
+
"""
|
| 34 |
+
|
| 35 |
+
def __init__(self, cluster):
|
| 36 |
+
self._cluster = cluster
|
| 37 |
+
|
| 38 |
+
def after_create_session(self, session, coord):
|
| 39 |
+
if tpu_cluster_resolver.is_running_in_gce():
|
| 40 |
+
self._tpu_poller = _TPUPollingThread(self._cluster, session)
|
| 41 |
+
self._tpu_poller.start()
|
| 42 |
+
|
| 43 |
+
def end(self, session):
|
| 44 |
+
self._tpu_poller.stop()
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
class _TPUPollingThread(threading.Thread):
|
| 48 |
+
"""A thread that polls the state of a TPU node.
|
| 49 |
+
|
| 50 |
+
When the node transitions into a TERMINAL state (PREEMPTED, TERMINATED)
|
| 51 |
+
that's considered as not recoverable by the underlying infrastructure,
|
| 52 |
+
it attempts to close the session, and exits the entire process if the
|
| 53 |
+
session.close() stucks.
|
| 54 |
+
"""
|
| 55 |
+
|
| 56 |
+
def __init__(self, cluster, session):
|
| 57 |
+
super(_TPUPollingThread, self).__init__()
|
| 58 |
+
|
| 59 |
+
self.daemon = True
|
| 60 |
+
self._running = True
|
| 61 |
+
self._session_closed = False
|
| 62 |
+
self._cluster = cluster
|
| 63 |
+
self._session = session
|
| 64 |
+
self._interval = 30
|
| 65 |
+
|
| 66 |
+
# Some of the Google API libraries are quite chatty, so disable them.
|
| 67 |
+
for name in ['googleapiclient.discovery', 'oauth2client.client']:
|
| 68 |
+
_logging.getLogger(name).setLevel(_logging.WARNING)
|
| 69 |
+
|
| 70 |
+
def stop(self):
|
| 71 |
+
self._running = False
|
| 72 |
+
self._session_closed = True
|
| 73 |
+
self.join()
|
| 74 |
+
|
| 75 |
+
def run(self):
|
| 76 |
+
if not tpu_cluster_resolver.is_running_in_gce():
|
| 77 |
+
logging.warning(
|
| 78 |
+
'TPUPollingThread is running in a non-GCE environment, exiting...')
|
| 79 |
+
self._running = False
|
| 80 |
+
return
|
| 81 |
+
|
| 82 |
+
while self._running:
|
| 83 |
+
recoverable = self._cluster._cloud_tpu_client.recoverable() # pylint: disable=protected-access
|
| 84 |
+
if not recoverable:
|
| 85 |
+
logging.warning(
|
| 86 |
+
'TPUPollingThread found TPU %s in state %s',
|
| 87 |
+
self._cluster._tpu, self._cluster._cloud_tpu_client.state()) # pylint: disable=protected-access
|
| 88 |
+
os._exit(1) # pylint: disable=protected-access
|
| 89 |
+
time.sleep(self._interval)
|
videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tensor_tracer.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tensor_tracer_flags.py
ADDED
|
@@ -0,0 +1,504 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ========================================================================
|
| 15 |
+
"""Utilities to handle tensor tracer parameters."""
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
import os
|
| 19 |
+
import os.path
|
| 20 |
+
import re
|
| 21 |
+
from absl import flags
|
| 22 |
+
from tensorflow.python.ops import linalg_ops
|
| 23 |
+
from tensorflow.python.ops import math_ops
|
| 24 |
+
from tensorflow.python.platform import tf_logging as logging
|
| 25 |
+
|
| 26 |
+
TRACE_MODE_PART_TENSOR = 'part-tensor'
|
| 27 |
+
TRACE_MODE_FULL_TENSOR = 'full-tensor'
|
| 28 |
+
TRACE_MODE_FULL_TENSOR_SUMMARY = 'full_tensor_summary'
|
| 29 |
+
|
| 30 |
+
TRACE_MODE_NAN_INF = 'nan-inf'
|
| 31 |
+
TRACE_MODE_NORM = 'norm'
|
| 32 |
+
TRACE_MODE_MAX_ABS = 'max-abs'
|
| 33 |
+
TRACE_MODE_SUMMARY = 'summary'
|
| 34 |
+
TRACE_MODE_HISTORY = 'history'
|
| 35 |
+
# summary mode to collects a finite set of signatures for each traced tensor,
|
| 36 |
+
# (such as norm, max, min, mean) and dumps it using tb summaries.
|
| 37 |
+
|
| 38 |
+
# Full tensor mode dumps the whole tensor values for the traced tensors without
|
| 39 |
+
# any processing on them; using tb summaries.
|
| 40 |
+
|
| 41 |
+
_SUBMODE_BRIEF = 'brief'
|
| 42 |
+
_SUBMODE_DETAILED = 'detailed'
|
| 43 |
+
|
| 44 |
+
_FLAG_SINGLE_QUOTE_PAT = re.compile(r"\s*--([^=]+)='([^']*)'")
|
| 45 |
+
_FLAG_DOUBLE_QUOTE_PAT = re.compile(r'\s*--([^=]+)="([^"]*)"')
|
| 46 |
+
_FLAG_NO_QUOTE_PAT = re.compile(r'\s*--([^=]+)=(\S*)')
|
| 47 |
+
_FLAG_NO_EQUAL_PAT = re.compile(r'\s*--([^=]+)\s*')
|
| 48 |
+
|
| 49 |
+
FLAGS_ENV_VAR = 'TENSOR_TRACER_FLAGS'
|
| 50 |
+
FLAG_NAME_ENABLE = 'enable'
|
| 51 |
+
FLAG_NAME_TRACE_MODE = 'trace_mode'
|
| 52 |
+
FLAG_NAME_TRACE_SCALAR_OPS = 'trace_scalar'
|
| 53 |
+
FLAG_NAME_SUBMODE = 'submode'
|
| 54 |
+
FLAG_NAME_EXCLUDED_OPNAMES = 'excluded_opnames'
|
| 55 |
+
FLAG_NAME_EXCLUDED_OPTYPES = 'excluded_optypes'
|
| 56 |
+
FLAG_NAME_INCLUDED_OPNAMES = 'included_opnames'
|
| 57 |
+
FLAG_NAME_INCLUDED_OPTYPES = 'included_optypes'
|
| 58 |
+
FLAG_NAME_TRACE_LEVEL = 'trace_level'
|
| 59 |
+
FLAG_NAME_TRACE_DIR = 'trace_dir'
|
| 60 |
+
FLAG_NAME_REPORT_FILE = 'report_file'
|
| 61 |
+
FLAG_NAME_USE_TEST_UNDECLARED_OUTPUTS_DIR = 'use_test_undeclared_outputs_dir'
|
| 62 |
+
FLAG_NAME_OP_RANGE = 'op_range'
|
| 63 |
+
# Folder to dump the pre (before tensor tracer updates) and post graphs (after
|
| 64 |
+
# tensor tracer updates).
|
| 65 |
+
FLAG_NAME_DUMP_BEFORE_AFTER_GRAPHS = 'dump_graphs'
|
| 66 |
+
FLAG_NAME_SUMMARY_SIGNATURES = 'signatures'
|
| 67 |
+
FLAG_NAME_SUMMARY_PER_CORE = 'collect_summary_per_core'
|
| 68 |
+
FLAG_NAME_TEMP_CACHE_VAR = 'use_temp_cache'
|
| 69 |
+
FLAG_NAME_INSPECT_TRACE = 'inspect_trace'
|
| 70 |
+
FLAG_NAME_FINGERPRINT_DIR = 'use_fingerprint_subdirectory'
|
| 71 |
+
FLAG_FLUSH_SUMMARY = 'flush_summaries'
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
VALID_FLAG_NAMES = [
|
| 75 |
+
FLAG_NAME_ENABLE, FLAG_NAME_TRACE_MODE,
|
| 76 |
+
FLAG_NAME_TRACE_SCALAR_OPS,
|
| 77 |
+
FLAG_NAME_SUBMODE, FLAG_NAME_EXCLUDED_OPNAMES,
|
| 78 |
+
FLAG_NAME_EXCLUDED_OPTYPES, FLAG_NAME_INCLUDED_OPNAMES,
|
| 79 |
+
FLAG_NAME_INCLUDED_OPTYPES, FLAG_NAME_TRACE_DIR,
|
| 80 |
+
FLAG_NAME_REPORT_FILE,
|
| 81 |
+
FLAG_NAME_USE_TEST_UNDECLARED_OUTPUTS_DIR,
|
| 82 |
+
FLAG_NAME_OP_RANGE,
|
| 83 |
+
FLAG_NAME_DUMP_BEFORE_AFTER_GRAPHS, FLAG_NAME_TRACE_LEVEL,
|
| 84 |
+
FLAG_NAME_SUMMARY_SIGNATURES, FLAG_NAME_SUMMARY_PER_CORE,
|
| 85 |
+
FLAG_NAME_TEMP_CACHE_VAR, FLAG_NAME_FINGERPRINT_DIR,
|
| 86 |
+
FLAG_NAME_INSPECT_TRACE, FLAG_FLUSH_SUMMARY,
|
| 87 |
+
]
|
| 88 |
+
|
| 89 |
+
_OP_RANGE_PAT = re.compile(r'(\d+):(\d+)')
|
| 90 |
+
_TEST_UNDECLARED_OUTPUTS_DIR_ENV_VAR = 'TEST_UNDECLARED_OUTPUTS_DIR'
|
| 91 |
+
|
| 92 |
+
_TT_DEFAULT_TRACE_LEVEL = 3
|
| 93 |
+
_TT_PREFIX = 'tensor_tracer'
|
| 94 |
+
|
| 95 |
+
_TT_NORM = 'norm'
|
| 96 |
+
_TT_MAX = 'max'
|
| 97 |
+
_TT_MAX_ABS = 'max-abs'
|
| 98 |
+
_TT_MIN = 'min'
|
| 99 |
+
_TT_SPARSITY = 'sparsity'
|
| 100 |
+
_TT_MEAN = 'mean'
|
| 101 |
+
_TT_VAR = 'var'
|
| 102 |
+
_TT_SIZE = 'size'
|
| 103 |
+
|
| 104 |
+
TT_SUMMARY_NORM = '%s_%s' % (_TT_PREFIX, _TT_NORM)
|
| 105 |
+
TT_SUMMARY_MAX = '%s_%s' % (_TT_PREFIX, _TT_MAX)
|
| 106 |
+
TT_SUMMARY_MAX_ABS = '%s_%s' % (_TT_PREFIX, _TT_MAX_ABS)
|
| 107 |
+
TT_SUMMARY_MIN = '%s_%s' % (_TT_PREFIX, _TT_MIN)
|
| 108 |
+
TT_SUMMARY_SPARSITY = '%s_%s' % (_TT_PREFIX, _TT_SPARSITY)
|
| 109 |
+
TT_SUMMARY_MEAN = '%s_%s' % (_TT_PREFIX, _TT_MEAN)
|
| 110 |
+
TT_SUMMARY_VAR = '%s_%s' % (_TT_PREFIX, _TT_VAR)
|
| 111 |
+
TT_SUMMARY_SIZE = '%s_%s' % (_TT_PREFIX, _TT_SIZE)
|
| 112 |
+
|
| 113 |
+
TT_SUMMARY_SIGNATURES = (TT_SUMMARY_NORM, TT_SUMMARY_MAX, TT_SUMMARY_MIN,
|
| 114 |
+
TT_SUMMARY_SPARSITY, TT_SUMMARY_MEAN, TT_SUMMARY_VAR,
|
| 115 |
+
TT_SUMMARY_SIZE, TT_SUMMARY_MAX_ABS)
|
| 116 |
+
|
| 117 |
+
FLAGS = flags.FLAGS
|
| 118 |
+
|
| 119 |
+
DELTA_THRESHOLD = flags.DEFINE_float(
|
| 120 |
+
'delta_threshold',
|
| 121 |
+
default=0.5,
|
| 122 |
+
help=('Log if history based diff crosses this threshold.'))
|
| 123 |
+
TT_CHECK_FILTER = flags.DEFINE_bool(
|
| 124 |
+
'tt_check_filter',
|
| 125 |
+
default=False,
|
| 126 |
+
help='Terminate early to check op name filtering.')
|
| 127 |
+
TT_SINGLE_CORE_SUMMARIES = flags.DEFINE_bool(
|
| 128 |
+
'tt_single_core_summaries',
|
| 129 |
+
default=False,
|
| 130 |
+
help='Report single core metric and avoid aggregation.')
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
class TTParameters(object):
|
| 134 |
+
"""A class that handles the parameters of Tensor Tracer."""
|
| 135 |
+
|
| 136 |
+
def __init__(self, env=None):
|
| 137 |
+
if env:
|
| 138 |
+
self._env = env
|
| 139 |
+
else:
|
| 140 |
+
self._env = os.environ
|
| 141 |
+
self._validate_flag_names()
|
| 142 |
+
self.trace_mode = self._get_trace_mode()
|
| 143 |
+
self.submode = self._get_submode()
|
| 144 |
+
self.trace_dir = self._get_trace_dir()
|
| 145 |
+
self.report_file_path = self._get_report_filepath()
|
| 146 |
+
self.op_range = self._get_op_range()
|
| 147 |
+
self.excluded_opname_re_list = self._flag_value_to_re_list(
|
| 148 |
+
FLAG_NAME_EXCLUDED_OPNAMES)
|
| 149 |
+
self.excluded_optype_re_list = self._flag_value_to_re_list(
|
| 150 |
+
FLAG_NAME_EXCLUDED_OPTYPES)
|
| 151 |
+
|
| 152 |
+
self.included_opname_re_list = self._flag_value_to_re_list(
|
| 153 |
+
FLAG_NAME_INCLUDED_OPNAMES)
|
| 154 |
+
self.included_optype_re_list = self._flag_value_to_re_list(
|
| 155 |
+
FLAG_NAME_INCLUDED_OPTYPES)
|
| 156 |
+
|
| 157 |
+
self.trace_scalar_ops = self.is_flag_on(FLAG_NAME_TRACE_SCALAR_OPS)
|
| 158 |
+
self.use_compact_trace = self.trace_mode in (TRACE_MODE_NAN_INF,
|
| 159 |
+
TRACE_MODE_NORM,
|
| 160 |
+
TRACE_MODE_HISTORY,
|
| 161 |
+
TRACE_MODE_MAX_ABS,
|
| 162 |
+
TRACE_MODE_SUMMARY)
|
| 163 |
+
self.use_temp_cache_var = self.is_flag_on(FLAG_NAME_TEMP_CACHE_VAR)
|
| 164 |
+
self.inspect_trace = self.is_flag_on(FLAG_NAME_INSPECT_TRACE)
|
| 165 |
+
self.use_fingerprint_subdir = self.is_flag_on(FLAG_NAME_FINGERPRINT_DIR)
|
| 166 |
+
|
| 167 |
+
_, self.graph_dump_path = self.get_flag_value(
|
| 168 |
+
FLAG_NAME_DUMP_BEFORE_AFTER_GRAPHS)
|
| 169 |
+
self.trace_level = self._get_flag_int_value(FLAG_NAME_TRACE_LEVEL,
|
| 170 |
+
_TT_DEFAULT_TRACE_LEVEL)
|
| 171 |
+
self.summary_signatures = self._get_summary_signatures()
|
| 172 |
+
self.collect_summary_per_core = self.is_flag_on(FLAG_NAME_SUMMARY_PER_CORE)
|
| 173 |
+
# TODO(b/199284834): Will be resolved with referenced bug.
|
| 174 |
+
if self.collect_summary_per_core:
|
| 175 |
+
logging.warning('Aggregate signatures are approximate for mean, variance'
|
| 176 |
+
' and sparsity.')
|
| 177 |
+
self.flush_summaries_with_outside_compile = self.is_flag_on(
|
| 178 |
+
FLAG_FLUSH_SUMMARY)
|
| 179 |
+
# Do not produce errors or warnings if Tensor Tracer is not enabled.
|
| 180 |
+
if self.is_enabled():
|
| 181 |
+
self._check_flag_errors()
|
| 182 |
+
|
| 183 |
+
def _check_flag_errors(self):
|
| 184 |
+
if self.trace_mode in (TRACE_MODE_SUMMARY, TRACE_MODE_FULL_TENSOR_SUMMARY):
|
| 185 |
+
if not self.trace_dir:
|
| 186 |
+
raise ValueError('trace_dir must be explicitly provided in '
|
| 187 |
+
'TENSOR_TRACER_FLAGS when summary mode is used.')
|
| 188 |
+
|
| 189 |
+
def _get_report_filepath(self):
|
| 190 |
+
"""Sets the path of the output report file."""
|
| 191 |
+
|
| 192 |
+
found, report_file_path = self.get_flag_value(FLAG_NAME_REPORT_FILE)
|
| 193 |
+
if found and report_file_path and self.use_test_undeclared_outputs_dir():
|
| 194 |
+
if os.path.isabs(report_file_path):
|
| 195 |
+
raise ValueError('If use_test_undeclared_outputs_dir is set,'
|
| 196 |
+
'report_file_path cannot be an absolute path (%s)'
|
| 197 |
+
%report_file_path)
|
| 198 |
+
outputs_dir = self._env.get(_TEST_UNDECLARED_OUTPUTS_DIR_ENV_VAR)
|
| 199 |
+
report_file_path = os.path.join(outputs_dir, report_file_path)
|
| 200 |
+
return report_file_path
|
| 201 |
+
|
| 202 |
+
def _get_op_range(self):
|
| 203 |
+
"""Sets the index range of the Ops that we will consider tracing."""
|
| 204 |
+
found, op_range = self.get_flag_value(FLAG_NAME_OP_RANGE)
|
| 205 |
+
if not found or not op_range:
|
| 206 |
+
op_range = (-1, -1) # this means including all ops.
|
| 207 |
+
return op_range
|
| 208 |
+
match = _OP_RANGE_PAT.match(op_range)
|
| 209 |
+
if not match:
|
| 210 |
+
op_range = (-1, -1) # this means including all ops.
|
| 211 |
+
return op_range
|
| 212 |
+
op_range = (int(match.group(1)), int(match.group(2)))
|
| 213 |
+
return op_range
|
| 214 |
+
|
| 215 |
+
def _get_trace_dir(self):
|
| 216 |
+
found, trace_dir = self.get_flag_value(FLAG_NAME_TRACE_DIR)
|
| 217 |
+
if found and trace_dir and self.use_test_undeclared_outputs_dir():
|
| 218 |
+
raise ValueError(
|
| 219 |
+
'Cannot not use --%s and --%s at the same time' %
|
| 220 |
+
(FLAG_NAME_TRACE_DIR, FLAG_NAME_USE_TEST_UNDECLARED_OUTPUTS_DIR))
|
| 221 |
+
if self.use_test_undeclared_outputs_dir():
|
| 222 |
+
trace_dir = self._env.get(_TEST_UNDECLARED_OUTPUTS_DIR_ENV_VAR)
|
| 223 |
+
return trace_dir
|
| 224 |
+
|
| 225 |
+
def _get_trace_mode(self):
|
| 226 |
+
"""Checks if the given trace mode is valid."""
|
| 227 |
+
|
| 228 |
+
found, trace_mode = self.get_flag_value(FLAG_NAME_TRACE_MODE)
|
| 229 |
+
if not found or not trace_mode:
|
| 230 |
+
trace_mode = TRACE_MODE_NORM
|
| 231 |
+
valid_trace_modes = [
|
| 232 |
+
TRACE_MODE_NAN_INF, TRACE_MODE_PART_TENSOR, TRACE_MODE_FULL_TENSOR,
|
| 233 |
+
TRACE_MODE_NORM, TRACE_MODE_MAX_ABS,
|
| 234 |
+
TRACE_MODE_SUMMARY, TRACE_MODE_FULL_TENSOR_SUMMARY,
|
| 235 |
+
TRACE_MODE_HISTORY
|
| 236 |
+
]
|
| 237 |
+
if trace_mode not in valid_trace_modes:
|
| 238 |
+
raise ValueError('Invalid trace mode "%s" given to the Tensor_Tracer.'
|
| 239 |
+
'Valid trace modes are: %s'%(trace_mode,
|
| 240 |
+
valid_trace_modes))
|
| 241 |
+
return trace_mode
|
| 242 |
+
|
| 243 |
+
def is_brief_mode(self):
|
| 244 |
+
return self.submode == _SUBMODE_BRIEF
|
| 245 |
+
|
| 246 |
+
def _get_submode(self):
  """Checks if the given submode is valid.

  Returns:
    The validated submode string.

  Raises:
    ValueError: if the flag value is not a recognized submode.
  """
  found, submode = self.get_flag_value(FLAG_NAME_SUBMODE)
  if not found or not submode:
    # Default submode when the flag is absent or empty.
    submode = _SUBMODE_DETAILED
  # NOTE: the original code had an `if not submode: return` branch here;
  # it was unreachable because `submode` is always truthy at this point,
  # so it has been removed.
  valid_submodes = [_SUBMODE_DETAILED, _SUBMODE_BRIEF]
  if submode not in valid_submodes:
    raise ValueError('Invalid submode "%s" given to the Tensor_Tracer.'
                     'Valid submodes are: %s'%(submode,
                                               valid_submodes))
  return submode
|
| 260 |
+
|
| 261 |
+
@staticmethod
def match_next_flag(tt_flags, pos):
  """Returns the match for the next TensorTracer flag.

  Args:
    tt_flags: a string that contains the flags.
    pos: where in flags to start the search.

  Returns:
    A pair where the first element is the regular-expression
    match found and the second element indicates if the match
    has a value.
  """
  # Try the value-carrying patterns first, in order of precedence:
  # double-quoted value, single-quoted value, then unquoted value.
  for pattern in (_FLAG_DOUBLE_QUOTE_PAT, _FLAG_SINGLE_QUOTE_PAT,
                  _FLAG_NO_QUOTE_PAT):
    match = pattern.match(tt_flags, pos)
    if match:
      return match, True
  # A flag name given without an '=value' part.
  match = _FLAG_NO_EQUAL_PAT.match(tt_flags, pos)
  if match:
    return match, False
  # No flag recognized at this position.
  return None, False
|
| 290 |
+
|
| 291 |
+
def _validate_flag_names(self):
  """Validates if the TensorTrace flags passed are valid."""
  tensor_tracer_flags = self._env.get(FLAGS_ENV_VAR)
  if not tensor_tracer_flags:
    return
  # Scan the flag string flag-by-flag; stop when no further flag parses.
  pos = 0
  match, _ = TTParameters.match_next_flag(tensor_tracer_flags, pos)
  while match:
    flag_name = match.group(1)
    if flag_name not in VALID_FLAG_NAMES:
      raise ValueError(
          'The flag name "%s" passed via the environment variable "%s" '
          'is invalid. Valid flag names are:'
          '\n%s' % (flag_name, FLAGS_ENV_VAR, VALID_FLAG_NAMES))
    pos = match.end()
    match, _ = TTParameters.match_next_flag(tensor_tracer_flags, pos)
|
| 308 |
+
|
| 309 |
+
def _supported_signatures(self):
  """Returns a tuple of supported signatures."""
  # Every summary signature defined in this module is currently supported.
  return TT_SUMMARY_SIGNATURES
|
| 312 |
+
|
| 313 |
+
def _get_summary_signatures(self):
  """Verifies and returns the summary signatures.

  Returns:
    A dictionary of the signature identifiers {signature: index} that will be
    computed when trace_mode is summary.
  """
  signatures = self._flag_value_as_list(FLAG_NAME_SUMMARY_SIGNATURES)
  supported_signatures = self._supported_signatures()

  tt_signatures = []
  for signature in signatures:
    # Accept either the bare name or the prefixed canonical name.
    signature_with_prefix = '%s_%s' % (_TT_PREFIX, signature)
    if signature in supported_signatures:
      tt_signatures.append(signature)
    elif signature_with_prefix in supported_signatures:
      tt_signatures.append(signature_with_prefix)
    else:
      # Fix: pass lazy %-style arguments to logging instead of eagerly
      # formatting the message with the % operator.
      logging.warning('Unknown signature:%s. Supported signatures: %s',
                      signature, supported_signatures)
  if not tt_signatures:
    # Default case collects norm and max only.
    return {TT_SUMMARY_MAX_ABS: 0, TT_SUMMARY_NORM: 1}
  return {signature: idx for idx, signature in enumerate(tt_signatures)}
|
| 338 |
+
|
| 339 |
+
def get_signature_to_agg_fn_map(self):
  """Returns a map that contains the aggregate function for each signature."""
  # TODO(b/199284834): Aggregations are not accurate for mean and sparsity if
  # cores have a different number of elements. Variance uses the maximal core
  # variance.
  def _reduce_max_abs(t, axis=0):
    # Aggregate by the maximum of absolute values.
    return math_ops.reduce_max(math_ops.abs(t), axis=axis)

  agg_fn_map = {
      TRACE_MODE_NORM: linalg_ops.norm,
      TRACE_MODE_HISTORY: math_ops.reduce_max,
      TRACE_MODE_MAX_ABS: math_ops.reduce_max,
      TRACE_MODE_NAN_INF: math_ops.reduce_max,
      TT_SUMMARY_NORM: linalg_ops.norm,
      TT_SUMMARY_MAX: math_ops.reduce_max,
      TT_SUMMARY_MAX_ABS: _reduce_max_abs,
      TT_SUMMARY_MIN: math_ops.reduce_min,
      # Exact if each part has the same number of values.
      TT_SUMMARY_SPARSITY: math_ops.reduce_mean,
      TT_SUMMARY_MEAN: math_ops.reduce_mean,
      TT_SUMMARY_VAR: math_ops.reduce_max,  # Simply reduce max variance.
      TT_SUMMARY_SIZE: math_ops.reduce_sum,
  }
  return agg_fn_map
|
| 359 |
+
|
| 360 |
+
def _flag_value_as_list(self, wanted_flag_name):
  """Returns the string list of a TensorTracer flag.

  Args:
    wanted_flag_name: the name of the flag we are looking for.

  Returns:
    The list value of the flag.
  """
  found, flag_value = self.get_flag_value(wanted_flag_name)
  if not found:
    return []
  assert flag_value is not None
  return flag_value.split(',')
|
| 376 |
+
|
| 377 |
+
def _flag_value_as_int_list(self, wanted_flag_name):
  """Returns the integer list of a TensorTracer flag.

  Args:
    wanted_flag_name: the name of the flag we are looking for.

  Returns:
    The list of integers parsed from the flag's comma-separated value, or
    an empty list when the flag is absent, empty, or unparsable.
  """
  int_list = []
  found, flag_value = self.get_flag_value(wanted_flag_name)

  if found and flag_value:
    try:
      integer_values = flag_value.split(',')
      int_list = [int(int_val) for int_val in integer_values]
    except ValueError:
      # Bug fix: log the offending flag value instead of `int_list`,
      # which is always still [] when the conversion fails.
      logging.warning('Cannot convert %s to int for flag %s', flag_value,
                      wanted_flag_name)
  return int_list
|
| 399 |
+
|
| 400 |
+
def _get_flag_int_value(self, wanted_flag_name, default_value):
  """Returns the int value of a TensorTracer flag.

  Args:
    wanted_flag_name: the name of the flag we are looking for.
    default_value: the default value for the flag, if not provided.

  Returns:
    The integer value of the flag, or `default_value` when the flag is
    absent or its value cannot be converted.
  """
  flag_int_value = default_value
  found, flag_value = self.get_flag_value(wanted_flag_name)

  if found:
    try:
      flag_int_value = int(flag_value)
    except (ValueError, TypeError):
      # Bug fixes: (1) also catch TypeError, raised by int(None) when the
      # flag is present without a value; (2) log the raw flag value rather
      # than `flag_int_value`, which still holds the default here.
      logging.warning('Cannot convert %s to int for flag %s' % (
          flag_value, wanted_flag_name))
  return flag_int_value
|
| 421 |
+
|
| 422 |
+
def get_flag_value(self, wanted_flag_name):
  """Returns the value of a TensorTracer flags.

  Args:
    wanted_flag_name: the name of the flag we are looking for.

  Returns:
    A pair where the first element indicates if the flag is
    found and the second element is the value of the flag.
  """
  tensor_tracer_flags = self._env.get(FLAGS_ENV_VAR)
  if not tensor_tracer_flags:
    return False, None
  pos = 0
  while True:
    match, has_value = TTParameters.match_next_flag(
        tensor_tracer_flags, pos)
    if not match:
      # End of the flag string (or an unparsable region) without finding
      # the wanted flag.
      return False, None
    flag_name = match.group(1)
    if has_value:
      flag_value = match.group(2)
    else:
      flag_value = None
    if flag_name == wanted_flag_name:
      return True, flag_value
    pos = match.end()
  # NOTE: the loop above always exits via a return statement. The original
  # trailing `raise RuntimeError(...)` was unreachable and referenced a
  # possibly-unbound `flag_name`, so it has been removed.
|
| 455 |
+
|
| 456 |
+
def _flag_value_to_re_list(self, flag_name):
  """Converts list of strings to compiled RE."""
  found, flag_value = self.get_flag_value(flag_name)
  if not found or not flag_value:
    return []
  # One compiled pattern per comma-separated entry.
  return [re.compile(v) for v in flag_value.split(',')]
|
| 468 |
+
|
| 469 |
+
def is_flag_on(self, flag_name):
  """Returns True if the given flag is on."""
  found, flag_value = self.get_flag_value(flag_name)
  if not found:
    return False
  if flag_value is None:
    # A flag present without a value counts as enabled.
    return True
  # Otherwise interpret the value as a boolean string.
  return flag_value.lower() in ('1', 't', 'true', 'y', 'yes')
|
| 481 |
+
|
| 482 |
+
def is_enabled(self):
  """Returns True if TensorTracer is enabled."""
  if not self.is_flag_on(FLAG_NAME_ENABLE):
    return False
  logging.debug('Tensor Tracer is enabled with flags %s.',
                self._env.get(FLAGS_ENV_VAR))
  return True
|
| 491 |
+
|
| 492 |
+
def use_test_undeclared_outputs_dir(self):
  """Decides the output directory of the report and trace files.

  Returns:
    True if the output files should be written to the
    test-undeclared-outputs-directory defined via an
    env variable.
  """
  return self.is_flag_on(FLAG_NAME_USE_TEST_UNDECLARED_OUTPUTS_DIR)
|
videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/tensor_tracer_pb2.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/python/tpu/tensor_tracer.proto
"""Generated protocol buffer code."""
# NOTE: machine-generated module. To change the messages, edit the .proto
# source above and regenerate; do not modify this file by hand.
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


from tensorflow.core.framework import graph_pb2 as tensorflow_dot_core_dot_framework_dot_graph__pb2


# Registers the serialized FileDescriptorProto for tensor_tracer.proto.
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n)tensorflow/python/tpu/tensor_tracer.proto\x12\ntensorflow\x1a%tensorflow/core/framework/graph.proto\"\xb4\t\n\x12TensorTracerReport\x12\x41\n\x06\x63onfig\x18\x01 \x01(\x0b\x32\x31.tensorflow.TensorTracerReport.TensorTracerConfig\x12&\n\x08graphdef\x18\x02 \x01(\x0b\x32\x14.tensorflow.GraphDef\x12@\n\ttensordef\x18\x03 \x03(\x0b\x32-.tensorflow.TensorTracerReport.TensordefEntry\x12\x13\n\x0b\x66ingerprint\x18\x04 \x01(\t\x12\x1e\n\x16\x63oncrete_function_name\x18\x05 \x01(\t\x12\x1c\n\x14last_common_frame_no\x18\x06 \x01(\x05\x12\x0f\n\x07outputs\x18\x07 \x03(\t\x12\x42\n\rtracing_stats\x18\x08 \x01(\x0b\x32+.tensorflow.TensorTracerReport.TracingStats\x1a`\n\x0eTensordefEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12=\n\x05value\x18\x02 \x01(\x0b\x32..tensorflow.TensorTracerReport.TracedTensorDef:\x02\x38\x01\x1a\xc8\x01\n\x12TensorTracerConfig\x12\x0f\n\x07version\x18\x01 \x01(\t\x12\x0e\n\x06\x64\x65vice\x18\x02 \x01(\t\x12\x12\n\ntrace_mode\x18\x03 \x01(\t\x12\x11\n\tnum_cores\x18\x04 \x01(\x05\x12\x11\n\tnum_hosts\x18\x05 \x01(\x05\x12\x0f\n\x07submode\x18\x06 \x01(\t\x12\x1a\n\x12num_cores_per_host\x18\x07 \x01(\x05\x12\x16\n\x0eincluded_cores\x18\x08 \x03(\x05\x12\x12\n\nsignatures\x18\t \x03(\t\x1a\xef\x01\n\x0cTracingStats\x12\x15\n\rtotal_tensors\x18\x01 \x01(\x05\x12\x16\n\x0etraced_tensors\x18\x02 \x01(\x05\x12_\n\x13traced_tensor_types\x18\x03 \x03(\x0b\x32\x42.tensorflow.TensorTracerReport.TracingStats.TracedTensorTypesEntry\x12\x15\n\radded_tensors\x18\x04 \x01(\x05\x1a\x38\n\x16TracedTensorTypesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\xa9\x02\n\x0fTracedTensorDef\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0b\x63\x61\x63he_index\x18\x02 \x01(\x05\x12\x18\n\x10trace_point_name\x18\x03 \x01(\t\x12\x11\n\tis_traced\x18\x04 \x01(\x08\x12\x13\n\x0b\x65xplanation\x18\x05 \x01(\t\x12K\n\rop_stack_info\x18\x06 \x01(\x0b\x32\x34.tensorflow.TensorTracerReport.TracedTensorDef.Stack\x1a\x64\n\x05Stack\x12\x16\n\x0estack_fn_names\x18\x01 \x03(\t\x12\x13\n\x0bstack_lines\x18\x02 \x03(\t\x12\x17\n\x0fstack_filenames\x18\x03 \x03(\t\x12\x15\n\rstack_linenos\x18\x04 \x03(\x05\x62\x06proto3')

_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'tensorflow.python.tpu.tensor_tracer_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
  # Pure-Python descriptor implementation: attach serialized options and
  # byte offsets computed by protoc for each generated message type.
  DESCRIPTOR._options = None
  _TENSORTRACERREPORT_TENSORDEFENTRY._options = None
  _TENSORTRACERREPORT_TENSORDEFENTRY._serialized_options = b'8\001'
  _TENSORTRACERREPORT_TRACINGSTATS_TRACEDTENSORTYPESENTRY._options = None
  _TENSORTRACERREPORT_TRACINGSTATS_TRACEDTENSORTYPESENTRY._serialized_options = b'8\001'
  _TENSORTRACERREPORT._serialized_start=97
  _TENSORTRACERREPORT._serialized_end=1301
  _TENSORTRACERREPORT_TENSORDEFENTRY._serialized_start=460
  _TENSORTRACERREPORT_TENSORDEFENTRY._serialized_end=556
  _TENSORTRACERREPORT_TENSORTRACERCONFIG._serialized_start=559
  _TENSORTRACERREPORT_TENSORTRACERCONFIG._serialized_end=759
  _TENSORTRACERREPORT_TRACINGSTATS._serialized_start=762
  _TENSORTRACERREPORT_TRACINGSTATS._serialized_end=1001
  _TENSORTRACERREPORT_TRACINGSTATS_TRACEDTENSORTYPESENTRY._serialized_start=945
  _TENSORTRACERREPORT_TRACINGSTATS_TRACEDTENSORTYPESENTRY._serialized_end=1001
  _TENSORTRACERREPORT_TRACEDTENSORDEF._serialized_start=1004
  _TENSORTRACERREPORT_TRACEDTENSORDEF._serialized_end=1301
  _TENSORTRACERREPORT_TRACEDTENSORDEF_STACK._serialized_start=1201
  _TENSORTRACERREPORT_TRACEDTENSORDEF_STACK._serialized_end=1301
# @@protoc_insertion_point(module_scope)
|
videochat2/lib/python3.10/site-packages/tensorflow/python/tpu/topology.py
ADDED
|
@@ -0,0 +1,239 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ======================================
|
| 15 |
+
"""Defines the `Topology` class, that describes a TPU fabric topology."""
|
| 16 |
+
|
| 17 |
+
import numpy as np
|
| 18 |
+
|
| 19 |
+
from tensorflow.core.protobuf.tpu import topology_pb2
|
| 20 |
+
from tensorflow.python.util.tf_export import tf_export
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def _tpu_device_name(job, task, device):
|
| 24 |
+
"""Returns the device name for the TPU `device` on `task` of `job`."""
|
| 25 |
+
if job is None:
|
| 26 |
+
return "/task:%d/device:TPU:%d" % (task, device)
|
| 27 |
+
else:
|
| 28 |
+
return "/job:%s/task:%d/device:TPU:%d" % (job, task, device)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def _tpu_host_device_name(job, task):
|
| 32 |
+
"""Returns the device name for the CPU device on `task` of `job`."""
|
| 33 |
+
if job is None:
|
| 34 |
+
return "/task:%d/device:CPU:0" % task
|
| 35 |
+
else:
|
| 36 |
+
return "/job:%s/task:%d/device:CPU:0" % (job, task)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
@tf_export("tpu.experimental.Topology")
class Topology(object):
  """Describes a set of TPU devices.

  Represents both the shape of the physical mesh, and the mapping between
  TensorFlow TPU devices to physical mesh coordinates.
  """

  def __init__(self, serialized=None, mesh_shape=None, device_coordinates=None):
    """Builds a Topology object.

    If `serialized` is not `None`, the topology is parsed from `serialized` and
    the other arguments are ignored. Otherwise, the topology is computed from
    `mesh_shape` and `device_coordinates`.

    Args:
      serialized: A serialized `TopologyProto`, or `None`. If not `None`, the
        serialized proto is parsed to discover the topology.
      mesh_shape: A sequence of 4 positive integers, or `None`. If not `None`,
        the shape of the TPU topology, in number of cores. Ignored if
        `serialized` is not `None`.
      device_coordinates: A rank 3 numpy array that describes the mapping from
        TensorFlow TPU devices to TPU fabric coordinates, or `None`. If
        specified, array is a rank 3 int32 array with shape
        `[tasks, devices, axis]`. `tasks` is the number of tasks in the TPU
        cluster, `devices` is the number of TPU devices per task, and `axis` is
        the number of axes in the TPU cluster topology. Each entry gives the
        `axis`-th coordinate in the topology of a task/device pair. TPU
        topologies are 4-dimensional, with dimensions `(x, y, z, core number)`.
        This arg is ignored if `serialized` is not `None`.

    Raises:
      ValueError: If `serialized` does not describe a well-formed topology.
      ValueError: If `serialized` is `None` and `mesh_shape` is not a sequence
        of 4 positive integers.
      ValueError: If `serialized` is `None` and `device_coordinates` is not a
        rank 3 numpy int32 array that describes a valid coordinate mapping.
    """

    self._serialized = serialized

    if serialized:
      # Parsing fills in _mesh_shape and _device_coordinates.
      self._parse_topology(serialized)
    else:
      self._mesh_shape = np.asarray(mesh_shape, dtype=np.int32)
      self._device_coordinates = np.asarray(device_coordinates, np.int32)
      if len(self._mesh_shape) != 4 or any(self._mesh_shape < 1):
        raise ValueError("`mesh_shape` must be a sequence of 4 positive "
                         f"entries; got `mesh_shape={self._mesh_shape}`")

      # The last axis of device_coordinates must match the mesh rank.
      if (len(self._device_coordinates.shape) != 3 or
          self._device_coordinates.shape[2] != len(self._mesh_shape)):
        raise ValueError(
            "`device_coordinates` must be a rank 3 int32 array "
            "with minor dimension equal to the `mesh_shape` rank"
            "got device_coordinates.shape={} len(device_coordinates.shape)={} device_coordinates.shape[2]={} mesh_shape={}, len(mesh_shape)={}"
            .format(self._device_coordinates.shape,
                    len(self._device_coordinates.shape),
                    self._device_coordinates.shape[2], self._mesh_shape,
                    len(self._mesh_shape)))

    # Precompute the inverse maps from fabric coordinate -> task ordinal
    # and fabric coordinate -> device ordinal.
    self._topology_tasks, self._topology_devices = self._invert_topology()

    # Coordinates of devices that are missing
    self._missing_devices = np.argwhere(self._topology_tasks < 0)

  def _parse_topology(self, serialized):
    """Parses a serialized `TopologyProto` into `self`."""
    proto = topology_pb2.TopologyProto()
    proto.ParseFromString(serialized)

    self._mesh_shape = np.array(proto.mesh_shape, dtype=np.int32)
    if len(self._mesh_shape) != 4 or any(self._mesh_shape < 1):
      raise ValueError("`mesh_shape` must be a vector of size 4 with positive "
                       "entries; got {}".format(self._mesh_shape))

    if proto.num_tasks < 0:
      raise ValueError("`num_tasks` must be >= 0; got {}".format(
          proto.num_tasks))
    if proto.num_tpu_devices_per_task < 0:
      raise ValueError("`num_tpu_devices_per_task` must be >= 0; got {}".format(
          proto.num_tpu_devices_per_task))

    # The flat coordinate list must factor exactly into
    # tasks * devices_per_task * mesh_rank entries.
    expected_coordinates_size = (
        proto.num_tasks * proto.num_tpu_devices_per_task * len(
            proto.mesh_shape))
    if len(proto.device_coordinates) != expected_coordinates_size:
      raise ValueError("`device_coordinates` must have shape num_tasks ({}) * "
                       "num_tpu_devices_per_task ({}) * len(mesh_shape) ({}); "
                       "got shape {}".format(proto.num_tasks,
                                             proto.num_tpu_devices_per_task,
                                             proto.mesh_shape,
                                             len(proto.device_coordinates)))

    coords = np.array(proto.device_coordinates, dtype=np.int32)
    # `coords` is still 1-D here, so builtin any() iterates elementwise.
    if any(coords < 0):
      raise ValueError(
          "All values in `device_coordinates` must be >= 0, got {}"
          .format(coords))
    coords = coords.reshape((proto.num_tasks, proto.num_tpu_devices_per_task,
                             len(proto.mesh_shape)))
    self._device_coordinates = coords

  def _invert_topology(self):
    """Inverts a [task,device,axis] topology to [x,y,z] -> task/device maps."""
    # Entries left at -1 mark coordinates with no attached device.
    tasks = np.full(list(self.mesh_shape), -1, dtype=np.int32)
    devices = np.full(list(self.mesh_shape), -1, dtype=np.int32)
    for task in range(self.device_coordinates.shape[0]):
      for device in range(self.device_coordinates.shape[1]):
        x, y, z, core = self.device_coordinates[task, device, :]
        tasks[x, y, z, core] = task
        devices[x, y, z, core] = device
    return tasks, devices

  @property
  def mesh_shape(self):
    """A rank 1 int32 array describing the shape of the TPU topology."""
    return self._mesh_shape

  @property
  def mesh_rank(self):
    """Returns the number of dimensions in the mesh."""
    return len(self._mesh_shape)

  @property
  def device_coordinates(self):
    """Describes the mapping from TPU devices to topology coordinates.

    Returns:
      A rank 3 int32 array with shape `[tasks, devices, axis]`.
      `tasks` is the number of tasks in the TPU cluster, `devices` is the number
      of TPU devices per task, and `axis` is the number of axes in the TPU
      cluster topology. Each entry gives the `axis`-th coordinate in the
      topology of a task/device pair. TPU topologies are 4-dimensional, with
      dimensions `(x, y, z, core number)`.
    """
    return self._device_coordinates

  @property
  def missing_devices(self):
    """Array of indices of missing devices."""
    return self._missing_devices

  def task_ordinal_at_coordinates(self, device_coordinates):
    """Returns the TensorFlow task number attached to `device_coordinates`.

    Args:
      device_coordinates: An integer sequence describing a device's physical
        coordinates in the TPU fabric.

    Returns:
      Returns the TensorFlow task number that contains the TPU device with those
      physical coordinates.
    """
    return self._topology_tasks[tuple(device_coordinates)]

  def tpu_device_ordinal_at_coordinates(self, device_coordinates):
    """Returns the TensorFlow device number at `device_coordinates`.

    Args:
      device_coordinates: An integer sequence describing a device's physical
        coordinates in the TPU fabric.

    Returns:
      Returns the TensorFlow device number within the task corresponding to
      attached to the device with those physical coordinates.
    """
    return self._topology_devices[tuple(device_coordinates)]

  def cpu_device_name_at_coordinates(self, device_coordinates, job=None):
    """Returns the CPU device attached to a logical core."""
    return _tpu_host_device_name(
        job, self._topology_tasks[tuple(device_coordinates)])

  def tpu_device_name_at_coordinates(self, device_coordinates, job=None):
    """Returns the name of the TPU device assigned to a logical core."""
    return _tpu_device_name(job,
                            self._topology_tasks[tuple(device_coordinates)],
                            self._topology_devices[tuple(device_coordinates)])

  @property
  def num_tasks(self):
    """Returns the number of TensorFlow tasks in the TPU slice."""
    return self._device_coordinates.shape[0]

  @property
  def num_tpus_per_task(self):
    """Returns the number of TPU devices per task in the TPU slice."""
    return self._device_coordinates.shape[1]

  def serialized(self):
    """Returns the serialized form of the topology."""
    # Lazily serialize and cache; subsequent calls reuse the cached bytes.
    if self._serialized is None:
      proto = topology_pb2.TopologyProto()
      proto.mesh_shape[:] = list(self._mesh_shape)
      proto.num_tasks = self._device_coordinates.shape[0]
      proto.num_tpu_devices_per_task = self._device_coordinates.shape[1]
      proto.device_coordinates.extend(list(self._device_coordinates.flatten()))
      self._serialized = proto.SerializeToString()

    return self._serialized
|